
layerscape: update kernel patches

Updated the kernel patches to align the layerscape kernel
with the latest LSDK Linux (LSDK-17.09-update-103017-V4.9 tag).

Signed-off-by: Yangbo Lu <[email protected]>
Yangbo Lu 8 years ago
parent
commit
ce6311d283

+ 57 - 8
target/linux/layerscape/patches-4.9/201-config-support-layerscape.patch

@@ -1,4 +1,4 @@
-From 11edf9c88acea13d1a02901289060263b4027a77 Mon Sep 17 00:00:00 2001
+From 7992b4384d94c5e1bad998ca3a9a5781caac8e62 Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <[email protected]>
 Date: Mon, 25 Sep 2017 09:52:26 +0800
 Subject: [PATCH] config: support layerscape
@@ -30,11 +30,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  drivers/soc/fsl/layerscape/Kconfig      |  10 +++
  drivers/soc/fsl/layerscape/Makefile     |   1 +
  drivers/soc/fsl/rcpm.c                  | 154 ++++++++++++++++++++++++++++++++
- drivers/staging/Kconfig                 |   4 +
- drivers/staging/Makefile                |   2 +
+ drivers/staging/Kconfig                 |   6 ++
+ drivers/staging/Makefile                |   3 +
  drivers/staging/fsl-dpaa2/Kconfig       |  41 +++++++++
  drivers/staging/fsl-dpaa2/Makefile      |   9 ++
- 18 files changed, 309 insertions(+), 4 deletions(-)
+ 18 files changed, 312 insertions(+), 4 deletions(-)
  create mode 100644 drivers/soc/fsl/Kconfig
  create mode 100644 drivers/soc/fsl/Kconfig.arm
  create mode 100644 drivers/soc/fsl/layerscape/Kconfig
@@ -43,6 +43,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  create mode 100644 drivers/staging/fsl-dpaa2/Kconfig
  create mode 100644 drivers/staging/fsl-dpaa2/Makefile
 
+diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
+index e1c0e2e0..4211a7fd 100644
 --- a/drivers/base/Kconfig
 +++ b/drivers/base/Kconfig
 @@ -237,6 +237,7 @@ config GENERIC_CPU_AUTOPROBE
@@ -53,9 +55,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  source "drivers/base/regmap/Kconfig"
  
+diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
+index ad7250fa..6d788fd7 100644
 --- a/drivers/crypto/Makefile
 +++ b/drivers/crypto/Makefile
-@@ -3,7 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += at
+@@ -3,7 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
  obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
  obj-$(CONFIG_CRYPTO_DEV_BFIN_CRC) += bfin_crc.o
  obj-$(CONFIG_CRYPTO_DEV_CCP) += ccp/
@@ -64,6 +68,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
  obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
  obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
+diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
+index d1ca45fb..74a2864e 100644
 --- a/drivers/net/ethernet/freescale/Kconfig
 +++ b/drivers/net/ethernet/freescale/Kconfig
 @@ -5,7 +5,7 @@
@@ -82,6 +88,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +source "drivers/net/ethernet/freescale/sdk_fman/Kconfig"
 +source "drivers/net/ethernet/freescale/sdk_dpaa/Kconfig"
  endif # NET_VENDOR_FREESCALE
+diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
+index cbe21dc7..a5d4405f 100644
 --- a/drivers/net/ethernet/freescale/Makefile
 +++ b/drivers/net/ethernet/freescale/Makefile
 @@ -21,4 +21,6 @@ gianfar_driver-objs := gianfar.o \
@@ -91,6 +99,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +obj-$(if $(CONFIG_FSL_SDK_FMAN),y) += sdk_fman/
 +obj-$(if $(CONFIG_FSL_SDK_DPAA_ETH),y) += sdk_dpaa/
  obj-$(CONFIG_FSL_FMAN) += fman/
+diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
+index ee3de342..4c45beda 100644
 --- a/drivers/ptp/Kconfig
 +++ b/drivers/ptp/Kconfig
 @@ -39,6 +39,35 @@ config PTP_1588_CLOCK_GIANFAR
@@ -129,6 +139,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  config PTP_1588_CLOCK_IXP46X
  	tristate "Intel IXP46x as PTP clock"
  	depends on IXP4XX_ETH
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index 0723c97e..df610dcd 100644
 --- a/drivers/rtc/Kconfig
 +++ b/drivers/rtc/Kconfig
 @@ -414,6 +414,14 @@ config RTC_DRV_PCF85063
@@ -146,9 +158,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  config RTC_DRV_PCF8563
  	tristate "Philips PCF8563/Epson RTC8564"
  	help
+diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
+index 1ac694a3..7675b8a7 100644
 --- a/drivers/rtc/Makefile
 +++ b/drivers/rtc/Makefile
-@@ -111,6 +111,7 @@ obj-$(CONFIG_RTC_DRV_PCF2127)	+= rtc-pcf
+@@ -111,6 +111,7 @@ obj-$(CONFIG_RTC_DRV_PCF2127)	+= rtc-pcf2127.o
  obj-$(CONFIG_RTC_DRV_PCF50633)	+= rtc-pcf50633.o
  obj-$(CONFIG_RTC_DRV_PCF85063)	+= rtc-pcf85063.o
  obj-$(CONFIG_RTC_DRV_PCF8523)	+= rtc-pcf8523.o
@@ -156,6 +170,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  obj-$(CONFIG_RTC_DRV_PCF8563)	+= rtc-pcf8563.o
  obj-$(CONFIG_RTC_DRV_PCF8583)	+= rtc-pcf8583.o
  obj-$(CONFIG_RTC_DRV_PIC32)	+= rtc-pic32.o
+diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
+index e6e90e80..f31bceb6 100644
 --- a/drivers/soc/Kconfig
 +++ b/drivers/soc/Kconfig
 @@ -1,8 +1,7 @@
@@ -168,6 +184,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
  source "drivers/soc/mediatek/Kconfig"
  source "drivers/soc/qcom/Kconfig"
  source "drivers/soc/rockchip/Kconfig"
+diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
+new file mode 100644
+index 00000000..d4cd25f1
 --- /dev/null
 +++ b/drivers/soc/fsl/Kconfig
 @@ -0,0 +1,22 @@
@@ -193,6 +212,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +if ARM || ARM64
 +source "drivers/soc/fsl/Kconfig.arm"
 +endif
+diff --git a/drivers/soc/fsl/Kconfig.arm b/drivers/soc/fsl/Kconfig.arm
+new file mode 100644
+index 00000000..106c9b98
 --- /dev/null
 +++ b/drivers/soc/fsl/Kconfig.arm
 @@ -0,0 +1,16 @@
@@ -212,6 +234,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +if LS_SOC_DRIVERS
 +	source "drivers/soc/fsl/layerscape/Kconfig"
 +endif
+diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
+index 75e1f533..b8708569 100644
 --- a/drivers/soc/fsl/Makefile
 +++ b/drivers/soc/fsl/Makefile
 @@ -5,3 +5,7 @@
@@ -222,6 +246,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +obj-$(CONFIG_FSL_LS2_CONSOLE)		+= ls2-console/
 +obj-$(CONFIG_SUSPEND)			+= rcpm.o
 +obj-$(CONFIG_LS_SOC_DRIVERS)		+= layerscape/
+diff --git a/drivers/soc/fsl/layerscape/Kconfig b/drivers/soc/fsl/layerscape/Kconfig
+new file mode 100644
+index 00000000..e1373aa1
 --- /dev/null
 +++ b/drivers/soc/fsl/layerscape/Kconfig
 @@ -0,0 +1,10 @@
@@ -235,10 +262,16 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	Say y here to enable FTM alarm support.  The FTM alarm provides
 +	alarm functions for wakeup system from deep sleep.  There is only
 +	one FTM can be used in ALARM(FTM 0).
+diff --git a/drivers/soc/fsl/layerscape/Makefile b/drivers/soc/fsl/layerscape/Makefile
+new file mode 100644
+index 00000000..6299aa1d
 --- /dev/null
 +++ b/drivers/soc/fsl/layerscape/Makefile
 @@ -0,0 +1 @@
 +obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o
+diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
+new file mode 100644
+index 00000000..a6a31c87
 --- /dev/null
 +++ b/drivers/soc/fsl/rcpm.c
 @@ -0,0 +1,154 @@
@@ -396,6 +429,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +}
 +
 +subsys_initcall(layerscape_rcpm_init);
+diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
+index 58a7b350..6c69e3bd 100644
 --- a/drivers/staging/Kconfig
 +++ b/drivers/staging/Kconfig
 @@ -94,6 +94,8 @@ source "drivers/staging/fbtft/Kconfig"
@@ -407,16 +442,20 @@ Signed-off-by: Yangbo Lu <[email protected]>
  source "drivers/staging/wilc1000/Kconfig"
  
  source "drivers/staging/most/Kconfig"
-@@ -106,4 +108,6 @@ source "drivers/staging/greybus/Kconfig"
+@@ -106,4 +108,8 @@ source "drivers/staging/greybus/Kconfig"
  
  source "drivers/staging/vc04_services/Kconfig"
  
 +source "drivers/staging/fsl_qbman/Kconfig"
++
++source "drivers/staging/fsl_ppfe/Kconfig"
 +
  endif # STAGING
+diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
+index 2fa9745d..ee817a5e 100644
 --- a/drivers/staging/Makefile
 +++ b/drivers/staging/Makefile
-@@ -36,9 +36,11 @@ obj-$(CONFIG_UNISYSSPAR)	+= unisys/
+@@ -36,9 +36,12 @@ obj-$(CONFIG_UNISYSSPAR)	+= unisys/
  obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD)	+= clocking-wizard/
  obj-$(CONFIG_FB_TFT)		+= fbtft/
  obj-$(CONFIG_FSL_MC_BUS)	+= fsl-mc/
@@ -428,6 +467,10 @@ Signed-off-by: Yangbo Lu <[email protected]>
  obj-$(CONFIG_GREYBUS)		+= greybus/
  obj-$(CONFIG_BCM2708_VCHIQ)	+= vc04_services/
 +obj-$(CONFIG_FSL_SDK_DPA)	+= fsl_qbman/
++obj-$(CONFIG_FSL_PPFE)		+= fsl_ppfe/
+diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
+new file mode 100644
+index 00000000..8042d9cc
 --- /dev/null
 +++ b/drivers/staging/fsl-dpaa2/Kconfig
 @@ -0,0 +1,41 @@
@@ -472,6 +515,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +source "drivers/staging/fsl-dpaa2/mac/Kconfig"
 +source "drivers/staging/fsl-dpaa2/evb/Kconfig"
 +source "drivers/staging/fsl-dpaa2/ethsw/Kconfig"
+diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
+new file mode 100644
+index 00000000..cbaa8c20
 --- /dev/null
 +++ b/drivers/staging/fsl-dpaa2/Makefile
 @@ -0,0 +1,9 @@
@@ -484,3 +530,6 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +obj-$(CONFIG_FSL_DPAA2_EVB)	+= evb/
 +obj-$(CONFIG_FSL_DPAA2_ETHSW)	+= ethsw/
 +obj-$(CONFIG_PTP_1588_CLOCK_DPAA2) += rtc/
+-- 
+2.14.1
+
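The largest addition in this patch is drivers/soc/fsl/rcpm.c, a Run Control and Power Management driver registered at subsys_initcall time; only its closing lines are visible in the hunk above. A rough sketch of the shape such a driver takes, assuming a typical device-tree probe flow (the compatible string, the of_iomap usage, and the IPPDEXPCR comment are illustrative assumptions, not copied from the patch):

    #include <linux/init.h>
    #include <linux/of.h>
    #include <linux/of_address.h>

    /* Illustrative sketch only: locate the RCPM node, map its
     * registers, and hook them into the suspend path. Error
     * handling is trimmed for brevity. */
    static void __iomem *rcpm_base;

    static int __init layerscape_rcpm_init(void)
    {
        struct device_node *np;

        np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-rcpm-2.1");
        if (!np)
            return -ENODEV;

        rcpm_base = of_iomap(np, 0);
        of_node_put(np);
        if (!rcpm_base)
            return -ENOMEM;

        /* ... register suspend ops that program wakeup sources
         * (e.g. IPPDEXPCR) before entering deep sleep ... */
        return 0;
    }
    subsys_initcall(layerscape_rcpm_init);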

+ 87 - 27
target/linux/layerscape/patches-4.9/301-arch-support-layerscape.patch

@@ -1,4 +1,4 @@
-From 7edaf7ed8fbd5fb50950a4fc8067a9c14557d010 Mon Sep 17 00:00:00 2001
+From 739029f49bd9181b821298f9d27b29ce2d292967 Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <[email protected]>
 Date: Mon, 25 Sep 2017 10:03:52 +0800
 Subject: [PATCH] arch: support layerscape
@@ -34,9 +34,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  arch/arm64/include/asm/pgtable-prot.h |  1 +
  arch/arm64/include/asm/pgtable.h      |  5 +++
  arch/arm64/kernel/pci.c               | 62 +++++++++++++++++++++++++++++++++++
- arch/arm64/mm/dma-mapping.c           |  6 ++++
- 15 files changed, 197 insertions(+), 3 deletions(-)
+ arch/arm64/mm/dma-mapping.c           | 23 ++++++++++---
+ 15 files changed, 209 insertions(+), 8 deletions(-)
 
+diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
+index b1ce037e..1445b0ca 100644
 --- a/arch/arm/include/asm/delay.h
 +++ b/arch/arm/include/asm/delay.h
 @@ -57,6 +57,22 @@ extern void __bad_udelay(void);
@@ -62,9 +64,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  /* Loop-based definitions for assembly code. */
  extern void __loop_delay(unsigned long loops);
  extern void __loop_udelay(unsigned long usecs);
+diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
+index 021692c6..172a4f2e 100644
 --- a/arch/arm/include/asm/io.h
 +++ b/arch/arm/include/asm/io.h
-@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const vola
+@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
  #define MT_DEVICE_NONSHARED	1
  #define MT_DEVICE_CACHED	2
  #define MT_DEVICE_WC		3
@@ -72,7 +76,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  /*
   * types 4 onwards can be found in asm/mach/map.h and are undefined
   * for ioremap
-@@ -220,6 +221,34 @@ extern int pci_ioremap_io(unsigned int o
+@@ -220,6 +221,34 @@ extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
  #endif
  #endif
  
@@ -107,7 +111,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  /*
   *  IO port access primitives
   *  -------------------------
-@@ -408,6 +437,8 @@ void __iomem *ioremap_wc(resource_size_t
+@@ -408,6 +437,8 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
  #define ioremap_wc ioremap_wc
  #define ioremap_wt ioremap_wc
  
@@ -116,6 +120,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  void iounmap(volatile void __iomem *iomem_cookie);
  #define iounmap iounmap
  
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index 9b7c328f..27f3df7d 100644
 --- a/arch/arm/include/asm/mach/map.h
 +++ b/arch/arm/include/asm/mach/map.h
 @@ -21,9 +21,9 @@ struct map_desc {
@@ -130,6 +136,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	MT_CACHECLEAN,
  	MT_MINICLEAN,
  	MT_LOW_VECTORS,
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index a8d656d9..4ab57b37 100644
 --- a/arch/arm/include/asm/pgtable.h
 +++ b/arch/arm/include/asm/pgtable.h
 @@ -118,6 +118,13 @@ extern pgprot_t		pgprot_s2_device;
@@ -146,6 +154,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #define pgprot_writecombine(prot) \
  	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
  
+diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
+index 2f0e0773..d2f4869a 100644
 --- a/arch/arm/kernel/bios32.c
 +++ b/arch/arm/kernel/bios32.c
 @@ -11,6 +11,8 @@
@@ -157,10 +167,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  #include <asm/mach-types.h>
  #include <asm/mach/map.h>
-@@ -64,6 +66,47 @@ void pcibios_report_status(u_int status_
+@@ -63,6 +65,47 @@ void pcibios_report_status(u_int status_mask, int warn)
+ 		pcibios_bus_report_status(bus, status_mask, warn);
  }
  
- /*
++/*
 + * Check device tree if the service interrupts are there
 + */
 +int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
@@ -201,13 +212,14 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	return count;
 +}
 +
-+/*
+ /*
   * We don't use this to fix the device, but initialisation of it.
   * It's not the correct use for this, but it works.
-  * Note that the arbiter/ISA bridge appears to be buggy, specifically in
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index ab771000..9b5f4465 100644
 --- a/arch/arm/mm/dma-mapping.c
 +++ b/arch/arm/mm/dma-mapping.c
-@@ -2392,6 +2392,7 @@ void arch_setup_dma_ops(struct device *d
+@@ -2392,6 +2392,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
  
  	set_dma_ops(dev, dma_ops);
  }
@@ -215,9 +227,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  void arch_teardown_dma_ops(struct device *dev)
  {
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index ff0eed23..2f2f4269 100644
 --- a/arch/arm/mm/ioremap.c
 +++ b/arch/arm/mm/ioremap.c
-@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t
+@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
  }
  EXPORT_SYMBOL(ioremap_wc);
  
@@ -231,9 +245,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  /*
   * Remap an arbitrary physical address space into the kernel virtual
   * address space as memory. Needed when the kernel wants to execute
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index f7c74135..4a2fb704 100644
 --- a/arch/arm/mm/mmu.c
 +++ b/arch/arm/mm/mmu.c
-@@ -313,6 +313,13 @@ static struct mem_type mem_types[] __ro_
+@@ -313,6 +313,13 @@ static struct mem_type mem_types[] __ro_after_init = {
  		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
  		.domain    = DOMAIN_KERNEL,
  	},
@@ -247,7 +263,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	[MT_ROM] = {
  		.prot_sect = PMD_TYPE_SECT,
  		.domain    = DOMAIN_KERNEL,
-@@ -644,6 +651,7 @@ static void __init build_mem_type_table(
+@@ -644,6 +651,7 @@ static void __init build_mem_type_table(void)
  	}
  	kern_pgprot |= PTE_EXT_AF;
  	vecs_pgprot |= PTE_EXT_AF;
@@ -255,7 +271,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/*
  	 * Set PXN for user mappings
-@@ -672,6 +680,7 @@ static void __init build_mem_type_table(
+@@ -672,6 +680,7 @@ static void __init build_mem_type_table(void)
  	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
  	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
  	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
@@ -263,6 +279,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
  	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
  	mem_types[MT_ROM].prot_sect |= cp->pmd;
+diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
+index 5082b30b..bde44993 100644
 --- a/arch/arm64/include/asm/cache.h
 +++ b/arch/arm64/include/asm/cache.h
 @@ -18,7 +18,7 @@
@@ -274,9 +292,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
  
  /*
+diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
+index 0bba427b..36c1fbf3 100644
 --- a/arch/arm64/include/asm/io.h
 +++ b/arch/arm64/include/asm/io.h
-@@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_
+@@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
  #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
  #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
  #define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
@@ -285,9 +305,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #define iounmap				__iounmap
  
  /*
+diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
+index b9a7ba9c..8a189159 100644
 --- a/arch/arm64/include/asm/pci.h
 +++ b/arch/arm64/include/asm/pci.h
-@@ -31,6 +31,10 @@ static inline int pci_get_legacy_ide_irq
+@@ -31,6 +31,10 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
  	return -ENODEV;
  }
  
@@ -298,6 +320,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static inline int pci_proc_domain(struct pci_bus *bus)
  {
  	return 1;
+diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
+index 2142c772..cdf8b25d 100644
 --- a/arch/arm64/include/asm/pgtable-prot.h
 +++ b/arch/arm64/include/asm/pgtable-prot.h
 @@ -42,6 +42,7 @@
@@ -308,9 +332,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
  #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 61e21401..b8c876fb 100644
 --- a/arch/arm64/include/asm/pgtable.h
 +++ b/arch/arm64/include/asm/pgtable.h
-@@ -356,6 +356,11 @@ static inline int pmd_protnone(pmd_t pmd
+@@ -356,6 +356,11 @@ static inline int pmd_protnone(pmd_t pmd)
  	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
  #define pgprot_writecombine(prot) \
  	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
@@ -322,6 +348,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #define pgprot_device(prot) \
  	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
  #define __HAVE_PHYS_MEM_ACCESS_PROT
+diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
+index 409abc45..0568ec3a 100644
 --- a/arch/arm64/kernel/pci.c
 +++ b/arch/arm64/kernel/pci.c
 @@ -17,6 +17,8 @@
@@ -333,11 +361,10 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #include <linux/pci.h>
  #include <linux/pci-acpi.h>
  #include <linux/pci-ecam.h>
-@@ -53,6 +55,66 @@ int pcibios_alloc_irq(struct pci_dev *de
- 
+@@ -54,6 +56,66 @@ int pcibios_alloc_irq(struct pci_dev *dev)
  	return 0;
  }
-+
+ 
 +/*
 + * Check device tree if the service interrupts are there
 + */
@@ -397,9 +424,12 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +
 +	return 0;
 +}
- 
++
  /*
   * raw_pci_read/write - Platform-specific PCI config space access.
+  */
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index b5bf46ce..5a010bcc 100644
 --- a/arch/arm64/mm/dma-mapping.c
 +++ b/arch/arm64/mm/dma-mapping.c
 @@ -30,6 +30,7 @@
@@ -410,19 +440,49 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  static int swiotlb __ro_after_init;
  
-@@ -918,6 +919,10 @@ static int __init __iommu_dma_init(void)
+@@ -836,14 +837,21 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
+ 	 * then the IOMMU core will have already configured a group for this
+ 	 * device, and allocated the default domain for that group.
+ 	 */
+-	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
+-		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+-			dev_name(dev));
+-		return false;
++	if (!domain)
++		goto out_err;
++
++	if (domain->type == IOMMU_DOMAIN_DMA) {
++		if (iommu_dma_init_domain(domain, dma_base, size, dev))
++			goto out_err;
++
++		dev->archdata.dma_ops = &iommu_dma_ops;
+ 	}
+ 
+-	dev->archdata.dma_ops = &iommu_dma_ops;
+ 	return true;
++out_err:
++	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
++		 dev_name(dev));
++	return false;
+ }
+ 
+ static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
+@@ -917,6 +925,10 @@ static int __init __iommu_dma_init(void)
+ #ifdef CONFIG_PCI
  	if (!ret)
  		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
- #endif
++#endif
 +#ifdef CONFIG_FSL_MC_BUS
 +	if (!ret)
 +		ret = register_iommu_dma_ops_notifier(&fsl_mc_bus_type);
-+#endif
+ #endif
  	return ret;
  }
- arch_initcall(__iommu_dma_init);
-@@ -971,3 +976,4 @@ void arch_setup_dma_ops(struct device *d
+@@ -971,3 +983,4 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
  	dev->archdata.dma_coherent = coherent;
  	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
  }
 +EXPORT_SYMBOL(arch_setup_dma_ops);
+-- 
+2.14.1
+
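Both the arm and arm64 hunks add a pcibios_check_service_irqs() hook so the PCIe port driver can pick up AER/PME service interrupts from the device tree instead of MSI/MSI-X (the consumer side in portdrv_core.c is reworked by the 702-pci patch further down). A minimal sketch of what such a lookup involves; the interrupt names and the irqs[] fill-in are assumptions for illustration, and the PCIE_PORT_SERVICE_* constants come from drivers/pci/pcie/portdrv.h:

    #include <linux/of.h>
    #include <linux/of_irq.h>
    #include <linux/pci.h>

    /* Illustrative: read named service interrupts from the port's
     * DT node into irqs[], returning how many requested services
     * were found. */
    static int dt_service_irqs(struct pci_dev *dev, int *irqs, int mask)
    {
        struct device_node *np = dev->bus->dev.of_node;
        int count = 0, irq;

        if (!np)
            return 0;

        if (mask & PCIE_PORT_SERVICE_AER) {
            irq = of_irq_get_byname(np, "aer");
            if (irq > 0) {
                irqs[PCIE_PORT_SERVICE_AER_SHIFT] = irq;
                count++;
            }
        }
        if (mask & PCIE_PORT_SERVICE_PME) {
            irq = of_irq_get_byname(np, "pme");
            if (irq > 0) {
                irqs[PCIE_PORT_SERVICE_PME_SHIFT] = irq;
                count++;
            }
        }
        return count;
    }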

File diff suppressed because it is too large
+ 434 - 65
target/linux/layerscape/patches-4.9/302-dts-support-layercape.patch


+ 66 - 97
target/linux/layerscape/patches-4.9/401-mtd-spi-nor-support-layerscape.patch

@@ -1,4 +1,4 @@
-From 120fa458ffe2250ea58578ccfc85e674005463dc Mon Sep 17 00:00:00 2001
+From a3757157751a8a5302ee5e11faf828dc5db02018 Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <[email protected]>
 Date: Mon, 25 Sep 2017 10:53:50 +0800
 Subject: [PATCH] mtd: spi-nor: support layerscape
@@ -18,14 +18,16 @@ Signed-off-by: Ash Benz <[email protected]>
 Signed-off-by: Yangbo Lu <[email protected]>
 ---
  drivers/mtd/mtdchar.c             |   2 +-
- drivers/mtd/spi-nor/fsl-quadspi.c | 356 +++++++++++++++++++++++++++++++-------
- drivers/mtd/spi-nor/spi-nor.c     | 136 +++++++++++++--
+ drivers/mtd/spi-nor/fsl-quadspi.c | 327 +++++++++++++++++++++++++++++++-------
+ drivers/mtd/spi-nor/spi-nor.c     | 136 ++++++++++++++--
  include/linux/mtd/spi-nor.h       |  14 +-
- 4 files changed, 432 insertions(+), 76 deletions(-)
+ 4 files changed, 409 insertions(+), 70 deletions(-)
 
+diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
+index 2a47a3f0..4f21401d 100644
 --- a/drivers/mtd/mtdchar.c
 +++ b/drivers/mtd/mtdchar.c
-@@ -451,7 +451,7 @@ static int mtdchar_readoob(struct file *
+@@ -451,7 +451,7 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
  	 * data. For our userspace tools it is important to dump areas
  	 * with ECC errors!
  	 * For kernel internal usage it also might return -EUCLEAN
@@ -34,6 +36,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	 * been corrected by the ECC algorithm.
  	 *
  	 * Note: currently the standard NAND function, nand_read_oob_std,
+diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
+index 5c82e4ef..33ecc27a 100644
 --- a/drivers/mtd/spi-nor/fsl-quadspi.c
 +++ b/drivers/mtd/spi-nor/fsl-quadspi.c
 @@ -41,6 +41,8 @@
@@ -86,7 +90,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	.devtype = FSL_QUADSPI_VYBRID,
  	.rxfifo = 128,
  	.txfifo = 64,
-@@ -232,7 +241,7 @@ static struct fsl_qspi_devtype_data vybr
+@@ -232,7 +241,7 @@ static struct fsl_qspi_devtype_data vybrid_data = {
  	.driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
  };
  
@@ -95,7 +99,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	.devtype = FSL_QUADSPI_IMX6SX,
  	.rxfifo = 128,
  	.txfifo = 512,
-@@ -241,7 +250,7 @@ static struct fsl_qspi_devtype_data imx6
+@@ -241,7 +250,7 @@ static struct fsl_qspi_devtype_data imx6sx_data = {
  		       | QUADSPI_QUIRK_TKT245618,
  };
  
@@ -104,7 +108,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	.devtype = FSL_QUADSPI_IMX7D,
  	.rxfifo = 512,
  	.txfifo = 512,
-@@ -250,7 +259,7 @@ static struct fsl_qspi_devtype_data imx7
+@@ -250,7 +259,7 @@ static struct fsl_qspi_devtype_data imx7d_data = {
  		       | QUADSPI_QUIRK_4X_INT_CLK,
  };
  
@@ -113,7 +117,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	.devtype = FSL_QUADSPI_IMX6UL,
  	.rxfifo = 128,
  	.txfifo = 512,
-@@ -267,6 +276,14 @@ static struct fsl_qspi_devtype_data ls10
+@@ -267,6 +276,14 @@ static struct fsl_qspi_devtype_data ls1021a_data = {
  	.driver_data = 0,
  };
  
@@ -136,7 +140,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	unsigned int chip_base_addr; /* We may support two chips. */
  	bool has_second_chip;
  	bool big_endian;
-@@ -309,6 +327,23 @@ static inline int needs_wakeup_wait_mode
+@@ -309,6 +327,23 @@ static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
  	return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
  }
  
@@ -160,7 +164,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  /*
   * R/W functions for big- or little-endian registers:
   * The qSPI controller's endian is independent of the CPU core's endian.
-@@ -331,6 +366,31 @@ static u32 qspi_readl(struct fsl_qspi *q
+@@ -331,6 +366,31 @@ static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
  		return ioread32(addr);
  }
  
@@ -192,7 +196,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  /*
   * An IC bug makes us to re-arrange the 32-bit data.
   * The following chips, such as IMX6SLX, have fixed this bug.
-@@ -373,8 +433,15 @@ static void fsl_qspi_init_lut(struct fsl
+@@ -373,8 +433,15 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
  	void __iomem *base = q->iobase;
  	int rxfifo = q->devtype_data->rxfifo;
  	u32 lut_base;
@@ -209,7 +213,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	fsl_qspi_unlock_lut(q);
  
-@@ -382,25 +449,51 @@ static void fsl_qspi_init_lut(struct fsl
+@@ -382,25 +449,51 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
  	for (i = 0; i < QUADSPI_LUT_NUM; i++)
  		qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
  
@@ -226,10 +230,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
 -		addrlen = ADDR32BIT;
 -		dummy = 8;
 -	}
+-
+-	qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
 +	/* Read */
 +	lut_base = SEQID_READ * 4;
- 
--	qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
++
 +	if (nor->flash_read == SPI_NOR_FAST) {
 +		qspi_writel(q, LUT0(CMD, PAD1, read_op) |
 +			    LUT1(ADDR, PAD1, addrlen),
@@ -276,7 +281,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	/* Write enable */
  	lut_base = SEQID_WREN * 4;
  	qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN),
-@@ -409,16 +502,8 @@ static void fsl_qspi_init_lut(struct fsl
+@@ -409,16 +502,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
  	/* Page Program */
  	lut_base = SEQID_PP * 4;
  
@@ -295,7 +300,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			base + QUADSPI_LUT(lut_base));
  	qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
  			base + QUADSPI_LUT(lut_base + 1));
-@@ -432,10 +517,8 @@ static void fsl_qspi_init_lut(struct fsl
+@@ -432,10 +517,8 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
  	/* Erase a sector */
  	lut_base = SEQID_SE * 4;
  
@@ -308,7 +313,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			base + QUADSPI_LUT(lut_base));
  
  	/* Erase the whole chip */
-@@ -476,6 +559,44 @@ static void fsl_qspi_init_lut(struct fsl
+@@ -476,6 +559,44 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
  	qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
  			base + QUADSPI_LUT(lut_base));
  
@@ -353,7 +358,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	fsl_qspi_lock_lut(q);
  }
  
-@@ -483,8 +604,24 @@ static void fsl_qspi_init_lut(struct fsl
+@@ -483,8 +604,24 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
  static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
  {
  	switch (cmd) {
@@ -379,7 +384,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	case SPINOR_OP_WREN:
  		return SEQID_WREN;
  	case SPINOR_OP_WRDI:
-@@ -496,6 +633,7 @@ static int fsl_qspi_get_seqid(struct fsl
+@@ -496,6 +633,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
  	case SPINOR_OP_CHIP_ERASE:
  		return SEQID_CHIP_ERASE;
  	case SPINOR_OP_PP:
@@ -387,7 +392,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		return SEQID_PP;
  	case SPINOR_OP_RDID:
  		return SEQID_RDID;
-@@ -507,6 +645,8 @@ static int fsl_qspi_get_seqid(struct fsl
+@@ -507,6 +645,8 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
  		return SEQID_EN4B;
  	case SPINOR_OP_BRWR:
  		return SEQID_BRWR;
@@ -396,7 +401,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	default:
  		if (cmd == q->nor[0].erase_opcode)
  			return SEQID_SE;
-@@ -531,8 +671,11 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 c
+@@ -531,8 +671,11 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
  	/* save the reg */
  	reg = qspi_readl(q, base + QUADSPI_MCR);
  
@@ -410,7 +415,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
  			base + QUADSPI_RBCT);
  	qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
-@@ -582,10 +725,10 @@ static void fsl_qspi_read_data(struct fs
+@@ -582,10 +725,10 @@ static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
  				q->chip_base_addr, tmp);
  
  		if (len >= 4) {
@@ -423,7 +428,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			break;
  		}
  
-@@ -619,11 +762,12 @@ static inline void fsl_qspi_invalid(stru
+@@ -619,11 +762,12 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
  }
  
  static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
@@ -437,7 +442,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n",
  		q->chip_base_addr, to, count);
-@@ -633,10 +777,13 @@ static ssize_t fsl_qspi_nor_write(struct
+@@ -633,10 +777,13 @@ static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
  	qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
  
  	/* fill the TX data to the FIFO */
@@ -453,7 +458,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	}
  
  	/* fill the TXFIFO upto 16 bytes for i.MX7d */
-@@ -657,11 +804,43 @@ static void fsl_qspi_set_map_addr(struct
+@@ -657,11 +804,43 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
  {
  	int nor_size = q->nor_size;
  	void __iomem *base = q->iobase;
@@ -501,50 +506,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  
  /*
-@@ -681,19 +860,36 @@ static void fsl_qspi_init_abh_read(struc
- {
- 	void __iomem *base = q->iobase;
- 	int seqid;
-+	const struct fsl_qspi_devtype_data *devtype_data = q->devtype_data;
- 
- 	/* AHB configuration for access buffer 0/1/2 .*/
- 	qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
- 	qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
- 	qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
-+
- 	/*
--	 * Set ADATSZ with the maximum AHB buffer size to improve the
--	 * read performance.
-+	 * Errata: A-009282: QuadSPI data prefetch may result in incorrect data
-+	 * Workaround: Keep the read data size to 64 bits (8 bytes).
-+	 * This disables the prefetch on the AHB buffer and
-+	 * prevents this issue from occurring.
- 	 */
--	qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
--			((q->devtype_data->ahb_buf_size / 8)
--			<< QUADSPI_BUF3CR_ADATSZ_SHIFT),
--			base + QUADSPI_BUF3CR);
-+	if (devtype_data->devtype == FSL_QUADSPI_LS2080A ||
-+	    devtype_data->devtype == FSL_QUADSPI_LS1021A) {
-+
-+		qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
-+				(1 << QUADSPI_BUF3CR_ADATSZ_SHIFT),
-+				base + QUADSPI_BUF3CR);
-+
-+	} else {
-+		/*
-+		 * Set ADATSZ with the maximum AHB buffer size to improve the
-+		 * read performance.
-+		*/
-+		qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
-+				((q->devtype_data->ahb_buf_size / 8)
-+				<< QUADSPI_BUF3CR_ADATSZ_SHIFT),
-+				base + QUADSPI_BUF3CR);
-+	}
- 
- 	/* We only use the buffer3 */
- 	qspi_writel(q, 0, base + QUADSPI_BUF0IND);
-@@ -704,6 +900,11 @@ static void fsl_qspi_init_abh_read(struc
+@@ -704,6 +883,11 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
  	seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
  	qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
  		q->iobase + QUADSPI_BFGENCR);
@@ -556,7 +518,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  
  /* This function was used to prepare and enable QSPI clock */
-@@ -822,6 +1023,7 @@ static const struct of_device_id fsl_qsp
+@@ -822,6 +1006,7 @@ static const struct of_device_id fsl_qspi_dt_ids[] = {
  	{ .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
  	{ .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
  	{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
@@ -564,21 +526,21 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	{ /* sentinel */ }
  };
  MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
-@@ -835,8 +1037,12 @@ static int fsl_qspi_read_reg(struct spi_
+@@ -835,8 +1020,12 @@ static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
  {
  	int ret;
  	struct fsl_qspi *q = nor->priv;
 +	u32 to = 0;
-+
-+	if (opcode == SPINOR_OP_SPANSION_RDAR)
-+		u8tou32(&to, nor->cmd_buf, 4);
  
 -	ret = fsl_qspi_runcmd(q, opcode, 0, len);
++	if (opcode == SPINOR_OP_SPANSION_RDAR)
++		u8tou32(&to, nor->cmd_buf, 4);
++
 +	ret = fsl_qspi_runcmd(q, opcode, to, len);
  	if (ret)
  		return ret;
  
-@@ -848,9 +1054,13 @@ static int fsl_qspi_write_reg(struct spi
+@@ -848,9 +1037,13 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
  {
  	struct fsl_qspi *q = nor->priv;
  	int ret;
@@ -593,7 +555,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		if (ret)
  			return ret;
  
-@@ -859,7 +1069,7 @@ static int fsl_qspi_write_reg(struct spi
+@@ -859,7 +1052,7 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
  
  	} else if (len > 0) {
  		ret = fsl_qspi_nor_write(q, nor, opcode, 0,
@@ -602,7 +564,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		if (ret > 0)
  			return 0;
  	} else {
-@@ -875,7 +1085,7 @@ static ssize_t fsl_qspi_write(struct spi
+@@ -875,7 +1068,7 @@ static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to,
  {
  	struct fsl_qspi *q = nor->priv;
  	ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to,
@@ -611,7 +573,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/* invalid the data in the AHB buffer. */
  	fsl_qspi_invalid(q);
-@@ -922,7 +1132,7 @@ static ssize_t fsl_qspi_read(struct spi_
+@@ -922,7 +1115,7 @@ static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from,
  		len);
  
  	/* Read out the data directly from the AHB buffer.*/
@@ -620,7 +582,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		len);
  
  	return len;
-@@ -980,6 +1190,8 @@ static int fsl_qspi_probe(struct platfor
+@@ -980,6 +1173,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)
  	struct spi_nor *nor;
  	struct mtd_info *mtd;
  	int ret, i = 0;
@@ -629,7 +591,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL);
  	if (!q)
-@@ -1027,6 +1239,12 @@ static int fsl_qspi_probe(struct platfor
+@@ -1027,6 +1222,12 @@ static int fsl_qspi_probe(struct platform_device *pdev)
  		goto clk_failed;
  	}
  
@@ -642,7 +604,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	/* find the irq */
  	ret = platform_get_irq(pdev, 0);
  	if (ret < 0) {
-@@ -1050,6 +1268,7 @@ static int fsl_qspi_probe(struct platfor
+@@ -1050,6 +1251,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
  
  	mutex_init(&q->lock);
  
@@ -650,7 +612,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	/* iterate the subnodes. */
  	for_each_available_child_of_node(dev->of_node, np) {
  		/* skip the holes */
-@@ -1076,18 +1295,25 @@ static int fsl_qspi_probe(struct platfor
+@@ -1076,18 +1278,25 @@ static int fsl_qspi_probe(struct platform_device *pdev)
  		ret = of_property_read_u32(np, "spi-max-frequency",
  				&q->clk_rate);
  		if (ret < 0)
@@ -680,7 +642,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  		/* Set the correct NOR size now. */
  		if (q->nor_size == 0) {
-@@ -1110,8 +1336,12 @@ static int fsl_qspi_probe(struct platfor
+@@ -1110,8 +1319,12 @@ static int fsl_qspi_probe(struct platform_device *pdev)
  			nor->page_size = q->devtype_data->txfifo;
  
  		i++;
@@ -693,6 +655,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	/* finish the rest init. */
  	ret = fsl_qspi_nor_setup_last(q);
  	if (ret)
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index 793d321d..190e0e45 100644
 --- a/drivers/mtd/spi-nor/spi-nor.c
 +++ b/drivers/mtd/spi-nor/spi-nor.c
 @@ -40,6 +40,13 @@
@@ -740,7 +704,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
   */
  static int read_cr(struct spi_nor *nor)
  {
-@@ -160,6 +170,8 @@ static inline int spi_nor_read_dummy_cyc
+@@ -160,6 +170,8 @@ static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
  	case SPI_NOR_DUAL:
  	case SPI_NOR_QUAD:
  		return 8;
@@ -749,7 +713,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	case SPI_NOR_NORMAL:
  		return 0;
  	}
-@@ -961,6 +973,8 @@ static const struct flash_info spi_nor_i
+@@ -961,6 +973,8 @@ static const struct flash_info spi_nor_ids[] = {
  
  	/* ESMT */
  	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
@@ -758,7 +722,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/* Everspin */
  	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
-@@ -1014,12 +1028,15 @@ static const struct flash_info spi_nor_i
+@@ -1014,12 +1028,15 @@ static const struct flash_info spi_nor_ids[] = {
  	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
  	{ "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
  	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
@@ -775,7 +739,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
  	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
  	{ "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
-@@ -1033,10 +1050,11 @@ static const struct flash_info spi_nor_i
+@@ -1033,10 +1050,11 @@ static const struct flash_info spi_nor_ids[] = {
  	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
  	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
  	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -789,7 +753,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/* PMC */
  	{ "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
-@@ -1054,8 +1072,11 @@ static const struct flash_info spi_nor_i
+@@ -1054,8 +1072,11 @@ static const struct flash_info spi_nor_ids[] = {
  	{ "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
  	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
  	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
@@ -802,7 +766,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
  	{ "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
  	{ "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
-@@ -1130,6 +1151,9 @@ static const struct flash_info spi_nor_i
+@@ -1130,6 +1151,9 @@ static const struct flash_info spi_nor_ids[] = {
  	{ "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
  	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
  	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
@@ -812,7 +776,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
  	{
  		"w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64,
-@@ -1192,6 +1216,53 @@ static const struct flash_info *spi_nor_
+@@ -1192,6 +1216,53 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
  		id[0], id[1], id[2]);
  	return ERR_PTR(-ENODEV);
  }
@@ -866,7 +830,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
  			size_t *retlen, u_char *buf)
-@@ -1411,7 +1482,7 @@ static int macronix_quad_enable(struct s
+@@ -1411,7 +1482,7 @@ static int macronix_quad_enable(struct spi_nor *nor)
   * Write status Register and configuration register with 2 bytes
   * The first byte will be written to the status register, while the
   * second byte will be written to the configuration register.
@@ -875,7 +839,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
   */
  static int write_sr_cr(struct spi_nor *nor, u16 val)
  {
-@@ -1459,6 +1530,24 @@ static int spansion_quad_enable(struct s
+@@ -1459,6 +1530,24 @@ static int spansion_quad_enable(struct spi_nor *nor)
  	return 0;
  }
  
@@ -900,7 +864,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
  {
  	int status;
-@@ -1604,9 +1693,25 @@ int spi_nor_scan(struct spi_nor *nor, co
+@@ -1604,9 +1693,25 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
  		write_sr(nor, 0);
  		spi_nor_wait_till_ready(nor);
  	}
@@ -926,7 +890,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	mtd->priv = nor;
  	mtd->type = MTD_NORFLASH;
  	mtd->writesize = 1;
-@@ -1639,6 +1744,8 @@ int spi_nor_scan(struct spi_nor *nor, co
+@@ -1639,6 +1744,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
  		nor->flags |= SNOR_F_USE_FSR;
  	if (info->flags & SPI_NOR_HAS_TB)
  		nor->flags |= SNOR_F_HAS_SR_TB;
@@ -935,7 +899,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
  	/* prefer "small sector" erase if possible */
-@@ -1676,9 +1783,15 @@ int spi_nor_scan(struct spi_nor *nor, co
+@@ -1676,9 +1783,15 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
  	/* Some devices cannot do fast-read, no matter what DT tells us */
  	if (info->flags & SPI_NOR_NO_FR)
  		nor->flash_read = SPI_NOR_NORMAL;
@@ -954,7 +918,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		ret = set_quad_mode(nor, info);
  		if (ret) {
  			dev_err(dev, "quad mode not supported\n");
-@@ -1691,6 +1804,9 @@ int spi_nor_scan(struct spi_nor *nor, co
+@@ -1691,6 +1804,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
  
  	/* Default commands */
  	switch (nor->flash_read) {
@@ -964,6 +928,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	case SPI_NOR_QUAD:
  		nor->read_opcode = SPINOR_OP_READ_1_1_4;
  		break;
+diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
+index f2a71803..5003ff64 100644
 --- a/include/linux/mtd/spi-nor.h
 +++ b/include/linux/mtd/spi-nor.h
 @@ -31,10 +31,10 @@
@@ -1028,3 +994,6 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
  	void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+-- 
+2.14.1
+
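The quadspi rework above routes register reads such as SPINOR_OP_SPANSION_RDAR through a u8tou32() helper so the address held in nor->cmd_buf can be handed to fsl_qspi_runcmd(); the helper body itself is not part of any hunk shown here. A self-contained sketch of that conversion, assuming most-significant-byte-first ordering in cmd_buf (an assumption — the actual patch body may differ):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Pack up to four bytes from buf (MSB first) into *dst;
     * the byte order is assumed for illustration only. */
    static void u8tou32(uint32_t *dst, const uint8_t *buf, size_t len)
    {
        size_t i;

        *dst = 0;
        for (i = 0; i < len && i < 4; i++)
            *dst = (*dst << 8) | buf[i];
    }

    int main(void)
    {
        uint8_t cmd_buf[4] = { 0x00, 0x80, 0x00, 0x04 };
        uint32_t to;

        u8tou32(&to, cmd_buf, 4);
        printf("RDAR address: 0x%08x\n", to); /* prints 0x00800004 */
        return 0;
    }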

File diff suppressed because it is too large
+ 350 - 48
target/linux/layerscape/patches-4.9/701-sdk_dpaa-support-layerscape.patch


+ 60 - 32
target/linux/layerscape/patches-4.9/702-pci-support-layerscape.patch

@@ -1,4 +1,4 @@
-From c4813da334b0c31e9c55eea015f1e898e84ff45b Mon Sep 17 00:00:00 2001
+From 9e6e0a53b29190dbd86a39304b59c3028f5b36c2 Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <[email protected]>
 Date: Mon, 25 Sep 2017 11:04:10 +0800
 Subject: [PATCH] pci: support layerscape
@@ -15,21 +15,23 @@ Signed-off-by: Mingkai Hu <[email protected]>
 Signed-off-by: Christoph Hellwig <[email protected]>
 Signed-off-by: Yangbo Lu <[email protected]>
 ---
- drivers/irqchip/irq-ls-scfg-msi.c            | 256 +++++++--
+ drivers/irqchip/irq-ls-scfg-msi.c            | 257 +++++++--
  drivers/pci/host/Makefile                    |   2 +-
  drivers/pci/host/pci-layerscape-ep-debugfs.c | 758 +++++++++++++++++++++++++++
  drivers/pci/host/pci-layerscape-ep.c         | 309 +++++++++++
  drivers/pci/host/pci-layerscape-ep.h         | 115 ++++
- drivers/pci/host/pci-layerscape.c            |  37 +-
+ drivers/pci/host/pci-layerscape.c            |  38 +-
  drivers/pci/host/pcie-designware.c           |   6 +
  drivers/pci/host/pcie-designware.h           |   1 +
  drivers/pci/pcie/portdrv_core.c              | 181 +++----
  include/linux/pci.h                          |   1 +
- 10 files changed, 1518 insertions(+), 148 deletions(-)
+ 10 files changed, 1520 insertions(+), 148 deletions(-)
  create mode 100644 drivers/pci/host/pci-layerscape-ep-debugfs.c
  create mode 100644 drivers/pci/host/pci-layerscape-ep.c
  create mode 100644 drivers/pci/host/pci-layerscape-ep.h
 
+diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
+index 02cca74c..57e3d900 100644
 --- a/drivers/irqchip/irq-ls-scfg-msi.c
 +++ b/drivers/irqchip/irq-ls-scfg-msi.c
 @@ -17,13 +17,32 @@
@@ -82,7 +84,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  };
  
  static struct irq_chip ls_scfg_msi_irq_chip = {
-@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_ms
+@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_msi_domain_info = {
  	.chip	= &ls_scfg_msi_irq_chip,
  };
  
@@ -141,7 +143,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  
  static struct irq_chip ls_scfg_msi_parent_chip = {
-@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(
+@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
  	WARN_ON(nr_irqs != 1);
  
  	spin_lock(&msi_data->lock);
@@ -152,7 +154,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		__set_bit(pos, msi_data->used);
  	else
  		err = -ENOSPC;
-@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(
+@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
  	int pos;
  
  	pos = d->hwirq;
@@ -161,7 +163,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
  		return;
  	}
-@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_sc
+@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
  
  static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
  {
@@ -189,7 +191,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		if (virq)
  			generic_handle_irq(virq);
  	}
-@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(stru
+@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
  {
  	/* Initialize MSI domain parent */
  	msi_data->parent = irq_domain_add_linear(NULL,
@@ -198,7 +200,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  						 &ls_scfg_msi_domain_ops,
  						 msi_data);
  	if (!msi_data->parent) {
-@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(stru
+@@ -164,16 +230,118 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
  	return 0;
  }
  
@@ -288,6 +290,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
 +	{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
 +
++	{ .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
 +	{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
 +	{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
 +	{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
@@ -317,7 +320,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  	msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
  	if (IS_ERR(msi_data->regs)) {
-@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct plat
+@@ -182,23 +350,48 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
  	}
  	msi_data->msiir_addr = res->start;
  
@@ -376,7 +379,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	platform_set_drvdata(pdev, msi_data);
  
  	return 0;
-@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct plat
+@@ -207,8 +400,10 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
  static int ls_scfg_msi_remove(struct platform_device *pdev)
  {
  	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
@@ -388,7 +391,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	irq_domain_remove(msi_data->msi_domain);
  	irq_domain_remove(msi_data->parent);
-@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct pla
+@@ -218,12 +413,6 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
  	return 0;
  }
  
@@ -401,9 +404,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static struct platform_driver ls_scfg_msi_driver = {
  	.driver = {
  		.name = "ls-scfg-msi",
+diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
+index 084cb498..88e87704 100644
 --- a/drivers/pci/host/Makefile
 +++ b/drivers/pci/host/Makefile
-@@ -17,7 +17,7 @@ obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx
+@@ -17,7 +17,7 @@ obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
  obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
  obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
  obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
@@ -412,6 +417,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
  obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
  obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
  obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
+diff --git a/drivers/pci/host/pci-layerscape-ep-debugfs.c b/drivers/pci/host/pci-layerscape-ep-debugfs.c
+new file mode 100644
+index 00000000..5f4870ba
 --- /dev/null
 +++ b/drivers/pci/host/pci-layerscape-ep-debugfs.c
 @@ -0,0 +1,758 @@
@@ -1173,6 +1181,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +MODULE_AUTHOR("Minghuan Lian <[email protected]>");
 +MODULE_DESCRIPTION("Freescale Layerscape PCIe EP controller driver");
 +MODULE_LICENSE("GPL v2");
+diff --git a/drivers/pci/host/pci-layerscape-ep.c b/drivers/pci/host/pci-layerscape-ep.c
+new file mode 100644
+index 00000000..8f1cca6e
 --- /dev/null
 +++ b/drivers/pci/host/pci-layerscape-ep.c
 @@ -0,0 +1,309 @@
@@ -1485,6 +1496,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +MODULE_AUTHOR("Minghuan Lian <[email protected]>");
 +MODULE_DESCRIPTION("Freescale Layerscape PCIe EP driver");
 +MODULE_LICENSE("GPL v2");
+diff --git a/drivers/pci/host/pci-layerscape-ep.h b/drivers/pci/host/pci-layerscape-ep.h
+new file mode 100644
+index 00000000..990c0ff5
 --- /dev/null
 +++ b/drivers/pci/host/pci-layerscape-ep.h
 @@ -0,0 +1,115 @@
@@ -1603,6 +1617,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie);
 +
 +#endif /* _PCIE_LAYERSCAPE_EP_H */
+diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
+index 65370799..7ce32ff0 100644
 --- a/drivers/pci/host/pci-layerscape.c
 +++ b/drivers/pci/host/pci-layerscape.c
 @@ -35,12 +35,14 @@
@@ -1622,7 +1638,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	struct pcie_host_ops *ops;
  };
  
-@@ -86,6 +88,14 @@ static void ls_pcie_drop_msg_tlp(struct
+@@ -86,6 +88,14 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
  	iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
  }
  
@@ -1637,7 +1653,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static int ls1021_pcie_link_up(struct pcie_port *pp)
  {
  	u32 state;
-@@ -134,7 +144,7 @@ static int ls_pcie_link_up(struct pcie_p
+@@ -134,7 +144,7 @@ static int ls_pcie_link_up(struct pcie_port *pp)
  	struct ls_pcie *pcie = to_ls_pcie(pp);
  	u32 state;
  
@@ -1646,7 +1662,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		 pcie->drvdata->ltssm_shift) &
  		 LTSSM_STATE_MASK;
  
-@@ -153,6 +163,9 @@ static void ls_pcie_host_init(struct pci
+@@ -153,6 +163,9 @@ static void ls_pcie_host_init(struct pcie_port *pp)
  	ls_pcie_clear_multifunction(pcie);
  	ls_pcie_drop_msg_tlp(pcie);
  	iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
@@ -1656,7 +1672,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  
  static int ls_pcie_msi_host_init(struct pcie_port *pp,
-@@ -196,20 +209,38 @@ static struct ls_pcie_drvdata ls1021_drv
+@@ -196,20 +209,39 @@ static struct ls_pcie_drvdata ls1021_drvdata = {
  static struct ls_pcie_drvdata ls1043_drvdata = {
  	.lut_offset = 0x10000,
  	.ltssm_shift = 24,
@@ -1686,6 +1702,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  };
  
  static const struct of_device_id ls_pcie_of_match[] = {
++	{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
  	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
  	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
 +	{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
@@ -1695,9 +1712,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	{ },
  };
  
+diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
+index af8f6e92..2358e049 100644
 --- a/drivers/pci/host/pcie-designware.c
 +++ b/drivers/pci/host/pcie-designware.c
-@@ -478,6 +478,12 @@ int dw_pcie_wait_for_link(struct pcie_po
+@@ -478,6 +478,12 @@ int dw_pcie_wait_for_link(struct pcie_port *pp)
  	return -ETIMEDOUT;
  }
  
@@ -1710,18 +1729,22 @@ Signed-off-by: Yangbo Lu <[email protected]>
  int dw_pcie_link_up(struct pcie_port *pp)
  {
  	u32 val;
+diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
+index a567ea28..4e6672b2 100644
 --- a/drivers/pci/host/pcie-designware.h
 +++ b/drivers/pci/host/pcie-designware.h
-@@ -82,5 +82,6 @@ int dw_pcie_wait_for_link(struct pcie_po
+@@ -82,5 +82,6 @@ int dw_pcie_wait_for_link(struct pcie_port *pp);
  int dw_pcie_link_up(struct pcie_port *pp);
  void dw_pcie_setup_rc(struct pcie_port *pp);
  int dw_pcie_host_init(struct pcie_port *pp);
 +void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index);
  
  #endif /* _PCIE_DESIGNWARE_H */
+diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
+index e9270b40..1bad877a 100644
 --- a/drivers/pci/pcie/portdrv_core.c
 +++ b/drivers/pci/pcie/portdrv_core.c
-@@ -44,52 +44,30 @@ static void release_pcie_device(struct d
+@@ -44,52 +44,30 @@ static void release_pcie_device(struct device *dev)
  }
  
  /**
@@ -1785,7 +1808,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/*
  	 * Allocate as many entries as the port wants, so that we can check
-@@ -97,20 +75,13 @@ static int pcie_port_enable_msix(struct
+@@ -97,20 +75,13 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
  	 * equal to the number of entries this port actually uses, we'll happily
  	 * go through without any tricks.
  	 */
@@ -1811,7 +1834,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  		/*
  		 * The code below follows the PCI Express Base Specification 2.0
-@@ -125,18 +96,16 @@ static int pcie_port_enable_msix(struct
+@@ -125,18 +96,16 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
  		pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
  		entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
  		if (entry >= nr_entries)
@@ -1835,7 +1858,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  		/*
  		 * The code below follows Section 7.10.10 of the PCI Express
-@@ -151,13 +120,11 @@ static int pcie_port_enable_msix(struct
+@@ -151,13 +120,11 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
  		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
  		entry = reg32 >> 27;
  		if (entry >= nr_entries)
@@ -1852,7 +1875,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	}
  
  	/*
-@@ -165,41 +132,54 @@ static int pcie_port_enable_msix(struct
+@@ -165,41 +132,54 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
  	 * what we have.  Otherwise, the port has some extra entries not for the
  	 * services we know and we need to work around that.
  	 */
@@ -1926,7 +1949,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/*
  	 * If MSI cannot be used for PCIe PME or hotplug, we have to use
-@@ -207,41 +187,25 @@ static int init_service_irqs(struct pci_
+@@ -207,41 +187,25 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
  	 */
  	if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
  	    ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
@@ -1980,7 +2003,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  /**
   * get_port_device_capability - discover capabilities of a PCI Express port
   * @dev: PCI Express port to examine
-@@ -378,7 +342,7 @@ int pcie_port_device_register(struct pci
+@@ -378,7 +342,7 @@ int pcie_port_device_register(struct pci_dev *dev)
  	 * that can be used in the absence of irqs.  Allow them to determine
  	 * if that is to be used.
  	 */
@@ -1989,7 +2012,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	if (status) {
  		capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
  		if (!capabilities)
-@@ -401,7 +365,7 @@ int pcie_port_device_register(struct pci
+@@ -401,7 +365,7 @@ int pcie_port_device_register(struct pci_dev *dev)
  	return 0;
  
  error_cleanup_irqs:
@@ -1998,7 +2021,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  error_disable:
  	pci_disable_device(dev);
  	return status;
-@@ -469,7 +433,7 @@ static int remove_iter(struct device *de
+@@ -469,7 +433,7 @@ static int remove_iter(struct device *dev, void *data)
  void pcie_port_device_remove(struct pci_dev *dev)
  {
  	device_for_each_child(&dev->dev, NULL, remove_iter);
@@ -2007,7 +2030,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	pci_disable_device(dev);
  }
  
-@@ -499,7 +463,6 @@ static int pcie_port_probe_service(struc
+@@ -499,7 +463,6 @@ static int pcie_port_probe_service(struct device *dev)
  	if (status)
  		return status;
  
@@ -2015,7 +2038,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	get_device(dev);
  	return 0;
  }
-@@ -524,8 +487,6 @@ static int pcie_port_remove_service(stru
+@@ -524,8 +487,6 @@ static int pcie_port_remove_service(struct device *dev)
  	pciedev = to_pcie_device(dev);
  	driver = to_service_driver(dev->driver);
  	if (driver && driver->remove) {
@@ -2024,9 +2047,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		driver->remove(pciedev);
  		put_device(dev);
  	}
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 1b711796..6738d816 100644
 --- a/include/linux/pci.h
 +++ b/include/linux/pci.h
-@@ -1823,6 +1823,7 @@ void pcibios_release_device(struct pci_d
+@@ -1823,6 +1823,7 @@ void pcibios_release_device(struct pci_dev *dev);
  void pcibios_penalize_isa_irq(int irq, int active);
  int pcibios_alloc_irq(struct pci_dev *dev);
  void pcibios_free_irq(struct pci_dev *dev);
@@ -2034,3 +2059,6 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  #ifdef CONFIG_HIBERNATE_CALLBACKS
  extern struct dev_pm_ops pcibios_pm_ops;
+-- 
+2.14.1
+

File diff is too large to display
+ 208 - 182
target/linux/layerscape/patches-4.9/704-fsl-mc-layerscape-support.patch


+ 10653 - 0
target/linux/layerscape/patches-4.9/706-fsl_ppfe-support-layercape.patch

@@ -0,0 +1,10653 @@
+From 8b7935a883d42187716fe486c83352f24d01ddcd Mon Sep 17 00:00:00 2001
+From: Yangbo Lu <[email protected]>
+Date: Thu, 19 Oct 2017 12:48:19 +0800
+Subject: [PATCH] fsl_ppfe: support layerscape
+
+This is an integrated patch for layerscape pfe support.
+
+Calvin Johnson <[email protected]>
+Signed-off-by: Yangbo Lu <[email protected]>
+---
+ drivers/staging/fsl_ppfe/Kconfig                   |   20 +
+ drivers/staging/fsl_ppfe/Makefile                  |   19 +
+ drivers/staging/fsl_ppfe/TODO                      |    2 +
+ drivers/staging/fsl_ppfe/include/pfe/cbus.h        |   78 +
+ drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h    |   55 +
+ .../staging/fsl_ppfe/include/pfe/cbus/class_csr.h  |  289 +++
+ .../staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h  |  242 ++
+ drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h    |   86 +
+ drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h    |  100 +
+ .../staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h  |   50 +
+ .../staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h    |  168 ++
+ .../staging/fsl_ppfe/include/pfe/cbus/util_csr.h   |   61 +
+ drivers/staging/fsl_ppfe/include/pfe/pfe.h         |  372 +++
+ drivers/staging/fsl_ppfe/pfe_ctrl.c                |  238 ++
+ drivers/staging/fsl_ppfe/pfe_ctrl.h                |  112 +
+ drivers/staging/fsl_ppfe/pfe_debugfs.c             |  111 +
+ drivers/staging/fsl_ppfe/pfe_debugfs.h             |   25 +
+ drivers/staging/fsl_ppfe/pfe_eth.c                 | 2434 ++++++++++++++++++++
+ drivers/staging/fsl_ppfe/pfe_eth.h                 |  184 ++
+ drivers/staging/fsl_ppfe/pfe_firmware.c            |  314 +++
+ drivers/staging/fsl_ppfe/pfe_firmware.h            |   32 +
+ drivers/staging/fsl_ppfe/pfe_hal.c                 | 1516 ++++++++++++
+ drivers/staging/fsl_ppfe/pfe_hif.c                 | 1072 +++++++++
+ drivers/staging/fsl_ppfe/pfe_hif.h                 |  211 ++
+ drivers/staging/fsl_ppfe/pfe_hif_lib.c             |  601 +++++
+ drivers/staging/fsl_ppfe/pfe_hif_lib.h             |  239 ++
+ drivers/staging/fsl_ppfe/pfe_hw.c                  |  176 ++
+ drivers/staging/fsl_ppfe/pfe_hw.h                  |   27 +
+ drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c    |  394 ++++
+ drivers/staging/fsl_ppfe/pfe_mod.c                 |  141 ++
+ drivers/staging/fsl_ppfe/pfe_mod.h                 |  112 +
+ drivers/staging/fsl_ppfe/pfe_perfmon.h             |   38 +
+ drivers/staging/fsl_ppfe/pfe_sysfs.c               |  818 +++++++
+ drivers/staging/fsl_ppfe/pfe_sysfs.h               |   29 +
+ 34 files changed, 10366 insertions(+)
+ create mode 100644 drivers/staging/fsl_ppfe/Kconfig
+ create mode 100644 drivers/staging/fsl_ppfe/Makefile
+ create mode 100644 drivers/staging/fsl_ppfe/TODO
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
+ create mode 100644 drivers/staging/fsl_ppfe/include/pfe/pfe.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_ctrl.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_debugfs.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_eth.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_firmware.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_hal.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_hif.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_hif_lib.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_hw.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_mod.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_perfmon.h
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.c
+ create mode 100644 drivers/staging/fsl_ppfe/pfe_sysfs.h
+
+diff --git a/drivers/staging/fsl_ppfe/Kconfig b/drivers/staging/fsl_ppfe/Kconfig
+new file mode 100644
+index 00000000..e4096435
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/Kconfig
+@@ -0,0 +1,20 @@
++#
++# Freescale Programmable Packet Forwarding Engine driver
++#
++config FSL_PPFE
++	bool "Freescale PPFE Driver"
++	default n
++	---help---
++	The Freescale LS1012A SoC has a Programmable Packet Forwarding
++	Engine (PPFE) that provides two high-performance Ethernet interfaces.
++	This driver initializes, programs and controls the PPFE.
++	Use it to enable network connectivity on LS1012A platforms.
++
++if FSL_PPFE
++
++config FSL_PPFE_UTIL_DISABLED
++	bool "Disable PPFE UTIL Processor Engine"
++	---help---
++	The UTIL PE needs to be enabled only when actually required.
++
++endif # FSL_PPFE
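
With the Kconfig above in place, an LS1012A build would enable the driver with a fragment along these lines (illustrative):

	CONFIG_FSL_PPFE=y
	# CONFIG_FSL_PPFE_UTIL_DISABLED is not set
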
+diff --git a/drivers/staging/fsl_ppfe/Makefile b/drivers/staging/fsl_ppfe/Makefile
+new file mode 100644
+index 00000000..07cd351b
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/Makefile
+@@ -0,0 +1,19 @@
++#
++# Makefile for the Freescale PPFE driver
++#
++
++ccflags-y +=  -I$(src)/include  -I$(src)
++
++obj-m += pfe.o
++
++pfe-y += pfe_mod.o \
++	pfe_hw.o \
++	pfe_firmware.o \
++	pfe_ctrl.o \
++	pfe_hif.o \
++	pfe_hif_lib.o \
++	pfe_eth.o \
++	pfe_sysfs.o \
++	pfe_debugfs.o \
++	pfe_ls1012a_platform.o \
++	pfe_hal.o
+diff --git a/drivers/staging/fsl_ppfe/TODO b/drivers/staging/fsl_ppfe/TODO
+new file mode 100644
+index 00000000..43c48ccd
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/TODO
+@@ -0,0 +1,2 @@
++TODO:
++	- provide pfe pe monitoring support
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus.h b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
+new file mode 100644
+index 00000000..04503d28
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
+@@ -0,0 +1,78 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _CBUS_H_
++#define _CBUS_H_
++
++#define EMAC1_BASE_ADDR	(CBUS_BASE_ADDR + 0x200000)
++#define EGPI1_BASE_ADDR	(CBUS_BASE_ADDR + 0x210000)
++#define EMAC2_BASE_ADDR	(CBUS_BASE_ADDR + 0x220000)
++#define EGPI2_BASE_ADDR	(CBUS_BASE_ADDR + 0x230000)
++#define BMU1_BASE_ADDR	(CBUS_BASE_ADDR + 0x240000)
++#define BMU2_BASE_ADDR	(CBUS_BASE_ADDR + 0x250000)
++#define ARB_BASE_ADDR	(CBUS_BASE_ADDR + 0x260000)
++#define DDR_CONFIG_BASE_ADDR	(CBUS_BASE_ADDR + 0x270000)
++#define HIF_BASE_ADDR	(CBUS_BASE_ADDR + 0x280000)
++#define HGPI_BASE_ADDR	(CBUS_BASE_ADDR + 0x290000)
++#define LMEM_BASE_ADDR	(CBUS_BASE_ADDR + 0x300000)
++#define LMEM_SIZE	0x10000
++#define LMEM_END	(LMEM_BASE_ADDR + LMEM_SIZE)
++#define TMU_CSR_BASE_ADDR	(CBUS_BASE_ADDR + 0x310000)
++#define CLASS_CSR_BASE_ADDR	(CBUS_BASE_ADDR + 0x320000)
++#define HIF_NOCPY_BASE_ADDR	(CBUS_BASE_ADDR + 0x350000)
++#define UTIL_CSR_BASE_ADDR	(CBUS_BASE_ADDR + 0x360000)
++#define CBUS_GPT_BASE_ADDR	(CBUS_BASE_ADDR + 0x370000)
++
++/*
++ * PE memory access through CSR:
++ * XXX_MEM_ACCESS_ADDR register bit definitions.
++ */
++#define PE_MEM_ACCESS_WRITE	BIT(31)	/* Internal Memory Write. */
++#define PE_MEM_ACCESS_IMEM	BIT(15)
++#define PE_MEM_ACCESS_DMEM	BIT(16)
++
++/* Byte Enables of the Internal memory access. These are interpreted in BE */
++#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size)	\
++	({ typeof(size) size_ = (size);		\
++	(((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
++
++#include "cbus/emac_mtip.h"
++#include "cbus/gpi.h"
++#include "cbus/bmu.h"
++#include "cbus/hif.h"
++#include "cbus/tmu_csr.h"
++#include "cbus/class_csr.h"
++#include "cbus/hif_nocpy.h"
++#include "cbus/util_csr.h"
++
++/* PFE cores states */
++#define CORE_DISABLE	0x00000000
++#define CORE_ENABLE	0x00000001
++#define CORE_SW_RESET	0x00000002
++
++/* LMEM defines */
++#define LMEM_HDR_SIZE	0x0010
++#define LMEM_BUF_SIZE_LN2	0x7
++#define LMEM_BUF_SIZE	BIT(LMEM_BUF_SIZE_LN2)
++
++/* DDR defines */
++#define DDR_HDR_SIZE	0x0100
++#define DDR_BUF_SIZE_LN2	0xb
++#define DDR_BUF_SIZE	BIT(DDR_BUF_SIZE_LN2)
++
++#endif /* _CBUS_H_ */
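
The big-endian byte-enable macro is easiest to read with worked values. Below is a sketch of composing a DMEM write command word from the bits above; the helper is hypothetical, and the 24-bit address field follows the csr_pe_mem_addr[23:0] layout quoted in class_csr.h later in this patch:

	/*
	 * PE_MEM_ACCESS_BYTE_ENABLE(0, 4) == 0x0f000000  all four bytes
	 * PE_MEM_ACCESS_BYTE_ENABLE(0, 2) == 0x0c000000  two most significant bytes
	 * PE_MEM_ACCESS_BYTE_ENABLE(2, 2) == 0x03000000  two least significant bytes
	 */
	static u32 pe_dmem_write_cmd(u32 addr, int offset, int size)
	{
		return PE_MEM_ACCESS_WRITE | PE_MEM_ACCESS_DMEM |
		       PE_MEM_ACCESS_BYTE_ENABLE(offset, size) |
		       (addr & 0xffffff);	/* csr_pe_mem_addr[23:0] */
	}
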
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
+new file mode 100644
+index 00000000..87738ca3
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
+@@ -0,0 +1,55 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _BMU_H_
++#define _BMU_H_
++
++#define BMU_VERSION	0x000
++#define BMU_CTRL	0x004
++#define BMU_UCAST_CONFIG	0x008
++#define BMU_UCAST_BASE_ADDR	0x00c
++#define BMU_BUF_SIZE	0x010
++#define BMU_BUF_CNT	0x014
++#define BMU_THRES	0x018
++#define BMU_INT_SRC	0x020
++#define BMU_INT_ENABLE	0x024
++#define BMU_ALLOC_CTRL	0x030
++#define BMU_FREE_CTRL	0x034
++#define BMU_FREE_ERR_ADDR	0x038
++#define BMU_CURR_BUF_CNT	0x03c
++#define BMU_MCAST_CNT	0x040
++#define BMU_MCAST_ALLOC_CTRL	0x044
++#define BMU_REM_BUF_CNT	0x048
++#define BMU_LOW_WATERMARK	0x050
++#define BMU_HIGH_WATERMARK	0x054
++#define BMU_INT_MEM_ACCESS	0x100
++
++struct BMU_CFG {
++	unsigned long baseaddr;
++	u32 count;
++	u32 size;
++	u32 low_watermark;
++	u32 high_watermark;
++};
++
++#define BMU1_BUF_SIZE	LMEM_BUF_SIZE_LN2
++#define BMU2_BUF_SIZE	DDR_BUF_SIZE_LN2
++
++#define BMU2_MCAST_ALLOC_CTRL	(BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
++
++#endif /* _BMU_H_ */
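
A BMU1 configuration can be derived purely from the LMEM defines in cbus.h; a sketch (watermark fields omitted, since nothing here specifies them):

	struct BMU_CFG bmu1_cfg = {
		.baseaddr = LMEM_BASE_ADDR,
		.count    = LMEM_SIZE / LMEM_BUF_SIZE,	/* 0x10000 / 128 = 512 buffers */
		.size     = BMU1_BUF_SIZE,		/* ln2(128) = 7, per the define above */
	};
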
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
+new file mode 100644
+index 00000000..e4dadff5
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
+@@ -0,0 +1,289 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _CLASS_CSR_H_
++#define _CLASS_CSR_H_
++
++/* @file class_csr.h.
++ * class_csr - block containing all the classifier control and status registers.
++ * Mapped on CBUS and accessible from all PEs and the ARM.
++ */
++#define CLASS_VERSION	(CLASS_CSR_BASE_ADDR + 0x000)
++#define CLASS_TX_CTRL	(CLASS_CSR_BASE_ADDR + 0x004)
++#define CLASS_INQ_PKTPTR	(CLASS_CSR_BASE_ADDR + 0x010)
++
++/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
++#define CLASS_HDR_SIZE	(CLASS_CSR_BASE_ADDR + 0x014)
++
++/* LMEM header size for the Classifier block.\ Data in the LMEM
++ * is written from this offset.
++ */
++#define CLASS_HDR_SIZE_LMEM(off)	((off) & 0x3f)
++
++/* DDR header size for the Classifier block.\ Data in the DDR
++ * is written from this offset.
++ */
++#define CLASS_HDR_SIZE_DDR(off)	(((off) & 0x1ff) << 16)
++
++/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
++#define CLASS_PE0_QB_DM_ADDR0	(CLASS_CSR_BASE_ADDR + 0x020)
++
++/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
++#define CLASS_PE0_QB_DM_ADDR1	(CLASS_CSR_BASE_ADDR + 0x024)
++
++/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
++#define CLASS_PE0_RO_DM_ADDR0	(CLASS_CSR_BASE_ADDR + 0x060)
++
++/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
++#define CLASS_PE0_RO_DM_ADDR1	(CLASS_CSR_BASE_ADDR + 0x064)
++
++
++/* @name Class PE memory access. Allows external PEs and the HOST to
++ * read/write PMEM/DMEM memory ranges for each classifier PE.
++ */
++/* {csr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
++ * see XXX_MEM_ACCESS_ADDR above for details.
++ */
++#define CLASS_MEM_ACCESS_ADDR	(CLASS_CSR_BASE_ADDR + 0x100)
++
++/* Internal Memory Access Write Data [31:0] */
++#define CLASS_MEM_ACCESS_WDATA	(CLASS_CSR_BASE_ADDR + 0x104)
++
++/* Internal Memory Access Read Data [31:0] */
++#define CLASS_MEM_ACCESS_RDATA	(CLASS_CSR_BASE_ADDR + 0x108)
++#define CLASS_TM_INQ_ADDR	(CLASS_CSR_BASE_ADDR + 0x114)
++#define CLASS_PE_STATUS	(CLASS_CSR_BASE_ADDR + 0x118)
++
++#define CLASS_PHY1_RX_PKTS	(CLASS_CSR_BASE_ADDR + 0x11c)
++#define CLASS_PHY1_TX_PKTS	(CLASS_CSR_BASE_ADDR + 0x120)
++#define CLASS_PHY1_LP_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x124)
++#define CLASS_PHY1_INTF_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x128)
++#define CLASS_PHY1_INTF_MATCH_PKTS	(CLASS_CSR_BASE_ADDR + 0x12c)
++#define CLASS_PHY1_L3_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x130)
++#define CLASS_PHY1_V4_PKTS	(CLASS_CSR_BASE_ADDR + 0x134)
++#define CLASS_PHY1_V6_PKTS	(CLASS_CSR_BASE_ADDR + 0x138)
++#define CLASS_PHY1_CHKSUM_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x13c)
++#define CLASS_PHY1_TTL_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x140)
++#define CLASS_PHY2_RX_PKTS	(CLASS_CSR_BASE_ADDR + 0x144)
++#define CLASS_PHY2_TX_PKTS	(CLASS_CSR_BASE_ADDR + 0x148)
++#define CLASS_PHY2_LP_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x14c)
++#define CLASS_PHY2_INTF_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x150)
++#define CLASS_PHY2_INTF_MATCH_PKTS	(CLASS_CSR_BASE_ADDR + 0x154)
++#define CLASS_PHY2_L3_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x158)
++#define CLASS_PHY2_V4_PKTS	(CLASS_CSR_BASE_ADDR + 0x15c)
++#define CLASS_PHY2_V6_PKTS	(CLASS_CSR_BASE_ADDR + 0x160)
++#define CLASS_PHY2_CHKSUM_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x164)
++#define CLASS_PHY2_TTL_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x168)
++#define CLASS_PHY3_RX_PKTS	(CLASS_CSR_BASE_ADDR + 0x16c)
++#define CLASS_PHY3_TX_PKTS	(CLASS_CSR_BASE_ADDR + 0x170)
++#define CLASS_PHY3_LP_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x174)
++#define CLASS_PHY3_INTF_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x178)
++#define CLASS_PHY3_INTF_MATCH_PKTS	(CLASS_CSR_BASE_ADDR + 0x17c)
++#define CLASS_PHY3_L3_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x180)
++#define CLASS_PHY3_V4_PKTS	(CLASS_CSR_BASE_ADDR + 0x184)
++#define CLASS_PHY3_V6_PKTS	(CLASS_CSR_BASE_ADDR + 0x188)
++#define CLASS_PHY3_CHKSUM_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x18c)
++#define CLASS_PHY3_TTL_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x190)
++#define CLASS_PHY1_ICMP_PKTS	(CLASS_CSR_BASE_ADDR + 0x194)
++#define CLASS_PHY1_IGMP_PKTS	(CLASS_CSR_BASE_ADDR + 0x198)
++#define CLASS_PHY1_TCP_PKTS	(CLASS_CSR_BASE_ADDR + 0x19c)
++#define CLASS_PHY1_UDP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1a0)
++#define CLASS_PHY2_ICMP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1a4)
++#define CLASS_PHY2_IGMP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1a8)
++#define CLASS_PHY2_TCP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1ac)
++#define CLASS_PHY2_UDP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1b0)
++#define CLASS_PHY3_ICMP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1b4)
++#define CLASS_PHY3_IGMP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1b8)
++#define CLASS_PHY3_TCP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1bc)
++#define CLASS_PHY3_UDP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1c0)
++#define CLASS_PHY4_ICMP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1c4)
++#define CLASS_PHY4_IGMP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1c8)
++#define CLASS_PHY4_TCP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1cc)
++#define CLASS_PHY4_UDP_PKTS	(CLASS_CSR_BASE_ADDR + 0x1d0)
++#define CLASS_PHY4_RX_PKTS	(CLASS_CSR_BASE_ADDR + 0x1d4)
++#define CLASS_PHY4_TX_PKTS	(CLASS_CSR_BASE_ADDR + 0x1d8)
++#define CLASS_PHY4_LP_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x1dc)
++#define CLASS_PHY4_INTF_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x1e0)
++#define CLASS_PHY4_INTF_MATCH_PKTS	(CLASS_CSR_BASE_ADDR + 0x1e4)
++#define CLASS_PHY4_L3_FAIL_PKTS	(CLASS_CSR_BASE_ADDR + 0x1e8)
++#define CLASS_PHY4_V4_PKTS	(CLASS_CSR_BASE_ADDR + 0x1ec)
++#define CLASS_PHY4_V6_PKTS	(CLASS_CSR_BASE_ADDR + 0x1f0)
++#define CLASS_PHY4_CHKSUM_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x1f4)
++#define CLASS_PHY4_TTL_ERR_PKTS	(CLASS_CSR_BASE_ADDR + 0x1f8)
++
++#define CLASS_PE_SYS_CLK_RATIO	(CLASS_CSR_BASE_ADDR + 0x200)
++#define CLASS_AFULL_THRES	(CLASS_CSR_BASE_ADDR + 0x204)
++#define CLASS_GAP_BETWEEN_READS	(CLASS_CSR_BASE_ADDR + 0x208)
++#define CLASS_MAX_BUF_CNT	(CLASS_CSR_BASE_ADDR + 0x20c)
++#define CLASS_TSQ_FIFO_THRES	(CLASS_CSR_BASE_ADDR + 0x210)
++#define CLASS_TSQ_MAX_CNT	(CLASS_CSR_BASE_ADDR + 0x214)
++#define CLASS_IRAM_DATA_0	(CLASS_CSR_BASE_ADDR + 0x218)
++#define CLASS_IRAM_DATA_1	(CLASS_CSR_BASE_ADDR + 0x21c)
++#define CLASS_IRAM_DATA_2	(CLASS_CSR_BASE_ADDR + 0x220)
++#define CLASS_IRAM_DATA_3	(CLASS_CSR_BASE_ADDR + 0x224)
++
++#define CLASS_BUS_ACCESS_ADDR	(CLASS_CSR_BASE_ADDR + 0x228)
++
++#define CLASS_BUS_ACCESS_WDATA	(CLASS_CSR_BASE_ADDR + 0x22c)
++#define CLASS_BUS_ACCESS_RDATA	(CLASS_CSR_BASE_ADDR + 0x230)
++
++/* route_entry_size[9:0], route_hash_size[23:16]
++ * (the hash size is actually ln2(size))
++ */
++#define CLASS_ROUTE_HASH_ENTRY_SIZE	(CLASS_CSR_BASE_ADDR + 0x234)
++
++#define CLASS_ROUTE_ENTRY_SIZE(size)	 ((size) & 0x1ff)
++#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
++
++#define CLASS_ROUTE_TABLE_BASE	(CLASS_CSR_BASE_ADDR + 0x238)
++
++#define CLASS_ROUTE_MULTI	(CLASS_CSR_BASE_ADDR + 0x23c)
++#define CLASS_SMEM_OFFSET	(CLASS_CSR_BASE_ADDR + 0x240)
++#define CLASS_LMEM_BUF_SIZE	(CLASS_CSR_BASE_ADDR + 0x244)
++#define CLASS_VLAN_ID	(CLASS_CSR_BASE_ADDR + 0x248)
++#define CLASS_BMU1_BUF_FREE	(CLASS_CSR_BASE_ADDR + 0x24c)
++#define CLASS_USE_TMU_INQ	(CLASS_CSR_BASE_ADDR + 0x250)
++#define CLASS_VLAN_ID1	(CLASS_CSR_BASE_ADDR + 0x254)
++
++#define CLASS_BUS_ACCESS_BASE	(CLASS_CSR_BASE_ADDR + 0x258)
++#define CLASS_BUS_ACCESS_BASE_MASK	(0xFF000000)
++/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
++
++#define CLASS_HIF_PARSE	(CLASS_CSR_BASE_ADDR + 0x25c)
++
++#define CLASS_HOST_PE0_GP	(CLASS_CSR_BASE_ADDR + 0x260)
++#define CLASS_PE0_GP	(CLASS_CSR_BASE_ADDR + 0x264)
++#define CLASS_HOST_PE1_GP	(CLASS_CSR_BASE_ADDR + 0x268)
++#define CLASS_PE1_GP	(CLASS_CSR_BASE_ADDR + 0x26c)
++#define CLASS_HOST_PE2_GP	(CLASS_CSR_BASE_ADDR + 0x270)
++#define CLASS_PE2_GP	(CLASS_CSR_BASE_ADDR + 0x274)
++#define CLASS_HOST_PE3_GP	(CLASS_CSR_BASE_ADDR + 0x278)
++#define CLASS_PE3_GP	(CLASS_CSR_BASE_ADDR + 0x27c)
++#define CLASS_HOST_PE4_GP	(CLASS_CSR_BASE_ADDR + 0x280)
++#define CLASS_PE4_GP	(CLASS_CSR_BASE_ADDR + 0x284)
++#define CLASS_HOST_PE5_GP	(CLASS_CSR_BASE_ADDR + 0x288)
++#define CLASS_PE5_GP	(CLASS_CSR_BASE_ADDR + 0x28c)
++
++#define CLASS_PE_INT_SRC	(CLASS_CSR_BASE_ADDR + 0x290)
++#define CLASS_PE_INT_ENABLE	(CLASS_CSR_BASE_ADDR + 0x294)
++
++#define CLASS_TPID0_TPID1	(CLASS_CSR_BASE_ADDR + 0x298)
++#define CLASS_TPID2	(CLASS_CSR_BASE_ADDR + 0x29c)
++
++#define CLASS_L4_CHKSUM_ADDR	(CLASS_CSR_BASE_ADDR + 0x2a0)
++
++#define CLASS_PE0_DEBUG	(CLASS_CSR_BASE_ADDR + 0x2a4)
++#define CLASS_PE1_DEBUG	(CLASS_CSR_BASE_ADDR + 0x2a8)
++#define CLASS_PE2_DEBUG	(CLASS_CSR_BASE_ADDR + 0x2ac)
++#define CLASS_PE3_DEBUG	(CLASS_CSR_BASE_ADDR + 0x2b0)
++#define CLASS_PE4_DEBUG	(CLASS_CSR_BASE_ADDR + 0x2b4)
++#define CLASS_PE5_DEBUG	(CLASS_CSR_BASE_ADDR + 0x2b8)
++
++#define CLASS_STATE	(CLASS_CSR_BASE_ADDR + 0x2bc)
++
++/* CLASS defines */
++#define CLASS_PBUF_SIZE	0x100	/* Fixed by hardware */
++#define CLASS_PBUF_HEADER_OFFSET	0x80	/* Can be configured */
++
++/* Can be configured */
++#define CLASS_PBUF0_BASE_ADDR	0x000
++/* Can be configured */
++#define CLASS_PBUF1_BASE_ADDR	(CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
++/* Can be configured */
++#define CLASS_PBUF2_BASE_ADDR	(CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
++/* Can be configured */
++#define CLASS_PBUF3_BASE_ADDR	(CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
++
++#define CLASS_PBUF0_HEADER_BASE_ADDR	(CLASS_PBUF0_BASE_ADDR + \
++						CLASS_PBUF_HEADER_OFFSET)
++#define CLASS_PBUF1_HEADER_BASE_ADDR	(CLASS_PBUF1_BASE_ADDR + \
++						CLASS_PBUF_HEADER_OFFSET)
++#define CLASS_PBUF2_HEADER_BASE_ADDR	(CLASS_PBUF2_BASE_ADDR + \
++						CLASS_PBUF_HEADER_OFFSET)
++#define CLASS_PBUF3_HEADER_BASE_ADDR	(CLASS_PBUF3_BASE_ADDR + \
++						CLASS_PBUF_HEADER_OFFSET)
++
++#define CLASS_PE0_RO_DM_ADDR0_VAL	((CLASS_PBUF1_BASE_ADDR << 16) | \
++						CLASS_PBUF0_BASE_ADDR)
++#define CLASS_PE0_RO_DM_ADDR1_VAL	((CLASS_PBUF3_BASE_ADDR << 16) | \
++						CLASS_PBUF2_BASE_ADDR)
++
++#define CLASS_PE0_QB_DM_ADDR0_VAL	((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
++						CLASS_PBUF0_HEADER_BASE_ADDR)
++#define CLASS_PE0_QB_DM_ADDR1_VAL	((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
++						CLASS_PBUF2_HEADER_BASE_ADDR)
++
++#define CLASS_ROUTE_SIZE	128
++#define CLASS_MAX_ROUTE_SIZE	256
++#define CLASS_ROUTE_HASH_BITS	20
++#define CLASS_ROUTE_HASH_MASK	(BIT(CLASS_ROUTE_HASH_BITS) - 1)
++
++/* Can be configured */
++#define	CLASS_ROUTE0_BASE_ADDR	0x400
++/* Can be configured */
++#define CLASS_ROUTE1_BASE_ADDR	(CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
++/* Can be configured */
++#define CLASS_ROUTE2_BASE_ADDR	(CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
++/* Can be configured */
++#define CLASS_ROUTE3_BASE_ADDR	(CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
++
++#define CLASS_SA_SIZE	128
++#define CLASS_IPSEC_SA0_BASE_ADDR	0x600
++/* not used */
++#define CLASS_IPSEC_SA1_BASE_ADDR  (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
++/* not used */
++#define CLASS_IPSEC_SA2_BASE_ADDR  (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
++/* not used */
++#define CLASS_IPSEC_SA3_BASE_ADDR  (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
++
++/* general-purpose free dmem buffer, last portion of 2K dmem pbuf */
++#define CLASS_GP_DMEM_BUF_SIZE	(2048 - (CLASS_PBUF_SIZE * 4) - \
++				(CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
++#define CLASS_GP_DMEM_BUF	((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
++					CLASS_SA_SIZE))
++
++#define TWO_LEVEL_ROUTE		BIT(0)
++#define PHYNO_IN_HASH		BIT(1)
++#define HW_ROUTE_FETCH		BIT(3)
++#define HW_BRIDGE_FETCH		BIT(5)
++#define IP_ALIGNED		BIT(6)
++#define ARC_HIT_CHECK_EN	BIT(7)
++#define CLASS_TOE		BIT(11)
++#define HASH_NORMAL		(0 << 12)
++#define HASH_CRC_PORT		BIT(12)
++#define HASH_CRC_IP		(2 << 12)
++#define HASH_CRC_PORT_IP	(3 << 12)
++#define QB2BUS_LE		BIT(15)
++
++#define TCP_CHKSUM_DROP		BIT(0)
++#define UDP_CHKSUM_DROP		BIT(1)
++#define IPV4_CHKSUM_DROP	BIT(9)
++
++/* CLASS_HIF_PARSE bits */
++#define HIF_PKT_CLASS_EN	BIT(0)
++#define HIF_PKT_OFFSET(ofst)	(((ofst) & 0xF) << 1)
++
++struct class_cfg {
++	u32 toe_mode;
++	unsigned long route_table_baseaddr;
++	u32 route_table_hash_bits;
++	u32 pe_sys_clk_ratio;
++	u32 resume;
++};
++
++#endif /* _CLASS_CSR_H_ */
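
A quick arithmetic check of the DMEM carve-up implied by the defaults above:

	/*
	 * Per-PE 2 KiB DMEM pbuf budget:
	 *   4 x CLASS_PBUF_SIZE  = 4 x 256 = 1024 bytes
	 *   4 x CLASS_ROUTE_SIZE = 4 x 128 =  512 bytes
	 *   1 x CLASS_SA_SIZE    =     128 bytes
	 * CLASS_GP_DMEM_BUF_SIZE = 2048 - 1024 - 512 - 128 = 384 bytes
	 */

The route-hash register would then typically be loaded with CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) | CLASS_ROUTE_HASH_SIZE(CLASS_ROUTE_HASH_BITS), though the init sequence itself is not part of this hunk.
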
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
+new file mode 100644
+index 00000000..9c5d7919
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
+@@ -0,0 +1,242 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _EMAC_H_
++#define _EMAC_H_
++
++#include <linux/ethtool.h>
++
++#define EMAC_IEVENT_REG		0x004
++#define EMAC_IMASK_REG		0x008
++#define EMAC_R_DES_ACTIVE_REG	0x010
++#define EMAC_X_DES_ACTIVE_REG	0x014
++#define EMAC_ECNTRL_REG		0x024
++#define EMAC_MII_DATA_REG	0x040
++#define EMAC_MII_CTRL_REG	0x044
++#define EMAC_MIB_CTRL_STS_REG	0x064
++#define EMAC_RCNTRL_REG		0x084
++#define EMAC_TCNTRL_REG		0x0C4
++#define EMAC_PHY_ADDR_LOW	0x0E4
++#define EMAC_PHY_ADDR_HIGH	0x0E8
++#define EMAC_GAUR		0x120
++#define EMAC_GALR		0x124
++#define EMAC_TFWR_STR_FWD	0x144
++#define EMAC_RX_SECTION_FULL	0x190
++#define EMAC_RX_SECTION_EMPTY	0x194
++#define EMAC_TX_SECTION_EMPTY	0x1A0
++#define EMAC_TRUNC_FL		0x1B0
++
++#define RMON_T_DROP	0x200 /* Count of frames not counted correctly */
++#define RMON_T_PACKETS	0x204 /* RMON TX packet count */
++#define RMON_T_BC_PKT	0x208 /* RMON TX broadcast pkts */
++#define RMON_T_MC_PKT	0x20c /* RMON TX multicast pkts */
++#define RMON_T_CRC_ALIGN	0x210 /* RMON TX pkts with CRC align err */
++#define RMON_T_UNDERSIZE	0x214 /* RMON TX pkts < 64 bytes, good CRC */
++#define RMON_T_OVERSIZE	0x218 /* RMON TX pkts > MAX_FL bytes, good CRC */
++#define RMON_T_FRAG	0x21c /* RMON TX pkts < 64 bytes, bad CRC */
++#define RMON_T_JAB	0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
++#define RMON_T_COL	0x224 /* RMON TX collision count */
++#define RMON_T_P64	0x228 /* RMON TX 64 byte pkts */
++#define RMON_T_P65TO127	0x22c /* RMON TX 65 to 127 byte pkts */
++#define RMON_T_P128TO255	0x230 /* RMON TX 128 to 255 byte pkts */
++#define RMON_T_P256TO511	0x234 /* RMON TX 256 to 511 byte pkts */
++#define RMON_T_P512TO1023	0x238 /* RMON TX 512 to 1023 byte pkts */
++#define RMON_T_P1024TO2047	0x23c /* RMON TX 1024 to 2047 byte pkts */
++#define RMON_T_P_GTE2048	0x240 /* RMON TX pkts > 2048 bytes */
++#define RMON_T_OCTETS	0x244 /* RMON TX octets */
++#define IEEE_T_DROP	0x248 /* Count of frames not counted correctly */
++#define IEEE_T_FRAME_OK	0x24c /* Frames tx'd OK */
++#define IEEE_T_1COL	0x250 /* Frames tx'd with single collision */
++#define IEEE_T_MCOL	0x254 /* Frames tx'd with multiple collision */
++#define IEEE_T_DEF	0x258 /* Frames tx'd after deferral delay */
++#define IEEE_T_LCOL	0x25c /* Frames tx'd with late collision */
++#define IEEE_T_EXCOL	0x260 /* Frames tx'd with excesv collisions */
++#define IEEE_T_MACERR	0x264 /* Frames tx'd with TX FIFO underrun */
++#define IEEE_T_CSERR	0x268 /* Frames tx'd with carrier sense err */
++#define IEEE_T_SQE	0x26c /* Frames tx'd with SQE err */
++#define IEEE_T_FDXFC	0x270 /* Flow control pause frames tx'd */
++#define IEEE_T_OCTETS_OK	0x274 /* Octet count for frames tx'd w/o err */
++#define RMON_R_PACKETS	0x284 /* RMON RX packet count */
++#define RMON_R_BC_PKT	0x288 /* RMON RX broadcast pkts */
++#define RMON_R_MC_PKT	0x28c /* RMON RX multicast pkts */
++#define RMON_R_CRC_ALIGN	0x290 /* RMON RX pkts with CRC alignment err */
++#define RMON_R_UNDERSIZE	0x294 /* RMON RX pkts < 64 bytes, good CRC */
++#define RMON_R_OVERSIZE	0x298 /* RMON RX pkts > MAX_FL bytes, good CRC */
++#define RMON_R_FRAG	0x29c /* RMON RX pkts < 64 bytes, bad CRC */
++#define RMON_R_JAB	0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
++#define RMON_R_RESVD_O	0x2a4 /* Reserved */
++#define RMON_R_P64	0x2a8 /* RMON RX 64 byte pkts */
++#define RMON_R_P65TO127	0x2ac /* RMON RX 65 to 127 byte pkts */
++#define RMON_R_P128TO255	0x2b0 /* RMON RX 128 to 255 byte pkts */
++#define RMON_R_P256TO511	0x2b4 /* RMON RX 256 to 511 byte pkts */
++#define RMON_R_P512TO1023	0x2b8 /* RMON RX 512 to 1023 byte pkts */
++#define RMON_R_P1024TO2047	0x2bc /* RMON RX 1024 to 2047 byte pkts */
++#define RMON_R_P_GTE2048	0x2c0 /* RMON RX pkts > 2048 bytes */
++#define RMON_R_OCTETS	0x2c4 /* RMON RX octets */
++#define IEEE_R_DROP	0x2c8 /* Count of frames not counted correctly */
++#define IEEE_R_FRAME_OK	0x2cc /* Frames rx'd OK */
++#define IEEE_R_CRC	0x2d0 /* Frames rx'd with CRC err */
++#define IEEE_R_ALIGN	0x2d4 /* Frames rx'd with alignment err */
++#define IEEE_R_MACERR	0x2d8 /* Receive FIFO overflow count */
++#define IEEE_R_FDXFC	0x2dc /* Flow control pause frames rx'd */
++#define IEEE_R_OCTETS_OK	0x2e0 /* Octet cnt for frames rx'd w/o err */
++
++#define EMAC_SMAC_0_0	0x500 /* Supplemental MAC Address 0 (RW). */
++#define EMAC_SMAC_0_1	0x504 /* Supplemental MAC Address 0 (RW). */
++
++/* GEMAC definitions and settings */
++
++#define EMAC_PORT_0	0
++#define EMAC_PORT_1	1
++
++/* GEMAC Bit definitions */
++#define EMAC_IEVENT_HBERR		 0x80000000
++#define EMAC_IEVENT_BABR		 0x40000000
++#define EMAC_IEVENT_BABT		 0x20000000
++#define EMAC_IEVENT_GRA			 0x10000000
++#define EMAC_IEVENT_TXF			 0x08000000
++#define EMAC_IEVENT_TXB			 0x04000000
++#define EMAC_IEVENT_RXF			 0x02000000
++#define EMAC_IEVENT_RXB			 0x01000000
++#define EMAC_IEVENT_MII			 0x00800000
++#define EMAC_IEVENT_EBERR		 0x00400000
++#define EMAC_IEVENT_LC			 0x00200000
++#define EMAC_IEVENT_RL			 0x00100000
++#define EMAC_IEVENT_UN			 0x00080000
++
++#define EMAC_IMASK_HBERR                 0x80000000
++#define EMAC_IMASK_BABR                  0x40000000
++#define EMAC_IMASKT_BABT                 0x20000000
++#define EMAC_IMASK_GRA                   0x10000000
++#define EMAC_IMASKT_TXF                  0x08000000
++#define EMAC_IMASK_TXB                   0x04000000
++#define EMAC_IMASKT_RXF                  0x02000000
++#define EMAC_IMASK_RXB                   0x01000000
++#define EMAC_IMASK_MII                   0x00800000
++#define EMAC_IMASK_EBERR                 0x00400000
++#define EMAC_IMASK_LC                    0x00200000
++#define EMAC_IMASKT_RL                   0x00100000
++#define EMAC_IMASK_UN                    0x00080000
++
++#define EMAC_RCNTRL_MAX_FL_SHIFT         16
++#define EMAC_RCNTRL_LOOP                 0x00000001
++#define EMAC_RCNTRL_DRT                  0x00000002
++#define EMAC_RCNTRL_MII_MODE             0x00000004
++#define EMAC_RCNTRL_PROM                 0x00000008
++#define EMAC_RCNTRL_BC_REJ               0x00000010
++#define EMAC_RCNTRL_FCE                  0x00000020
++#define EMAC_RCNTRL_RGMII                0x00000040
++#define EMAC_RCNTRL_SGMII                0x00000080
++#define EMAC_RCNTRL_RMII                 0x00000100
++#define EMAC_RCNTRL_RMII_10T             0x00000200
++#define EMAC_RCNTRL_CRC_FWD		 0x00004000
++
++#define EMAC_TCNTRL_GTS                  0x00000001
++#define EMAC_TCNTRL_HBC                  0x00000002
++#define EMAC_TCNTRL_FDEN                 0x00000004
++#define EMAC_TCNTRL_TFC_PAUSE            0x00000008
++#define EMAC_TCNTRL_RFC_PAUSE            0x00000010
++
++#define EMAC_ECNTRL_RESET                0x00000001      /* reset the EMAC */
++#define EMAC_ECNTRL_ETHER_EN             0x00000002      /* enable the EMAC */
++#define EMAC_ECNTRL_MAGIC_ENA		 0x00000004
++#define EMAC_ECNTRL_SLEEP		 0x00000008
++#define EMAC_ECNTRL_SPEED                0x00000020
++#define EMAC_ECNTRL_DBSWAP               0x00000100
++
++#define EMAC_X_WMRK_STRFWD               0x00000100
++
++#define EMAC_X_DES_ACTIVE_TDAR           0x01000000
++#define EMAC_R_DES_ACTIVE_RDAR           0x01000000
++
++#define EMAC_RX_SECTION_EMPTY_V		0x00010006
++/*
++ * The possible operating speeds of the MAC, currently supporting 10, 100 and
++ * 1000Mb modes.
++ */
++enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
++
++/* MII-related definitions */
++#define EMAC_MII_DATA_ST         0x40000000      /* Start of frame delimiter */
++#define EMAC_MII_DATA_OP_RD      0x20000000      /* Perform a read operation */
++#define EMAC_MII_DATA_OP_CL45_RD 0x30000000      /* Perform a read operation */
++#define EMAC_MII_DATA_OP_WR      0x10000000      /* Perform a write operation */
++#define EMAC_MII_DATA_OP_CL45_WR 0x10000000      /* Perform a write operation */
++#define EMAC_MII_DATA_PA_MSK     0x0f800000      /* PHY Address field mask */
++#define EMAC_MII_DATA_RA_MSK     0x007c0000      /* PHY Register field mask */
++#define EMAC_MII_DATA_TA         0x00020000      /* Turnaround */
++#define EMAC_MII_DATA_DATAMSK    0x0000ffff      /* PHY data field */
++
++#define EMAC_MII_DATA_RA_SHIFT   18      /* MII Register address bits */
++#define EMAC_MII_DATA_RA_MASK	 0x1F      /* MII Register address mask */
++#define EMAC_MII_DATA_PA_SHIFT   23      /* MII PHY address bits */
++#define EMAC_MII_DATA_PA_MASK    0x1F      /* MII PHY address mask */
++
++#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
++				EMAC_MII_DATA_RA_SHIFT)
++#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
++				EMAC_MII_DATA_PA_SHIFT)
++#define EMAC_MII_DATA(v)    ((v) & 0xffff)
++
++#define EMAC_MII_SPEED_SHIFT	1
++#define EMAC_HOLDTIME_SHIFT	8
++#define EMAC_HOLDTIME_MASK	0x7
++#define EMAC_HOLDTIME(v)	(((v) & EMAC_HOLDTIME_MASK) << \
++					EMAC_HOLDTIME_SHIFT)
++
++/*
++ * The Address organisation for the MAC device.  All addresses are split into
++ * two 32-bit register fields.  The first one (bottom) holds the lower 32 bits
++ * of the address and the other field holds the high-order bits - 16 bits in
++ * the case of MAC addresses, or 32 bits for the hash address.
++ * In terms of memory storage, 'bottom' is assumed to sit at a lower address
++ * location than 'top', i.e. 'top' should be at the address location of
++ * 'bottom' + 4 bytes.
++ */
++struct pfe_mac_addr {
++	u32 bottom;     /* Lower 32-bits of address. */
++	u32 top;        /* Upper 32-bits of address. */
++};
++
++/*
++ * The following is the organisation of the address filters section of the MAC
++ * registers.  The Cadence MAC contains four possible specific address match
++ * addresses, if an incoming frame corresponds to any one of these four
++ * addresses then the frame will be copied to memory.
++ * It is not necessary for all four of the address match registers to be
++ * programmed, this is application dependent.
++ */
++struct spec_addr {
++	struct pfe_mac_addr one;        /* Specific address register 1. */
++	struct pfe_mac_addr two;        /* Specific address register 2. */
++	struct pfe_mac_addr three;      /* Specific address register 3. */
++	struct pfe_mac_addr four;       /* Specific address register 4. */
++};
++
++struct gemac_cfg {
++	u32 mode;
++	u32 speed;
++	u32 duplex;
++};
++
++/* EMAC Hash size */
++#define EMAC_HASH_REG_BITS       64
++
++#define EMAC_SPEC_ADDR_MAX	4
++
++#endif /* _EMAC_H_ */
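
The MII field macros compose into a Clause-22 read command for EMAC_MII_DATA_REG; the helper below is hypothetical, the macros are not:

	static u32 emac_mii_read_cmd(int phy_addr, int reg_addr)
	{
		return EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD | EMAC_MII_DATA_TA |
		       EMAC_MII_DATA_PA(phy_addr) | EMAC_MII_DATA_RA(reg_addr);
	}

Completion would typically be signalled through EMAC_IEVENT_MII, with the returned data extracted via EMAC_MII_DATA_DATAMSK.
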
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
+new file mode 100644
+index 00000000..7b295830
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
+@@ -0,0 +1,86 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _GPI_H_
++#define _GPI_H_
++
++#define GPI_VERSION	0x00
++#define GPI_CTRL	0x04
++#define GPI_RX_CONFIG	0x08
++#define GPI_HDR_SIZE	0x0c
++#define GPI_BUF_SIZE	0x10
++#define GPI_LMEM_ALLOC_ADDR	0x14
++#define GPI_LMEM_FREE_ADDR	0x18
++#define GPI_DDR_ALLOC_ADDR	0x1c
++#define GPI_DDR_FREE_ADDR	0x20
++#define GPI_CLASS_ADDR	0x24
++#define GPI_DRX_FIFO	0x28
++#define GPI_TRX_FIFO	0x2c
++#define GPI_INQ_PKTPTR	0x30
++#define GPI_DDR_DATA_OFFSET	0x34
++#define GPI_LMEM_DATA_OFFSET	0x38
++#define GPI_TMLF_TX	0x4c
++#define GPI_DTX_ASEQ	0x50
++#define GPI_FIFO_STATUS	0x54
++#define GPI_FIFO_DEBUG	0x58
++#define GPI_TX_PAUSE_TIME	0x5c
++#define GPI_LMEM_SEC_BUF_DATA_OFFSET	0x60
++#define GPI_DDR_SEC_BUF_DATA_OFFSET	0x64
++#define GPI_TOE_CHKSUM_EN	0x68
++#define GPI_OVERRUN_DROPCNT	0x6c
++#define GPI_CSR_MTIP_PAUSE_REG		0x74
++#define GPI_CSR_MTIP_PAUSE_QUANTUM	0x78
++#define GPI_CSR_RX_CNT			0x7c
++#define GPI_CSR_TX_CNT			0x80
++#define GPI_CSR_DEBUG1			0x84
++#define GPI_CSR_DEBUG2			0x88
++
++struct gpi_cfg {
++	u32 lmem_rtry_cnt;
++	u32 tmlf_txthres;
++	u32 aseq_len;
++	u32 mtip_pause_reg;
++};
++
++/* GPI commons defines */
++#define GPI_LMEM_BUF_EN	0x1
++#define GPI_DDR_BUF_EN	0x1
++
++/* EGPI 1 defines */
++#define EGPI1_LMEM_RTRY_CNT	0x40
++#define EGPI1_TMLF_TXTHRES	0xBC
++#define EGPI1_ASEQ_LEN	0x50
++
++/* EGPI 2 defines */
++#define EGPI2_LMEM_RTRY_CNT	0x40
++#define EGPI2_TMLF_TXTHRES	0xBC
++#define EGPI2_ASEQ_LEN	0x40
++
++/* EGPI 3 defines */
++#define EGPI3_LMEM_RTRY_CNT	0x40
++#define EGPI3_TMLF_TXTHRES	0xBC
++#define EGPI3_ASEQ_LEN	0x40
++
++/* HGPI defines */
++#define HGPI_LMEM_RTRY_CNT	0x40
++#define HGPI_TMLF_TXTHRES	0xBC
++#define HGPI_ASEQ_LEN	0x40
++
++#define EGPI_PAUSE_TIME		0x000007D0
++#define EGPI_PAUSE_ENABLE	0x40000000
++#endif /* _GPI_H_ */
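
The per-instance defines map one-to-one onto struct gpi_cfg; a sketch of an EGPI1 configuration (the pause field is left unset, as its value is not given here):

	static const struct gpi_cfg egpi1_cfg = {
		.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,	/* 0x40 */
		.tmlf_txthres  = EGPI1_TMLF_TXTHRES,	/* 0xBC */
		.aseq_len      = EGPI1_ASEQ_LEN,	/* 0x50 */
	};
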
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
+new file mode 100644
+index 00000000..71cf81a7
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
+@@ -0,0 +1,100 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _HIF_H_
++#define _HIF_H_
++
++/* @file hif.h.
++ * hif - PFE HIF block control and status registers.
++ * Mapped on CBUS and accessible from all PEs and the ARM.
++ */
++#define HIF_VERSION	(HIF_BASE_ADDR + 0x00)
++#define HIF_TX_CTRL	(HIF_BASE_ADDR + 0x04)
++#define HIF_TX_CURR_BD_ADDR	(HIF_BASE_ADDR + 0x08)
++#define HIF_TX_ALLOC	(HIF_BASE_ADDR + 0x0c)
++#define HIF_TX_BDP_ADDR	(HIF_BASE_ADDR + 0x10)
++#define HIF_TX_STATUS	(HIF_BASE_ADDR + 0x14)
++#define HIF_RX_CTRL	(HIF_BASE_ADDR + 0x20)
++#define HIF_RX_BDP_ADDR	(HIF_BASE_ADDR + 0x24)
++#define HIF_RX_STATUS	(HIF_BASE_ADDR + 0x30)
++#define HIF_INT_SRC	(HIF_BASE_ADDR + 0x34)
++#define HIF_INT_ENABLE	(HIF_BASE_ADDR + 0x38)
++#define HIF_POLL_CTRL	(HIF_BASE_ADDR + 0x3c)
++#define HIF_RX_CURR_BD_ADDR	(HIF_BASE_ADDR + 0x40)
++#define HIF_RX_ALLOC	(HIF_BASE_ADDR + 0x44)
++#define HIF_TX_DMA_STATUS	(HIF_BASE_ADDR + 0x48)
++#define HIF_RX_DMA_STATUS	(HIF_BASE_ADDR + 0x4c)
++#define HIF_INT_COAL	(HIF_BASE_ADDR + 0x50)
++
++/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
++#define HIF_INT		BIT(0)
++#define HIF_RXBD_INT	BIT(1)
++#define HIF_RXPKT_INT	BIT(2)
++#define HIF_TXBD_INT	BIT(3)
++#define HIF_TXPKT_INT	BIT(4)
++
++/* HIF_TX_CTRL bits */
++#define HIF_CTRL_DMA_EN			BIT(0)
++#define HIF_CTRL_BDP_POLL_CTRL_EN	BIT(1)
++#define HIF_CTRL_BDP_CH_START_WSTB	BIT(2)
++
++/* HIF_RX_STATUS bits */
++#define BDP_CSR_RX_DMA_ACTV     BIT(16)
++
++/* HIF_INT_ENABLE bits */
++#define HIF_INT_EN		BIT(0)
++#define HIF_RXBD_INT_EN		BIT(1)
++#define HIF_RXPKT_INT_EN	BIT(2)
++#define HIF_TXBD_INT_EN		BIT(3)
++#define HIF_TXPKT_INT_EN	BIT(4)
++
++/* HIF_POLL_CTRL bits */
++#define HIF_RX_POLL_CTRL_CYCLE	0x0400
++#define HIF_TX_POLL_CTRL_CYCLE	0x0400
++
++/* HIF_INT_COAL bits */
++#define HIF_INT_COAL_ENABLE	BIT(31)
++
++/* Buffer descriptor control bits */
++#define BD_CTRL_BUFLEN_MASK	0x3fff
++#define BD_BUF_LEN(x)	((x) & BD_CTRL_BUFLEN_MASK)
++#define BD_CTRL_CBD_INT_EN	BIT(16)
++#define BD_CTRL_PKT_INT_EN	BIT(17)
++#define BD_CTRL_LIFM		BIT(18)
++#define BD_CTRL_LAST_BD		BIT(19)
++#define BD_CTRL_DIR		BIT(20)
++#define BD_CTRL_LMEM_CPY	BIT(21) /* Valid only for HIF_NOCPY */
++#define BD_CTRL_PKT_XFER	BIT(24)
++#define BD_CTRL_DESC_EN		BIT(31)
++#define BD_CTRL_PARSE_DISABLE	BIT(25)
++#define BD_CTRL_BRFETCH_DISABLE	BIT(26)
++#define BD_CTRL_RTFETCH_DISABLE	BIT(27)
++
++/* Buffer descriptor status bits */
++#define BD_STATUS_CONN_ID(x)	((x) & 0xffff)
++#define BD_STATUS_DIR_PROC_ID	BIT(16)
++#define BD_STATUS_CONN_ID_EN	BIT(17)
++#define BD_STATUS_PE2PROC_ID(x)	(((x) & 7) << 18)
++#define BD_STATUS_LE_DATA	BIT(21)
++#define BD_STATUS_CHKSUM_EN	BIT(22)
++
++/* HIF Buffer descriptor status bits */
++#define DIR_PROC_ID	BIT(16)
++#define PROC_ID(id)	((id) << 18)
++
++#endif /* _HIF_H_ */
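
A worked example of the buffer-descriptor bit macros, with illustrative values:

	/* An RX descriptor armed for a 2 KiB buffer that raises a packet
	 * interrupt and marks the last fragment of the frame. */
	u32 bd_ctrl = BD_CTRL_DESC_EN | BD_CTRL_LIFM |
		      BD_CTRL_PKT_INT_EN | BD_BUF_LEN(2048);
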
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
+new file mode 100644
+index 00000000..3d4d43ce
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
+@@ -0,0 +1,50 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _HIF_NOCPY_H_
++#define _HIF_NOCPY_H_
++
++#define HIF_NOCPY_VERSION	(HIF_NOCPY_BASE_ADDR + 0x00)
++#define HIF_NOCPY_TX_CTRL	(HIF_NOCPY_BASE_ADDR + 0x04)
++#define HIF_NOCPY_TX_CURR_BD_ADDR	(HIF_NOCPY_BASE_ADDR + 0x08)
++#define HIF_NOCPY_TX_ALLOC	(HIF_NOCPY_BASE_ADDR + 0x0c)
++#define HIF_NOCPY_TX_BDP_ADDR	(HIF_NOCPY_BASE_ADDR + 0x10)
++#define HIF_NOCPY_TX_STATUS	(HIF_NOCPY_BASE_ADDR + 0x14)
++#define HIF_NOCPY_RX_CTRL	(HIF_NOCPY_BASE_ADDR + 0x20)
++#define HIF_NOCPY_RX_BDP_ADDR	(HIF_NOCPY_BASE_ADDR + 0x24)
++#define HIF_NOCPY_RX_STATUS	(HIF_NOCPY_BASE_ADDR + 0x30)
++#define HIF_NOCPY_INT_SRC	(HIF_NOCPY_BASE_ADDR + 0x34)
++#define HIF_NOCPY_INT_ENABLE	(HIF_NOCPY_BASE_ADDR + 0x38)
++#define HIF_NOCPY_POLL_CTRL	(HIF_NOCPY_BASE_ADDR + 0x3c)
++#define HIF_NOCPY_RX_CURR_BD_ADDR	(HIF_NOCPY_BASE_ADDR + 0x40)
++#define HIF_NOCPY_RX_ALLOC	(HIF_NOCPY_BASE_ADDR + 0x44)
++#define HIF_NOCPY_TX_DMA_STATUS	(HIF_NOCPY_BASE_ADDR + 0x48)
++#define HIF_NOCPY_RX_DMA_STATUS	(HIF_NOCPY_BASE_ADDR + 0x4c)
++#define HIF_NOCPY_RX_INQ0_PKTPTR	(HIF_NOCPY_BASE_ADDR + 0x50)
++#define HIF_NOCPY_RX_INQ1_PKTPTR	(HIF_NOCPY_BASE_ADDR + 0x54)
++#define HIF_NOCPY_TX_PORT_NO	(HIF_NOCPY_BASE_ADDR + 0x60)
++#define HIF_NOCPY_LMEM_ALLOC_ADDR	(HIF_NOCPY_BASE_ADDR + 0x64)
++#define HIF_NOCPY_CLASS_ADDR	(HIF_NOCPY_BASE_ADDR + 0x68)
++#define HIF_NOCPY_TMU_PORT0_ADDR	(HIF_NOCPY_BASE_ADDR + 0x70)
++#define HIF_NOCPY_TMU_PORT1_ADDR	(HIF_NOCPY_BASE_ADDR + 0x74)
++#define HIF_NOCPY_TMU_PORT2_ADDR	(HIF_NOCPY_BASE_ADDR + 0x7c)
++#define HIF_NOCPY_TMU_PORT3_ADDR	(HIF_NOCPY_BASE_ADDR + 0x80)
++#define HIF_NOCPY_TMU_PORT4_ADDR	(HIF_NOCPY_BASE_ADDR + 0x84)
++#define HIF_NOCPY_INT_COAL	(HIF_NOCPY_BASE_ADDR + 0x90)
++
++#endif /* _HIF_NOCPY_H_ */
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
+new file mode 100644
+index 00000000..05f3d681
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
+@@ -0,0 +1,168 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _TMU_CSR_H_
++#define _TMU_CSR_H_
++
++#define TMU_VERSION	(TMU_CSR_BASE_ADDR + 0x000)
++#define TMU_INQ_WATERMARK	(TMU_CSR_BASE_ADDR + 0x004)
++#define TMU_PHY_INQ_PKTPTR	(TMU_CSR_BASE_ADDR + 0x008)
++#define TMU_PHY_INQ_PKTINFO	(TMU_CSR_BASE_ADDR + 0x00c)
++#define TMU_PHY_INQ_FIFO_CNT	(TMU_CSR_BASE_ADDR + 0x010)
++#define TMU_SYS_GENERIC_CONTROL	(TMU_CSR_BASE_ADDR + 0x014)
++#define TMU_SYS_GENERIC_STATUS	(TMU_CSR_BASE_ADDR + 0x018)
++#define TMU_SYS_GEN_CON0	(TMU_CSR_BASE_ADDR + 0x01c)
++#define TMU_SYS_GEN_CON1	(TMU_CSR_BASE_ADDR + 0x020)
++#define TMU_SYS_GEN_CON2	(TMU_CSR_BASE_ADDR + 0x024)
++#define TMU_SYS_GEN_CON3	(TMU_CSR_BASE_ADDR + 0x028)
++#define TMU_SYS_GEN_CON4	(TMU_CSR_BASE_ADDR + 0x02c)
++#define TMU_TEQ_DISABLE_DROPCHK	(TMU_CSR_BASE_ADDR + 0x030)
++#define TMU_TEQ_CTRL	(TMU_CSR_BASE_ADDR + 0x034)
++#define TMU_TEQ_QCFG	(TMU_CSR_BASE_ADDR + 0x038)
++#define TMU_TEQ_DROP_STAT	(TMU_CSR_BASE_ADDR + 0x03c)
++#define TMU_TEQ_QAVG	(TMU_CSR_BASE_ADDR + 0x040)
++#define TMU_TEQ_WREG_PROB	(TMU_CSR_BASE_ADDR + 0x044)
++#define TMU_TEQ_TRANS_STAT	(TMU_CSR_BASE_ADDR + 0x048)
++#define TMU_TEQ_HW_PROB_CFG0	(TMU_CSR_BASE_ADDR + 0x04c)
++#define TMU_TEQ_HW_PROB_CFG1	(TMU_CSR_BASE_ADDR + 0x050)
++#define TMU_TEQ_HW_PROB_CFG2	(TMU_CSR_BASE_ADDR + 0x054)
++#define TMU_TEQ_HW_PROB_CFG3	(TMU_CSR_BASE_ADDR + 0x058)
++#define TMU_TEQ_HW_PROB_CFG4	(TMU_CSR_BASE_ADDR + 0x05c)
++#define TMU_TEQ_HW_PROB_CFG5	(TMU_CSR_BASE_ADDR + 0x060)
++#define TMU_TEQ_HW_PROB_CFG6	(TMU_CSR_BASE_ADDR + 0x064)
++#define TMU_TEQ_HW_PROB_CFG7	(TMU_CSR_BASE_ADDR + 0x068)
++#define TMU_TEQ_HW_PROB_CFG8	(TMU_CSR_BASE_ADDR + 0x06c)
++#define TMU_TEQ_HW_PROB_CFG9	(TMU_CSR_BASE_ADDR + 0x070)
++#define TMU_TEQ_HW_PROB_CFG10	(TMU_CSR_BASE_ADDR + 0x074)
++#define TMU_TEQ_HW_PROB_CFG11	(TMU_CSR_BASE_ADDR + 0x078)
++#define TMU_TEQ_HW_PROB_CFG12	(TMU_CSR_BASE_ADDR + 0x07c)
++#define TMU_TEQ_HW_PROB_CFG13	(TMU_CSR_BASE_ADDR + 0x080)
++#define TMU_TEQ_HW_PROB_CFG14	(TMU_CSR_BASE_ADDR + 0x084)
++#define TMU_TEQ_HW_PROB_CFG15	(TMU_CSR_BASE_ADDR + 0x088)
++#define TMU_TEQ_HW_PROB_CFG16	(TMU_CSR_BASE_ADDR + 0x08c)
++#define TMU_TEQ_HW_PROB_CFG17	(TMU_CSR_BASE_ADDR + 0x090)
++#define TMU_TEQ_HW_PROB_CFG18	(TMU_CSR_BASE_ADDR + 0x094)
++#define TMU_TEQ_HW_PROB_CFG19	(TMU_CSR_BASE_ADDR + 0x098)
++#define TMU_TEQ_HW_PROB_CFG20	(TMU_CSR_BASE_ADDR + 0x09c)
++#define TMU_TEQ_HW_PROB_CFG21	(TMU_CSR_BASE_ADDR + 0x0a0)
++#define TMU_TEQ_HW_PROB_CFG22	(TMU_CSR_BASE_ADDR + 0x0a4)
++#define TMU_TEQ_HW_PROB_CFG23	(TMU_CSR_BASE_ADDR + 0x0a8)
++#define TMU_TEQ_HW_PROB_CFG24	(TMU_CSR_BASE_ADDR + 0x0ac)
++#define TMU_TEQ_HW_PROB_CFG25	(TMU_CSR_BASE_ADDR + 0x0b0)
++#define TMU_TDQ_IIFG_CFG	(TMU_CSR_BASE_ADDR + 0x0b4)
++/* [9:0] Scheduler enable for each of the schedulers in the TDQ.
++ * This is a global enable for all schedulers in PHY0.
++ */
++#define TMU_TDQ0_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x0b8)
++
++#define TMU_LLM_CTRL	(TMU_CSR_BASE_ADDR + 0x0bc)
++#define TMU_LLM_BASE_ADDR	(TMU_CSR_BASE_ADDR + 0x0c0)
++#define TMU_LLM_QUE_LEN	(TMU_CSR_BASE_ADDR + 0x0c4)
++#define TMU_LLM_QUE_HEADPTR	(TMU_CSR_BASE_ADDR + 0x0c8)
++#define TMU_LLM_QUE_TAILPTR	(TMU_CSR_BASE_ADDR + 0x0cc)
++#define TMU_LLM_QUE_DROPCNT	(TMU_CSR_BASE_ADDR + 0x0d0)
++#define TMU_INT_EN	(TMU_CSR_BASE_ADDR + 0x0d4)
++#define TMU_INT_SRC	(TMU_CSR_BASE_ADDR + 0x0d8)
++#define TMU_INQ_STAT	(TMU_CSR_BASE_ADDR + 0x0dc)
++#define TMU_CTRL	(TMU_CSR_BASE_ADDR + 0x0e0)
++
++/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory
++ * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of
++ * the internal memory. This address is used to access both the PM and DM of
++ * all the PE's
++ */
++#define TMU_MEM_ACCESS_ADDR	(TMU_CSR_BASE_ADDR + 0x0e4)
++
++/* Internal Memory Access Write Data */
++#define TMU_MEM_ACCESS_WDATA	(TMU_CSR_BASE_ADDR + 0x0e8)
++/* Internal Memory Access Read Data. The commands are blocked
++ * at the mem_access only
++ */
++#define TMU_MEM_ACCESS_RDATA	(TMU_CSR_BASE_ADDR + 0x0ec)
++
++/* [31:0] PHY0 in queue address (must be initialized with one of the
++ * xxx_INQ_PKTPTR cbus addresses)
++ */
++#define TMU_PHY0_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x0f0)
++/* [31:0] PHY1 in queue address (must be initialized with one of the
++ * xxx_INQ_PKTPTR cbus addresses)
++ */
++#define TMU_PHY1_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x0f4)
++/* [31:0] PHY2 in queue address (must be initialized with one of the
++ * xxx_INQ_PKTPTR cbus addresses)
++ */
++#define TMU_PHY2_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x0f8)
++/* [31:0] PHY3 in queue address (must be initialized with one of the
++ * xxx_INQ_PKTPTR cbus addresses)
++ */
++#define TMU_PHY3_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x0fc)
++#define TMU_BMU_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x100)
++#define TMU_TX_CTRL	(TMU_CSR_BASE_ADDR + 0x104)
++
++#define TMU_BUS_ACCESS_WDATA	(TMU_CSR_BASE_ADDR + 0x108)
++#define TMU_BUS_ACCESS	(TMU_CSR_BASE_ADDR + 0x10c)
++#define TMU_BUS_ACCESS_RDATA	(TMU_CSR_BASE_ADDR + 0x110)
++
++#define TMU_PE_SYS_CLK_RATIO	(TMU_CSR_BASE_ADDR + 0x114)
++#define TMU_PE_STATUS	(TMU_CSR_BASE_ADDR + 0x118)
++#define TMU_TEQ_MAX_THRESHOLD	(TMU_CSR_BASE_ADDR + 0x11c)
++/* [31:0] PHY4 in queue address (must be initialized with one of the
++ * xxx_INQ_PKTPTR cbus addresses)
++ */
++#define TMU_PHY4_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x134)
++/* [9:0] Scheduler enable for each of the schedulers in the TDQ.
++ * This is a global enable for all schedulers in PHY1.
++ */
++#define TMU_TDQ1_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x138)
++/* [9:0] Scheduler enable for each of the schedulers in the TDQ.
++ * This is a global enable for all schedulers in PHY2.
++ */
++#define TMU_TDQ2_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x13c)
++/* [9:0] Scheduler enable for each of the schedulers in the TDQ.
++ * This is a global enable for all schedulers in PHY3.
++ */
++#define TMU_TDQ3_SCH_CTRL	(TMU_CSR_BASE_ADDR + 0x140)
++#define TMU_BMU_BUF_SIZE	(TMU_CSR_BASE_ADDR + 0x144)
++/* [31:0] PHY5 in queue address (must be initialized with one of the
++ * xxx_INQ_PKTPTR cbus addresses)
++ */
++#define TMU_PHY5_INQ_ADDR	(TMU_CSR_BASE_ADDR + 0x148)
++
++#define SW_RESET		BIT(0)	/* Global software reset */
++#define INQ_RESET		BIT(2)
++#define TEQ_RESET		BIT(3)
++#define TDQ_RESET		BIT(4)
++#define PE_RESET		BIT(5)
++#define MEM_INIT		BIT(6)
++#define MEM_INIT_DONE		BIT(7)
++#define LLM_INIT		BIT(8)
++#define LLM_INIT_DONE		BIT(9)
++#define ECC_MEM_INIT_DONE	BIT(10)
++
++struct tmu_cfg {
++	u32 pe_sys_clk_ratio;
++	unsigned long llm_base_addr;
++	u32 llm_queue_len;
++};
++
++/* Not HW related: common defines for pfe_ctrl / pfe */
++#define DEFAULT_MAX_QDEPTH	80
++#define DEFAULT_Q0_QDEPTH	511 /* One large queue is kept for host TX QoS */
++#define DEFAULT_TMU3_QDEPTH	127
++
++#endif /* _TMU_CSR_H_ */
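
The reset/init bits at the end of the header imply the usual reset handshake; a sketch assuming these are TMU_CTRL bits and that the CBUS addresses have been ioremapped so readl/writel apply:

	writel(SW_RESET, TMU_CTRL);	/* global TMU software reset */
	while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
		cpu_relax();		/* wait for internal memory init */
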
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
+new file mode 100644
+index 00000000..ae623cda
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
+@@ -0,0 +1,61 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _UTIL_CSR_H_
++#define _UTIL_CSR_H_
++
++#define UTIL_VERSION	(UTIL_CSR_BASE_ADDR + 0x000)
++#define UTIL_TX_CTRL	(UTIL_CSR_BASE_ADDR + 0x004)
++#define UTIL_INQ_PKTPTR	(UTIL_CSR_BASE_ADDR + 0x010)
++
++#define UTIL_HDR_SIZE	(UTIL_CSR_BASE_ADDR + 0x014)
++
++#define UTIL_PE0_QB_DM_ADDR0	(UTIL_CSR_BASE_ADDR + 0x020)
++#define UTIL_PE0_QB_DM_ADDR1	(UTIL_CSR_BASE_ADDR + 0x024)
++#define UTIL_PE0_RO_DM_ADDR0	(UTIL_CSR_BASE_ADDR + 0x060)
++#define UTIL_PE0_RO_DM_ADDR1	(UTIL_CSR_BASE_ADDR + 0x064)
++
++#define UTIL_MEM_ACCESS_ADDR	(UTIL_CSR_BASE_ADDR + 0x100)
++#define UTIL_MEM_ACCESS_WDATA	(UTIL_CSR_BASE_ADDR + 0x104)
++#define UTIL_MEM_ACCESS_RDATA	(UTIL_CSR_BASE_ADDR + 0x108)
++
++#define UTIL_TM_INQ_ADDR	(UTIL_CSR_BASE_ADDR + 0x114)
++#define UTIL_PE_STATUS	(UTIL_CSR_BASE_ADDR + 0x118)
++
++#define UTIL_PE_SYS_CLK_RATIO	(UTIL_CSR_BASE_ADDR + 0x200)
++#define UTIL_AFULL_THRES	(UTIL_CSR_BASE_ADDR + 0x204)
++#define UTIL_GAP_BETWEEN_READS	(UTIL_CSR_BASE_ADDR + 0x208)
++#define UTIL_MAX_BUF_CNT	(UTIL_CSR_BASE_ADDR + 0x20c)
++#define UTIL_TSQ_FIFO_THRES	(UTIL_CSR_BASE_ADDR + 0x210)
++#define UTIL_TSQ_MAX_CNT	(UTIL_CSR_BASE_ADDR + 0x214)
++#define UTIL_IRAM_DATA_0	(UTIL_CSR_BASE_ADDR + 0x218)
++#define UTIL_IRAM_DATA_1	(UTIL_CSR_BASE_ADDR + 0x21c)
++#define UTIL_IRAM_DATA_2	(UTIL_CSR_BASE_ADDR + 0x220)
++#define UTIL_IRAM_DATA_3	(UTIL_CSR_BASE_ADDR + 0x224)
++
++#define UTIL_BUS_ACCESS_ADDR	(UTIL_CSR_BASE_ADDR + 0x228)
++#define UTIL_BUS_ACCESS_WDATA	(UTIL_CSR_BASE_ADDR + 0x22c)
++#define UTIL_BUS_ACCESS_RDATA	(UTIL_CSR_BASE_ADDR + 0x230)
++
++#define UTIL_INQ_AFULL_THRES	(UTIL_CSR_BASE_ADDR + 0x234)
++
++struct util_cfg {
++	u32 pe_sys_clk_ratio;
++};
++
++#endif /* _UTIL_CSR_H_ */
+diff --git a/drivers/staging/fsl_ppfe/include/pfe/pfe.h b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
+new file mode 100644
+index 00000000..d93ae4c6
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
+@@ -0,0 +1,372 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_H_
++#define _PFE_H_
++
++#include "cbus.h"
++
++#define CLASS_DMEM_BASE_ADDR(i)	(0x00000000 | ((i) << 20))
++/*
++ * Only valid for mem access register interface
++ */
++#define CLASS_IMEM_BASE_ADDR(i)	(0x00000000 | ((i) << 20))
++#define CLASS_DMEM_SIZE	0x00002000
++#define CLASS_IMEM_SIZE	0x00008000
++
++#define TMU_DMEM_BASE_ADDR(i)	(0x00000000 + ((i) << 20))
++/*
++ * Only valid for mem access register interface
++ */
++#define TMU_IMEM_BASE_ADDR(i)	(0x00000000 + ((i) << 20))
++#define TMU_DMEM_SIZE	0x00000800
++#define TMU_IMEM_SIZE	0x00002000
++
++#define UTIL_DMEM_BASE_ADDR	0x00000000
++#define UTIL_DMEM_SIZE	0x00002000
++
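++/*
++ * Worked example of the per-PE window arithmetic above (illustration
++ * only): the (i << 20) term places each PE's window 1 MiB apart, so
++ * CLASS_DMEM_BASE_ADDR(1) == 0x00100000 and
++ * CLASS_DMEM_BASE_ADDR(2) == 0x00200000.
++ */
++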
++#define PE_LMEM_BASE_ADDR	0xc3010000
++#define PE_LMEM_SIZE	0x8000
++#define PE_LMEM_END	(PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
++
++#define DMEM_BASE_ADDR	0x00000000
++#define DMEM_SIZE	0x2000	/* TMU has less... */
++#define DMEM_END	(DMEM_BASE_ADDR + DMEM_SIZE)
++
++#define PMEM_BASE_ADDR	0x00010000
++#define PMEM_SIZE	0x8000	/* TMU has less... */
++#define PMEM_END	(PMEM_BASE_ADDR + PMEM_SIZE)
++
++/* These check memory ranges from the PE's point of view / memory map */
++#define IS_DMEM(addr, len)				\
++	({ typeof(addr) addr_ = (addr);			\
++	((unsigned long)(addr_) >= DMEM_BASE_ADDR) &&	\
++	(((unsigned long)(addr_) + (len)) <= DMEM_END); })
++
++#define IS_PMEM(addr, len)				\
++	({ typeof(addr) addr_ = (addr);			\
++	((unsigned long)(addr_) >= PMEM_BASE_ADDR) &&	\
++	(((unsigned long)(addr_) + (len)) <= PMEM_END); })
++
++#define IS_PE_LMEM(addr, len)				\
++	({ typeof(addr) addr_ = (addr);			\
++	((unsigned long)(addr_) >=			\
++	PE_LMEM_BASE_ADDR) &&				\
++	(((unsigned long)(addr_) +			\
++	(len)) <= PE_LMEM_END); })
++
++#define IS_PFE_LMEM(addr, len)				\
++	({ typeof(addr) addr_ = (addr);			\
++	((unsigned long)(addr_) >=			\
++	CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) &&		\
++	(((unsigned long)(addr_) + (len)) <=		\
++	CBUS_VIRT_TO_PFE(LMEM_END)); })
++
++#define __IS_PHYS_DDR(addr, len)			\
++	({ typeof(addr) addr_ = (addr);			\
++	((unsigned long)(addr_) >=			\
++	DDR_PHYS_BASE_ADDR) &&				\
++	(((unsigned long)(addr_) + (len)) <=		\
++	DDR_PHYS_END); })
++
++#define IS_PHYS_DDR(addr, len)	__IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
++
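++/*
++ * Usage sketch for the range checks above (hypothetical values, for
++ * illustration only): a 64-byte object at the start of DMEM passes the
++ * DMEM check, while the same length at PMEM_BASE_ADDR does not:
++ *
++ *	IS_DMEM(0x0, 64)		// true: [0x0, 0x40) lies inside DMEM
++ *	IS_DMEM(PMEM_BASE_ADDR, 64)	// false: PMEM is outside DMEM
++ */
++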
++/*
++ * If using a run-time virtual address for the cbus base address use this code
++ */
++extern void *cbus_base_addr;
++extern void *ddr_base_addr;
++extern unsigned long ddr_phys_base_addr;
++extern unsigned int ddr_size;
++
++#define CBUS_BASE_ADDR	cbus_base_addr
++#define DDR_PHYS_BASE_ADDR	ddr_phys_base_addr
++#define DDR_BASE_ADDR	ddr_base_addr
++#define DDR_SIZE	ddr_size
++
++#define DDR_PHYS_END	(DDR_PHYS_BASE_ADDR + DDR_SIZE)
++
++#define LS1012A_PFE_RESET_WA	/*
++				 * PFE doesn't have a global reset; re-init
++				 * must take care of a few things to make
++				 * PFE functional after reset
++				 */
++#define PFE_CBUS_PHYS_BASE_ADDR	0xc0000000	/* CBUS physical base address
++						 * as seen by PEs.
++						 */
++/* CBUS physical base address as seen by PEs. */
++#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE	0xc0000000
++
++#define DDR_PHYS_TO_PFE(p)	(((unsigned long int)(p)) & 0x7FFFFFFF)
++#define DDR_PFE_TO_PHYS(p)	(((unsigned long int)(p)) | 0x80000000)
++#define CBUS_PHYS_TO_PFE(p)	(((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
++				PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
++/* Translates to PFE address map */
++
++#define DDR_PHYS_TO_VIRT(p)	(((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
++#define DDR_VIRT_TO_PHYS(v)	(((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
++#define DDR_VIRT_TO_PFE(p)	(DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
++
++#define CBUS_VIRT_TO_PFE(v)	(((v) - CBUS_BASE_ADDR) + \
++				PFE_CBUS_PHYS_BASE_ADDR)
++#define CBUS_PFE_TO_VIRT(p)	(((unsigned long int)(p) - \
++				PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
++
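++/*
++ * A minimal sketch of how the translation macros above compose (assumes
++ * pfe_lib_init() has already set cbus_base_addr/ddr_base_addr; the
++ * variable names below are hypothetical):
++ *
++ *	void *host_va = DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(pfe_addr));
++ *	pfe_addr = DDR_VIRT_TO_PFE(host_va);	// back to the PE's view
++ */
++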
++/* The code below is used by the QoS control driver on the host */
++#define TMU_APB_BASE_ADDR       0xc1000000      /* TMU base address as
++						 * seen by PEs
++						 */
++
++enum {
++	CLASS0_ID = 0,
++	CLASS1_ID,
++	CLASS2_ID,
++	CLASS3_ID,
++	CLASS4_ID,
++	CLASS5_ID,
++	TMU0_ID,
++	TMU1_ID,
++	TMU2_ID,
++	TMU3_ID,
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	UTIL_ID,
++#endif
++	MAX_PE
++};
++
++#define CLASS_MASK	(BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
++			BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
++			BIT(CLASS4_ID) | BIT(CLASS5_ID))
++#define CLASS_MAX_ID	CLASS5_ID
++
++#define TMU_MASK	(BIT(TMU0_ID) | BIT(TMU1_ID) |\
++			BIT(TMU3_ID))
++
++#define TMU_MAX_ID	TMU3_ID
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++#define UTIL_MASK	BIT(UTIL_ID)
++#endif
++
++struct pe_status {
++	u32	cpu_state;
++	u32	activity_counter;
++	u32	rx;
++	union {
++		u32	tx;
++		u32	tmu_qstatus;
++	};
++	u32	drop;
++#if defined(CFG_PE_DEBUG)
++	u32	debug_indicator;
++	u32	debug[16];
++#endif
++} __aligned(16);
++
++struct pe_sync_mailbox {
++	u32 stop;
++	u32 stopped;
++};
++
++/* Drop counter definitions */
++
++#define	CLASS_NUM_DROP_COUNTERS	13
++#define	UTIL_NUM_DROP_COUNTERS	8
++
++/* PE information.
++ * Structure containing PE's specific information. It is used to create
++ * generic C functions common to all PE's.
++ * Before using the library functions this structure needs to be initialized
++ * with the different registers virtual addresses
++ * (according to the ARM MMU mmaping). The default initialization supports a
++ * virtual == physical mapping.
++ */
++struct pe_info {
++	u32 dmem_base_addr;	/* PE's dmem base address */
++	u32 pmem_base_addr;	/* PE's pmem base address */
++	u32 pmem_size;	/* PE's pmem size */
++
++	void *mem_access_wdata;	/* PE's _MEM_ACCESS_WDATA register
++				 * address
++				 */
++	void *mem_access_addr;	/* PE's _MEM_ACCESS_ADDR register
++				 * address
++				 */
++	void *mem_access_rdata;	/* PE's _MEM_ACCESS_RDATA register
++				 * address
++				 */
++};
++
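++/*
++ * A minimal initialization sketch for the UTIL PE, assuming a
++ * virtual == physical mapping (hypothetical; real code must fill in the
++ * ioremap()ed register addresses instead of raw CSR constants):
++ *
++ *	struct pe_info util_pe = {
++ *		.dmem_base_addr = UTIL_DMEM_BASE_ADDR,
++ *		.pmem_base_addr = 0,	// UTIL has no pmem window here
++ *		.pmem_size = 0,
++ *		.mem_access_wdata = (void *)UTIL_MEM_ACCESS_WDATA,
++ *		.mem_access_addr = (void *)UTIL_MEM_ACCESS_ADDR,
++ *		.mem_access_rdata = (void *)UTIL_MEM_ACCESS_RDATA,
++ *	};
++ */
++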
++void pe_lmem_read(u32 *dst, u32 len, u32 offset);
++void pe_lmem_write(u32 *src, u32 len, u32 offset);
++
++void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
++void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
++
++u32 pe_pmem_read(int id, u32 addr, u8 size);
++
++void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
++u32 pe_dmem_read(int id, u32 addr, u8 size);
++void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
++void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
++void class_bus_write(u32 val, u32 addr, u8 size);
++u32 class_bus_read(u32 addr, u8 size);
++
++#define class_bus_readl(addr)	class_bus_read(addr, 4)
++#define class_bus_readw(addr)	class_bus_read(addr, 2)
++#define class_bus_readb(addr)	class_bus_read(addr, 1)
++
++#define class_bus_writel(val, addr)	class_bus_write(val, addr, 4)
++#define class_bus_writew(val, addr)	class_bus_write(val, addr, 2)
++#define class_bus_writeb(val, addr)	class_bus_write(val, addr, 1)
++
++#define pe_dmem_readl(id, addr)	pe_dmem_read(id, addr, 4)
++#define pe_dmem_readw(id, addr)	pe_dmem_read(id, addr, 2)
++#define pe_dmem_readb(id, addr)	pe_dmem_read(id, addr, 1)
++
++#define pe_dmem_writel(id, val, addr)	pe_dmem_write(id, val, addr, 4)
++#define pe_dmem_writew(id, val, addr)	pe_dmem_write(id, val, addr, 2)
++#define pe_dmem_writeb(id, val, addr)	pe_dmem_write(id, val, addr, 1)
++
++/*int pe_load_elf_section(int id, const void *data, elf32_shdr *shdr); */
++int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
++			struct device *dev);
++
++void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
++		  unsigned int ddr_size);
++void bmu_init(void *base, struct BMU_CFG *cfg);
++void bmu_reset(void *base);
++void bmu_enable(void *base);
++void bmu_disable(void *base);
++void bmu_set_config(void *base, struct BMU_CFG *cfg);
++
++/*
++ * An enumerated type for loopback values.  This can be one of three values:
++ * no loopback (normal operation), local loopback (through the internal
++ * loopback module of the MAC), or PHY loopback (through the external PHY).
++ */
++#ifndef __MAC_LOOP_ENUM__
++#define __MAC_LOOP_ENUM__
++enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
++#endif
++
++void gemac_init(void *base, void *config);
++void gemac_disable_rx_checksum_offload(void *base);
++void gemac_enable_rx_checksum_offload(void *base);
++void gemac_set_mdc_div(void *base, int mdc_div);
++void gemac_set_speed(void *base, enum mac_speed gem_speed);
++void gemac_set_duplex(void *base, int duplex);
++void gemac_set_mode(void *base, int mode);
++void gemac_enable(void *base);
++void gemac_tx_disable(void *base);
++void gemac_tx_enable(void *base);
++void gemac_disable(void *base);
++void gemac_reset(void *base);
++void gemac_set_address(void *base, struct spec_addr *addr);
++struct spec_addr gemac_get_address(void *base);
++void gemac_set_loop(void *base, enum mac_loop gem_loop);
++void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
++void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
++void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
++void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
++void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
++		      unsigned int entry_index);
++void gemac_clear_laddr1(void *base);
++void gemac_clear_laddr2(void *base);
++void gemac_clear_laddr3(void *base);
++void gemac_clear_laddr4(void *base);
++void gemac_clear_laddrN(void *base, unsigned int entry_index);
++struct pfe_mac_addr gemac_get_hash(void *base);
++void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
++struct pfe_mac_addr gem_get_laddr1(void *base);
++struct pfe_mac_addr gem_get_laddr2(void *base);
++struct pfe_mac_addr gem_get_laddr3(void *base);
++struct pfe_mac_addr gem_get_laddr4(void *base);
++struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
++void gemac_set_config(void *base, struct gemac_cfg *cfg);
++void gemac_allow_broadcast(void *base);
++void gemac_no_broadcast(void *base);
++void gemac_enable_1536_rx(void *base);
++void gemac_disable_1536_rx(void *base);
++void gemac_enable_rx_jmb(void *base);
++void gemac_disable_rx_jmb(void *base);
++void gemac_enable_stacked_vlan(void *base);
++void gemac_disable_stacked_vlan(void *base);
++void gemac_enable_pause_rx(void *base);
++void gemac_disable_pause_rx(void *base);
++void gemac_enable_copy_all(void *base);
++void gemac_disable_copy_all(void *base);
++void gemac_set_bus_width(void *base, int width);
++void gemac_set_wol(void *base, u32 wol_conf);
++
++void gpi_init(void *base, struct gpi_cfg *cfg);
++void gpi_reset(void *base);
++void gpi_enable(void *base);
++void gpi_disable(void *base);
++void gpi_set_config(void *base, struct gpi_cfg *cfg);
++
++void class_init(struct class_cfg *cfg);
++void class_reset(void);
++void class_enable(void);
++void class_disable(void);
++void class_set_config(struct class_cfg *cfg);
++
++void tmu_reset(void);
++void tmu_init(struct tmu_cfg *cfg);
++void tmu_enable(u32 pe_mask);
++void tmu_disable(u32 pe_mask);
++u32  tmu_qstatus(u32 if_id);
++u32  tmu_pkts_processed(u32 if_id);
++
++void util_init(struct util_cfg *cfg);
++void util_reset(void);
++void util_enable(void);
++void util_disable(void);
++
++void hif_init(void);
++void hif_tx_enable(void);
++void hif_tx_disable(void);
++void hif_rx_enable(void);
++void hif_rx_disable(void);
++
++/* Get Chip Revision level
++ *
++ */
++static inline unsigned int CHIP_REVISION(void)
++{
++	/* For LS1012A, always return 1 */
++	return 1;
++}
++
++/* Start HIF rx DMA
++ *
++ */
++static inline void hif_rx_dma_start(void)
++{
++	writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
++}
++
++/* Start HIF tx DMA
++ *
++ */
++static inline void hif_tx_dma_start(void)
++{
++	writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
++}
++
++#endif /* _PFE_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_ctrl.c b/drivers/staging/fsl_ppfe/pfe_ctrl.c
+new file mode 100644
+index 00000000..dfa8547c
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
+@@ -0,0 +1,238 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/kthread.h>
++
++#include "pfe_mod.h"
++#include "pfe_ctrl.h"
++
++#define TIMEOUT_MS	1000
++
++int relax(unsigned long end)
++{
++	if (time_after(jiffies, end)) {
++		if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
++			return -1;
++
++		if (need_resched())
++			schedule();
++	}
++
++	return 0;
++}
++
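++/*
++ * Typical polling pattern built on relax() (a sketch mirroring the
++ * callers below; condition_met() is a placeholder):
++ *
++ *	unsigned long end = jiffies + 2;
++ *
++ *	while (!condition_met()) {
++ *		if (relax(end) < 0)
++ *			return -EIO;	// gave up after ~TIMEOUT_MS
++ *	}
++ */
++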
++void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
++{
++	int id;
++
++	mutex_lock(&ctrl->mutex);
++
++	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
++		pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
++
++	for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
++		if (id == TMU2_ID)
++			continue;
++		pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
++	}
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
++#endif
++	mutex_unlock(&ctrl->mutex);
++}
++
++void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
++{
++	int pe_mask = CLASS_MASK | TMU_MASK;
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	pe_mask |= UTIL_MASK;
++#endif
++	mutex_lock(&ctrl->mutex);
++	pe_start(&pfe->ctrl, pe_mask);
++	mutex_unlock(&ctrl->mutex);
++}
++
++/* PE sync stop.
++ * Stops packet processing for a set of PEs (specified using a bitmask).
++ * The caller must hold ctrl->mutex.
++ *
++ * @param ctrl		Control context
++ * @param pe_mask	Mask of PE IDs to stop
++ *
++ */
++int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
++{
++	struct pe_sync_mailbox *mbox;
++	int pe_stopped = 0;
++	unsigned long end = jiffies + 2;
++	int i;
++
++	pe_mask &= 0x2FF;  /* exclude UTIL + TMU2 */
++
++	for (i = 0; i < MAX_PE; i++)
++		if (pe_mask & (1 << i)) {
++			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
++
++			pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
++					long)&mbox->stop, 4);
++		}
++
++	while (pe_stopped != pe_mask) {
++		for (i = 0; i < MAX_PE; i++)
++			if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
++				mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
++
++				if (pe_dmem_read(i, (unsigned
++					long)&mbox->stopped, 4) &
++					cpu_to_be32(0x1))
++					pe_stopped |= (1 << i);
++			}
++
++		if (relax(end) < 0)
++			goto err;
++	}
++
++	return 0;
++
++err:
++	pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
++
++	for (i = 0; i < MAX_PE; i++)
++		if (pe_mask & (1 << i)) {
++			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
++
++			pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
++					long)&mbox->stop, 4);
++	}
++
++	return -EIO;
++}
++
++/* PE start.
++ * Starts packet processing for a set of PEs (specified using a bitmask).
++ * The caller must hold ctrl->mutex.
++ *
++ * @param ctrl		Control context
++ * @param pe_mask	Mask of PE IDs to start
++ *
++ */
++void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
++{
++	struct pe_sync_mailbox *mbox;
++	int i;
++
++	for (i = 0; i < MAX_PE; i++)
++		if (pe_mask & (1 << i)) {
++			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
++
++			pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
++					long)&mbox->stop, 4);
++		}
++}
++
++/* This function will ensure all PEs are put into the idle state */
++int pe_reset_all(struct pfe_ctrl *ctrl)
++{
++	struct pe_sync_mailbox *mbox;
++	int pe_stopped = 0;
++	unsigned long end = jiffies + 2;
++	int i;
++	int pe_mask  = CLASS_MASK | TMU_MASK;
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	pe_mask |= UTIL_MASK;
++#endif
++
++	for (i = 0; i < MAX_PE; i++)
++		if (pe_mask & (1 << i)) {
++			mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
++
++			pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
++					long)&mbox->stop, 4);
++		}
++
++	while (pe_stopped != pe_mask) {
++		for (i = 0; i < MAX_PE; i++)
++			if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
++				mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
++
++				if (pe_dmem_read(i, (unsigned long)
++							&mbox->stopped, 4) &
++						cpu_to_be32(0x1))
++					pe_stopped |= (1 << i);
++			}
++
++		if (relax(end) < 0)
++			goto err;
++	}
++
++	return 0;
++
++err:
++	pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
++	return -EIO;
++}
++
++int pfe_ctrl_init(struct pfe *pfe)
++{
++	struct pfe_ctrl *ctrl = &pfe->ctrl;
++	int id;
++
++	pr_info("%s\n", __func__);
++
++	mutex_init(&ctrl->mutex);
++	spin_lock_init(&ctrl->lock);
++
++	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
++		ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
++		ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
++	}
++
++	for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
++		if (id == TMU2_ID)
++			continue;
++		ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
++		ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
++	}
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
++	ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
++#endif
++
++	ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
++	ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
++						ROUTE_TABLE_BASEADDR;
++
++	ctrl->dev = pfe->dev;
++
++	pr_info("%s finished\n", __func__);
++
++	return 0;
++}
++
++void pfe_ctrl_exit(struct pfe *pfe)
++{
++	pr_info("%s\n", __func__);
++}
+diff --git a/drivers/staging/fsl_ppfe/pfe_ctrl.h b/drivers/staging/fsl_ppfe/pfe_ctrl.h
+new file mode 100644
+index 00000000..22115c76
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
+@@ -0,0 +1,112 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_CTRL_H_
++#define _PFE_CTRL_H_
++
++#include <linux/dmapool.h>
++
++#include "pfe_mod.h"
++#include "pfe/pfe.h"
++
++#define DMA_BUF_SIZE_128	0x80	/* enough for 1 conntrack */
++#define DMA_BUF_SIZE_256	0x100
++/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
++#define DMA_BUF_SIZE_512	0x200
++/* 512-byte DMA-allocated buffers used by the RTP relay feature */
++#define DMA_BUF_MIN_ALIGNMENT	8
++#define DMA_BUF_BOUNDARY	(4 * 1024)
++/* bursts cannot cross a 4k boundary */
++
++#define CMD_TX_ENABLE	0x0501
++#define CMD_TX_DISABLE	0x0502
++
++#define CMD_RX_LRO		0x0011
++#define CMD_PKTCAP_ENABLE       0x0d01
++#define CMD_QM_EXPT_RATE	0x020c
++
++#define CLASS_DM_SH_STATIC		(0x800)
++#define CLASS_DM_CPU_TICKS		(CLASS_DM_SH_STATIC)
++#define CLASS_DM_SYNC_MBOX		(0x808)
++#define CLASS_DM_MSG_MBOX		(0x810)
++#define CLASS_DM_DROP_CNTR		(0x820)
++#define CLASS_DM_RESUME			(0x854)
++#define CLASS_DM_PESTATUS		(0x860)
++
++#define TMU_DM_SH_STATIC		(0x80)
++#define TMU_DM_CPU_TICKS		(TMU_DM_SH_STATIC)
++#define TMU_DM_SYNC_MBOX		(0x88)
++#define TMU_DM_MSG_MBOX			(0x90)
++#define TMU_DM_RESUME			(0xA0)
++#define TMU_DM_PESTATUS			(0xB0)
++#define TMU_DM_CONTEXT			(0x300)
++#define TMU_DM_TX_TRANS			(0x480)
++
++#define UTIL_DM_SH_STATIC		(0x0)
++#define UTIL_DM_CPU_TICKS		(UTIL_DM_SH_STATIC)
++#define UTIL_DM_SYNC_MBOX		(0x8)
++#define UTIL_DM_MSG_MBOX		(0x10)
++#define UTIL_DM_DROP_CNTR		(0x20)
++#define UTIL_DM_RESUME			(0x40)
++#define UTIL_DM_PESTATUS		(0x50)
++
++struct pfe_ctrl {
++	struct mutex mutex; /* to serialize pfe control access */
++	spinlock_t lock;
++
++	void *dma_pool;
++	void *dma_pool_512;
++	void *dma_pool_128;
++
++	struct device *dev;
++
++	void *hash_array_baseaddr;		/*
++						 * Virtual base address of
++						 * the conntrack hash array
++						 */
++	unsigned long hash_array_phys_baseaddr; /*
++						 * Physical base address of
++						 * the conntrack hash array
++						 */
++
++	int (*event_cb)(u16, u16, u16*);
++
++	unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
++						      * Sync mailbox PFE
++						      * internal address,
++						      * initialized
++						      * when parsing elf images
++						      */
++	unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
++						     * Msg mailbox PFE internal
++						     * address, initialized
++						     * when parsing elf images
++						     */
++	unsigned int sys_clk;			/* AXI clock value, in KHz */
++};
++
++int pfe_ctrl_init(struct pfe *pfe);
++void pfe_ctrl_exit(struct pfe *pfe);
++int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
++void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
++int pe_reset_all(struct pfe_ctrl *ctrl);
++void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
++void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
++int relax(unsigned long end);
++
++#endif /* _PFE_CTRL_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_debugfs.c b/drivers/staging/fsl_ppfe/pfe_debugfs.c
+new file mode 100644
+index 00000000..4156610d
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
+@@ -0,0 +1,111 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include <linux/platform_device.h>
++
++#include "pfe_mod.h"
++
++static int dmem_show(struct seq_file *s, void *unused)
++{
++	u32 dmem_addr, val;
++	int id = (long int)s->private;
++	int i;
++
++	for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
++		seq_printf(s, "%04x:", dmem_addr);
++
++		for (i = 0; i < 8; i++) {
++			val = pe_dmem_read(id, dmem_addr + i * 4, 4);
++			seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
++				   (val >> 8) & 0xff, (val >> 16) & 0xff,
++				   (val >> 24) & 0xff);
++		}
++
++		seq_puts(s, "\n");
++	}
++
++	return 0;
++}
++
++static int dmem_open(struct inode *inode, struct file *file)
++{
++	return single_open(file, dmem_show, inode->i_private);
++}
++
++static const struct file_operations dmem_fops = {
++	.open		= dmem_open,
++	.read		= seq_read,
++	.llseek		= seq_lseek,
++	.release	= single_release,
++};
++
++int pfe_debugfs_init(struct pfe *pfe)
++{
++	struct dentry *d;
++
++	pr_info("%s\n", __func__);
++
++	pfe->dentry = debugfs_create_dir("pfe", NULL);
++	if (IS_ERR_OR_NULL(pfe->dentry))
++		goto err_dir;
++
++	d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
++				&dmem_fops);
++	if (IS_ERR_OR_NULL(d))
++		goto err_pe;
++
++	d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
++				&dmem_fops);
++	if (IS_ERR_OR_NULL(d))
++		goto err_pe;
++
++	d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
++				&dmem_fops);
++	if (IS_ERR_OR_NULL(d))
++		goto err_pe;
++
++	d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
++				&dmem_fops);
++	if (IS_ERR_OR_NULL(d))
++		goto err_pe;
++
++	d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
++				&dmem_fops);
++	if (IS_ERR_OR_NULL(d))
++		goto err_pe;
++
++	d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
++				&dmem_fops);
++	if (IS_ERR_OR_NULL(d))
++		goto err_pe;
++
++	return 0;
++
++err_pe:
++	debugfs_remove_recursive(pfe->dentry);
++
++err_dir:
++	return -1;
++}
++
++void pfe_debugfs_exit(struct pfe *pfe)
++{
++	debugfs_remove_recursive(pfe->dentry);
++}
+diff --git a/drivers/staging/fsl_ppfe/pfe_debugfs.h b/drivers/staging/fsl_ppfe/pfe_debugfs.h
+new file mode 100644
+index 00000000..301d9fc2
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
+@@ -0,0 +1,25 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_DEBUGFS_H_
++#define _PFE_DEBUGFS_H_
++
++int pfe_debugfs_init(struct pfe *pfe);
++void pfe_debugfs_exit(struct pfe *pfe);
++
++#endif /* _PFE_DEBUGFS_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_eth.c b/drivers/staging/fsl_ppfe/pfe_eth.c
+new file mode 100644
+index 00000000..02cd7c52
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_eth.c
+@@ -0,0 +1,2434 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++/* @pfe_eth.c.
++ *  Ethernet driver to handle the exception path for PFE.
++ *  - uses HIF functions to send/receive packets.
++ *  - uses ctrl function to start/stop interfaces.
++ *  - uses direct register accesses to control phy operation.
++ */
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++#include <linux/phy.h>
++#include <linux/timer.h>
++#include <linux/hrtimer.h>
++#include <linux/platform_device.h>
++
++#include <net/ip.h>
++#include <net/sock.h>
++
++#include <linux/io.h>
++#include <asm/irq.h>
++#include <linux/delay.h>
++#include <linux/regmap.h>
++#include <linux/i2c.h>
++
++#if defined(CONFIG_NF_CONNTRACK_MARK)
++#include <net/netfilter/nf_conntrack.h>
++#endif
++
++#include "pfe_mod.h"
++#include "pfe_eth.h"
++
++static void *cbus_emac_base[3];
++static void *cbus_gpi_base[3];
++
++/* Forward Declaration */
++static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
++static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
++static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
++				from_tx, int n_desc);
++
++unsigned int gemac_regs[] = {
++	0x0004, /* Interrupt event */
++	0x0008, /* Interrupt mask */
++	0x0024, /* Ethernet control */
++	0x0064, /* MIB Control/Status */
++	0x0084, /* Receive control/status */
++	0x00C4, /* Transmit control */
++	0x00E4, /* Physical address low */
++	0x00E8, /* Physical address high */
++	0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
++	0x0190, /* Receive FIFO Section Full Threshold */
++	0x01A0, /* Transmit FIFO Section Empty Threshold */
++	0x01B0, /* Frame Truncation Length */
++};
++
++/********************************************************************/
++/*                   SYSFS INTERFACE				    */
++/********************************************************************/
++
++#ifdef PFE_ETH_NAPI_STATS
++/*
++ * pfe_eth_show_napi_stats
++ */
++static ssize_t pfe_eth_show_napi_stats(struct device *dev,
++				       struct device_attribute *attr,
++				       char *buf)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
++	ssize_t len = 0;
++
++	len += sprintf(buf + len, "sched:  %u\n",
++			priv->napi_counters[NAPI_SCHED_COUNT]);
++	len += sprintf(buf + len, "poll:   %u\n",
++			priv->napi_counters[NAPI_POLL_COUNT]);
++	len += sprintf(buf + len, "packet: %u\n",
++			priv->napi_counters[NAPI_PACKET_COUNT]);
++	len += sprintf(buf + len, "budget: %u\n",
++			priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
++	len += sprintf(buf + len, "desc:   %u\n",
++			priv->napi_counters[NAPI_DESC_COUNT]);
++
++	return len;
++}
++
++/*
++ * pfe_eth_set_napi_stats
++ */
++static ssize_t pfe_eth_set_napi_stats(struct device *dev,
++				      struct device_attribute *attr,
++				      const char *buf, size_t count)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
++
++	memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
++
++	return count;
++}
++#endif
++#ifdef PFE_ETH_TX_STATS
++/* pfe_eth_show_tx_stats
++ *
++ */
++static ssize_t pfe_eth_show_tx_stats(struct device *dev,
++				     struct device_attribute *attr,
++				     char *buf)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
++	ssize_t len = 0;
++	int i;
++
++	len += sprintf(buf + len, "TX queues stats:\n");
++
++	for (i = 0; i < emac_txq_cnt; i++) {
++		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
++									i);
++
++		len += sprintf(buf + len, "\n");
++		__netif_tx_lock_bh(tx_queue);
++
++		hif_tx_lock(&pfe->hif);
++		len += sprintf(buf + len,
++				"Queue %2d :  credits               = %10d\n"
++				, i, hif_lib_tx_credit_avail(pfe, priv->id, i));
++		len += sprintf(buf + len,
++				 "            tx packets            = %10d\n"
++				,  pfe->tmu_credit.tx_packets[priv->id][i]);
++		hif_tx_unlock(&pfe->hif);
++
++		/* Don't output additional stats if the queue was never used */
++		if (!pfe->tmu_credit.tx_packets[priv->id][i])
++			goto skip;
++
++		len += sprintf(buf + len,
++				 "            clean_fail            = %10d\n"
++				, priv->clean_fail[i]);
++		len += sprintf(buf + len,
++				 "            stop_queue            = %10d\n"
++				, priv->stop_queue_total[i]);
++		len += sprintf(buf + len,
++				 "            stop_queue_hif        = %10d\n"
++				, priv->stop_queue_hif[i]);
++		len += sprintf(buf + len,
++				"            stop_queue_hif_client = %10d\n"
++				, priv->stop_queue_hif_client[i]);
++		len += sprintf(buf + len,
++				 "            stop_queue_credit     = %10d\n"
++				, priv->stop_queue_credit[i]);
++skip:
++		__netif_tx_unlock_bh(tx_queue);
++	}
++	return len;
++}
++
++/* pfe_eth_set_tx_stats
++ *
++ */
++static ssize_t pfe_eth_set_tx_stats(struct device *dev,
++				    struct device_attribute *attr,
++				    const char *buf, size_t count)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
++	int i;
++
++	for (i = 0; i < emac_txq_cnt; i++) {
++		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
++									i);
++
++		__netif_tx_lock_bh(tx_queue);
++		priv->clean_fail[i] = 0;
++		priv->stop_queue_total[i] = 0;
++		priv->stop_queue_hif[i] = 0;
++		priv->stop_queue_hif_client[i] = 0;
++		priv->stop_queue_credit[i] = 0;
++		__netif_tx_unlock_bh(tx_queue);
++	}
++
++	return count;
++}
++#endif
++/* pfe_eth_show_txavail
++ *
++ */
++static ssize_t pfe_eth_show_txavail(struct device *dev,
++				    struct device_attribute *attr,
++				    char *buf)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
++	ssize_t len = 0;
++	int i;
++
++	for (i = 0; i < emac_txq_cnt; i++) {
++		struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
++									i);
++
++		__netif_tx_lock_bh(tx_queue);
++
++		len += sprintf(buf + len, "%d",
++				hif_lib_tx_avail(&priv->client, i));
++
++		__netif_tx_unlock_bh(tx_queue);
++
++		if (i == (emac_txq_cnt - 1))
++			len += sprintf(buf + len, "\n");
++		else
++			len += sprintf(buf + len, " ");
++	}
++
++	return len;
++}
++
++/* pfe_eth_show_default_priority
++ *
++ */
++static ssize_t pfe_eth_show_default_priority(struct device *dev,
++					     struct device_attribute *attr,
++						char *buf)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
++	unsigned long flags;
++	int rc;
++
++	spin_lock_irqsave(&priv->lock, flags);
++	rc = sprintf(buf, "%d\n", priv->default_priority);
++	spin_unlock_irqrestore(&priv->lock, flags);
++
++	return rc;
++}
++
++/* pfe_eth_set_default_priority
++ *
++ */
++
++static ssize_t pfe_eth_set_default_priority(struct device *dev,
++					    struct device_attribute *attr,
++					    const char *buf, size_t count)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
++	unsigned long flags;
++	unsigned long priority;
++	int rc;
++
++	/* kstrtoul() returns a status code, not the parsed value */
++	rc = kstrtoul(buf, 0, &priority);
++	if (rc)
++		return rc;
++
++	spin_lock_irqsave(&priv->lock, flags);
++	priv->default_priority = priority;
++	spin_unlock_irqrestore(&priv->lock, flags);
++
++	return count;
++}
++
++static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
++static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
++			pfe_eth_set_default_priority);
++
++#ifdef PFE_ETH_NAPI_STATS
++static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
++			pfe_eth_set_napi_stats);
++#endif
++
++#ifdef PFE_ETH_TX_STATS
++static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
++			pfe_eth_set_tx_stats);
++#endif
++
++/*
++ * pfe_eth_sysfs_init
++ *
++ */
++static int pfe_eth_sysfs_init(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	int err;
++
++	/* Initialize the default values */
++
++	/*
++	 * By default, packets without conntrack will use this default high
++	 * priority queue
++	 */
++	priv->default_priority = 15;
++
++	/* Create our sysfs files */
++	err = device_create_file(&ndev->dev, &dev_attr_default_priority);
++	if (err) {
++		netdev_err(ndev,
++			   "failed to create default_priority sysfs files\n");
++		goto err_priority;
++	}
++
++	err = device_create_file(&ndev->dev, &dev_attr_txavail);
++	if (err) {
++		netdev_err(ndev,
++			   "failed to create txavail sysfs files\n");
++		goto err_txavail;
++	}
++
++#ifdef PFE_ETH_NAPI_STATS
++	err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
++	if (err) {
++		netdev_err(ndev, "failed to create napi stats sysfs files\n");
++		goto err_napi;
++	}
++#endif
++
++#ifdef PFE_ETH_TX_STATS
++	err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
++	if (err) {
++		netdev_err(ndev, "failed to create tx stats sysfs files\n");
++		goto err_tx;
++	}
++#endif
++
++	return 0;
++
++#ifdef PFE_ETH_TX_STATS
++err_tx:
++#endif
++#ifdef PFE_ETH_NAPI_STATS
++	device_remove_file(&ndev->dev, &dev_attr_napi_stats);
++
++err_napi:
++#endif
++	device_remove_file(&ndev->dev, &dev_attr_txavail);
++
++err_txavail:
++	device_remove_file(&ndev->dev, &dev_attr_default_priority);
++
++err_priority:
++	return -1;
++}
++
++/* pfe_eth_sysfs_exit
++ *
++ */
++void pfe_eth_sysfs_exit(struct net_device *ndev)
++{
++#ifdef PFE_ETH_TX_STATS
++	device_remove_file(&ndev->dev, &dev_attr_tx_stats);
++#endif
++
++#ifdef PFE_ETH_NAPI_STATS
++	device_remove_file(&ndev->dev, &dev_attr_napi_stats);
++#endif
++	device_remove_file(&ndev->dev, &dev_attr_txavail);
++	device_remove_file(&ndev->dev, &dev_attr_default_priority);
++}
++
++/*************************************************************************/
++/*		ETHTOOL INTERFACE					 */
++/*************************************************************************/
++
++/* MTIP GEMAC */
++static const struct fec_stat {
++	char name[ETH_GSTRING_LEN];
++	u16 offset;
++} fec_stats[] = {
++	/* RMON TX */
++	{ "tx_dropped", RMON_T_DROP },
++	{ "tx_packets", RMON_T_PACKETS },
++	{ "tx_broadcast", RMON_T_BC_PKT },
++	{ "tx_multicast", RMON_T_MC_PKT },
++	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
++	{ "tx_undersize", RMON_T_UNDERSIZE },
++	{ "tx_oversize", RMON_T_OVERSIZE },
++	{ "tx_fragment", RMON_T_FRAG },
++	{ "tx_jabber", RMON_T_JAB },
++	{ "tx_collision", RMON_T_COL },
++	{ "tx_64byte", RMON_T_P64 },
++	{ "tx_65to127byte", RMON_T_P65TO127 },
++	{ "tx_128to255byte", RMON_T_P128TO255 },
++	{ "tx_256to511byte", RMON_T_P256TO511 },
++	{ "tx_512to1023byte", RMON_T_P512TO1023 },
++	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
++	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
++	{ "tx_octets", RMON_T_OCTETS },
++
++	/* IEEE TX */
++	{ "IEEE_tx_drop", IEEE_T_DROP },
++	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
++	{ "IEEE_tx_1col", IEEE_T_1COL },
++	{ "IEEE_tx_mcol", IEEE_T_MCOL },
++	{ "IEEE_tx_def", IEEE_T_DEF },
++	{ "IEEE_tx_lcol", IEEE_T_LCOL },
++	{ "IEEE_tx_excol", IEEE_T_EXCOL },
++	{ "IEEE_tx_macerr", IEEE_T_MACERR },
++	{ "IEEE_tx_cserr", IEEE_T_CSERR },
++	{ "IEEE_tx_sqe", IEEE_T_SQE },
++	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
++	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
++
++	/* RMON RX */
++	{ "rx_packets", RMON_R_PACKETS },
++	{ "rx_broadcast", RMON_R_BC_PKT },
++	{ "rx_multicast", RMON_R_MC_PKT },
++	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
++	{ "rx_undersize", RMON_R_UNDERSIZE },
++	{ "rx_oversize", RMON_R_OVERSIZE },
++	{ "rx_fragment", RMON_R_FRAG },
++	{ "rx_jabber", RMON_R_JAB },
++	{ "rx_64byte", RMON_R_P64 },
++	{ "rx_65to127byte", RMON_R_P65TO127 },
++	{ "rx_128to255byte", RMON_R_P128TO255 },
++	{ "rx_256to511byte", RMON_R_P256TO511 },
++	{ "rx_512to1023byte", RMON_R_P512TO1023 },
++	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
++	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
++	{ "rx_octets", RMON_R_OCTETS },
++
++	/* IEEE RX */
++	{ "IEEE_rx_drop", IEEE_R_DROP },
++	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
++	{ "IEEE_rx_crc", IEEE_R_CRC },
++	{ "IEEE_rx_align", IEEE_R_ALIGN },
++	{ "IEEE_rx_macerr", IEEE_R_MACERR },
++	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
++	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
++};
++
++static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
++				*stats, u64 *data)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
++		data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
++}
++
++static void pfe_eth_gstrings(struct net_device *netdev,
++			     u32 stringset, u8 *data)
++{
++	int i;
++
++	switch (stringset) {
++	case ETH_SS_STATS:
++		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
++			memcpy(data + i * ETH_GSTRING_LEN,
++			       fec_stats[i].name, ETH_GSTRING_LEN);
++		break;
++	}
++}
++
++static int pfe_eth_stats_count(struct net_device *ndev, int sset)
++{
++	switch (sset) {
++	case ETH_SS_STATS:
++		return ARRAY_SIZE(fec_stats);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++/*
++ * pfe_eth_gemac_reglen - Return the length, in bytes, of the register dump.
++ *
++ */
++static int pfe_eth_gemac_reglen(struct net_device *ndev)
++{
++	pr_info("%s()\n", __func__);
++	return sizeof(gemac_regs);
++}
++
++/*
++ * pfe_eth_gemac_get_regs - Return the gemac register structure.
++ *
++ */
++static void  pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
++					*regs, void *regbuf)
++{
++	int i;
++
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	u32 *buf = (u32 *)regbuf;
++
++	pr_info("%s()\n", __func__);
++	for (i = 0; i < ARRAY_SIZE(gemac_regs); i++)
++		buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
++}
++
++/*
++ * pfe_eth_set_wol - Set the magic packet option, in WoL register.
++ *
++ */
++static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	if (wol->wolopts & ~WAKE_MAGIC)
++		return -EOPNOTSUPP;
++
++	/* for MTIP we store wol->wolopts */
++	priv->wol = wol->wolopts;
++
++	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
++
++	return 0;
++}
++
++/*
++ *
++ * pfe_eth_get_wol - Get the WoL options.
++ *
++ */
++static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
++				*wol)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	wol->supported = WAKE_MAGIC;
++	wol->wolopts = 0;
++
++	if (priv->wol & WAKE_MAGIC)
++		wol->wolopts = WAKE_MAGIC;
++
++	memset(&wol->sopass, 0, sizeof(wol->sopass));
++}
++
++/*
++ * pfe_eth_get_drvinfo -  Fills in the drvinfo structure with some basic info
++ *
++ */
++static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
++				*drvinfo)
++{
++	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
++	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
++	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
++	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
++}
++
++/*
++ * pfe_eth_set_settings - Used to send commands to PHY.
++ *
++ */
++static int pfe_eth_set_settings(struct net_device *ndev,
++				const struct ethtool_link_ksettings *cmd)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	struct phy_device *phydev = priv->phydev;
++
++	if (!phydev)
++		return -ENODEV;
++
++	return phy_ethtool_ksettings_set(phydev, cmd);
++}
++
++/*
++ * pfe_eth_get_settings - Return the current settings in the
++ * ethtool_link_ksettings structure.
++ *
++ */
++static int pfe_eth_get_settings(struct net_device *ndev,
++				struct ethtool_link_ksettings *cmd)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	struct phy_device *phydev = priv->phydev;
++
++	if (!phydev)
++		return -ENODEV;
++
++	return phy_ethtool_ksettings_get(phydev, cmd);
++}
++
++/*
++ * pfe_eth_get_msglevel - Gets the debug message mask.
++ *
++ */
++static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	return priv->msg_enable;
++}
++
++/*
++ * pfe_eth_set_msglevel - Sets the debug message mask.
++ *
++ */
++static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	priv->msg_enable = data;
++}
++
++#define HIF_RX_COAL_MAX_CLKS		(~(1 << 31))
++#define HIF_RX_COAL_CLKS_PER_USEC	(pfe->ctrl.sys_clk / 1000)
++#define HIF_RX_COAL_MAX_USECS		(HIF_RX_COAL_MAX_CLKS	/ \
++						HIF_RX_COAL_CLKS_PER_USEC)
++
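++/*
++ * Worked example (illustrative; assumes a 250 MHz AXI clock, i.e.
++ * pfe->ctrl.sys_clk == 250000 kHz): one microsecond is then 250 clocks,
++ * so HIF_RX_COAL_MAX_USECS == 0x7fffffff / 250 ~= 8589934 us (~8.6 s).
++ */
++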
++/*
++ * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
++ *
++ */
++static int pfe_eth_set_coalesce(struct net_device *ndev,
++				struct ethtool_coalesce *ec)
++{
++	if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
++		return -EINVAL;
++
++	if (!ec->rx_coalesce_usecs) {
++		writel(0, HIF_INT_COAL);
++		return 0;
++	}
++
++	writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
++			HIF_INT_COAL_ENABLE, HIF_INT_COAL);
++
++	return 0;
++}
++
++/*
++ * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
++ *
++ */
++static int pfe_eth_get_coalesce(struct net_device *ndev,
++				struct ethtool_coalesce *ec)
++{
++	int reg_val = readl(HIF_INT_COAL);
++
++	if (reg_val & HIF_INT_COAL_ENABLE)
++		ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
++						HIF_RX_COAL_CLKS_PER_USEC;
++	else
++		ec->rx_coalesce_usecs = 0;
++
++	return 0;
++}
++
++/*
++ * pfe_eth_set_pauseparam - Sets pause parameters
++ *
++ */
++static int pfe_eth_set_pauseparam(struct net_device *ndev,
++				  struct ethtool_pauseparam *epause)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	if (epause->tx_pause != epause->rx_pause) {
++		netdev_info(ndev,
++			    "hardware only support enable/disable both tx and rx\n");
++		return -EINVAL;
++	}
++
++	priv->pause_flag = 0;
++	priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
++	priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
++
++	if (epause->rx_pause || epause->autoneg) {
++		gemac_enable_pause_rx(priv->EMAC_baseaddr);
++		writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
++					EGPI_PAUSE_ENABLE),
++				priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
++		if (priv->phydev) {
++			priv->phydev->supported |= ADVERTISED_Pause |
++							ADVERTISED_Asym_Pause;
++			priv->phydev->advertising |= ADVERTISED_Pause |
++							ADVERTISED_Asym_Pause;
++		}
++	} else {
++		gemac_disable_pause_rx(priv->EMAC_baseaddr);
++		writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
++					~EGPI_PAUSE_ENABLE),
++				priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
++		if (priv->phydev) {
++			priv->phydev->supported &= ~(ADVERTISED_Pause |
++							ADVERTISED_Asym_Pause);
++			priv->phydev->advertising &= ~(ADVERTISED_Pause |
++							ADVERTISED_Asym_Pause);
++		}
++	}
++
++	return 0;
++}
++
++/*
++ * pfe_eth_get_pauseparam - Gets pause parameters
++ *
++ */
++static void pfe_eth_get_pauseparam(struct net_device *ndev,
++				   struct ethtool_pauseparam *epause)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
++	epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
++	epause->rx_pause = epause->tx_pause;
++}
++
++/*
++ * pfe_eth_get_hash
++ */
++#define PFE_HASH_BITS	6		/* #bits in hash */
++#define CRC32_POLY	0xEDB88320
++
++static int pfe_eth_get_hash(u8 *addr)
++{
++	unsigned int i, bit, data, crc, hash;
++
++	/* calculate crc32 value of mac address */
++	crc = 0xffffffff;
++
++	for (i = 0; i < 6; i++) {
++		data = addr[i];
++		for (bit = 0; bit < 8; bit++, data >>= 1) {
++			crc = (crc >> 1) ^
++				(((crc ^ data) & 1) ? CRC32_POLY : 0);
++		}
++	}
++
++	/*
++	 * only upper 6 bits (PFE_HASH_BITS) are used
++	 * which point to specific bit in the hash registers
++	 */
++	hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
++
++	return hash;
++}
++
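++/*
++ * Usage sketch (hypothetical): the 6-bit result selects one bit in the
++ * GEMAC's 64-bit group-hash filter, e.g. when adding a multicast address:
++ *
++ *	u64 filter = 0;
++ *	filter |= 1ULL << pfe_eth_get_hash(mc_addr);
++ */
++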
++const struct ethtool_ops pfe_ethtool_ops = {
++	.get_drvinfo = pfe_eth_get_drvinfo,
++	.get_regs_len = pfe_eth_gemac_reglen,
++	.get_regs = pfe_eth_gemac_get_regs,
++	.get_link = ethtool_op_get_link,
++	.get_wol  = pfe_eth_get_wol,
++	.set_wol  = pfe_eth_set_wol,
++	.set_pauseparam = pfe_eth_set_pauseparam,
++	.get_pauseparam = pfe_eth_get_pauseparam,
++	.get_strings = pfe_eth_gstrings,
++	.get_sset_count = pfe_eth_stats_count,
++	.get_ethtool_stats = pfe_eth_fill_stats,
++	.get_msglevel = pfe_eth_get_msglevel,
++	.set_msglevel = pfe_eth_set_msglevel,
++	.set_coalesce = pfe_eth_set_coalesce,
++	.get_coalesce = pfe_eth_get_coalesce,
++	.get_link_ksettings = pfe_eth_get_settings,
++	.set_link_ksettings = pfe_eth_set_settings,
++};
++
++/* pfe_eth_mdio_reset
++ */
++int pfe_eth_mdio_reset(struct mii_bus *bus)
++{
++	struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
++	u32 phy_speed;
++
++	netif_info(priv, hw, priv->ndev, "%s\n", __func__);
++
++	mutex_lock(&bus->mdio_lock);
++
++	/*
++	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
++	 *
++	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
++	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
++	 */
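++	/*
++	 * E.g., assuming a 250 MHz AXI clock (sys_clk == 250000 kHz), the
++	 * division below yields DIV_ROUND_UP(250000000, 4000000) == 63,
++	 * giving an MDC of roughly 250 MHz / (63 * 2) ~= 1.98 MHz, safely
++	 * below the 2.5 MHz MDIO limit (illustrative figures only).
++	 */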
++	phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
++		     << EMAC_MII_SPEED_SHIFT);
++	phy_speed |= EMAC_HOLDTIME(0x5);
++	__raw_writel(phy_speed, priv->PHY_baseaddr + EMAC_MII_CTRL_REG);
++
++	mutex_unlock(&bus->mdio_lock);
++
++	return 0;
++}
++
++/* pfe_eth_gemac_phy_timeout
++ *
++ */
++static int pfe_eth_gemac_phy_timeout(struct pfe_eth_priv_s *priv, int timeout)
++{
++	while (!(__raw_readl(priv->PHY_baseaddr + EMAC_IEVENT_REG) &
++			EMAC_IEVENT_MII)) {
++		if (timeout-- <= 0)
++			return -1;
++		usleep_range(10, 20);
++	}
++	__raw_writel(EMAC_IEVENT_MII, priv->PHY_baseaddr + EMAC_IEVENT_REG);
++	return 0;
++}
++
++static int pfe_eth_mdio_mux(u8 muxval)
++{
++	struct i2c_adapter *a;
++	struct i2c_msg msg;
++	unsigned char buf[2];
++	int ret;
++
++	a = i2c_get_adapter(0);
++	if (!a)
++		return -ENODEV;
++
++	/* write the mux value to reg 0x54 of the chip at I2C address 0x66 */
++	buf[0] = 0x54; /* reg number */
++	buf[1] = (muxval << 6) | 0x3; /* data */
++	msg.addr = 0x66;
++	msg.buf = buf;
++	msg.len = 2;
++	msg.flags = 0;
++	ret = i2c_transfer(a, &msg, 1);
++	i2c_put_adapter(a);
++	if (ret != 1)
++		return -ENODEV;
++	return 0;
++}
++
++static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
++				   int dev_addr, int regnum)
++{
++	struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
++
++	__raw_writel(EMAC_MII_DATA_PA(mii_id) |
++		     EMAC_MII_DATA_RA(dev_addr) |
++		     EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
++		     priv->PHY_baseaddr + EMAC_MII_DATA_REG);
++
++	if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
++		netdev_err(priv->ndev, "%s: phy MDIO address write timeout\n",
++			   __func__);
++		return -1;
++	}
++
++	return 0;
++}
++
++static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
++			      u16 value)
++{
++	struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
++
++	/* External PHYs on the QDS board require the mux to be configured */
++	if ((mii_id) && (pfe->mdio_muxval[mii_id]))
++		pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
++
++	if (regnum & MII_ADDR_C45) {
++		pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
++					regnum & 0xffff);
++		__raw_writel(EMAC_MII_DATA_OP_CL45_WR |
++			     EMAC_MII_DATA_PA(mii_id) |
++			     EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
++			     EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
++			     priv->PHY_baseaddr + EMAC_MII_DATA_REG);
++	} else {
++		/* start a write op */
++		__raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
++			     EMAC_MII_DATA_PA(mii_id) |
++			     EMAC_MII_DATA_RA(regnum) |
++			     EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
++			     priv->PHY_baseaddr + EMAC_MII_DATA_REG);
++	}
++
++	if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
++		netdev_err(priv->ndev, "%s: phy MDIO write timeout\n",
++			   __func__);
++		return -1;
++	}
++	netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
++		   mii_id, regnum, value);
++
++	return 0;
++}
++
++static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
++{
++	struct pfe_eth_priv_s *priv = (struct pfe_eth_priv_s *)bus->priv;
++	u16 value = 0;
++
++	/* External PHYs on the QDS board require the mux to be configured */
++	if ((mii_id) && (pfe->mdio_muxval[mii_id]))
++		pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
++
++	if (regnum & MII_ADDR_C45) {
++		pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
++					regnum & 0xffff);
++		__raw_writel(EMAC_MII_DATA_OP_CL45_RD |
++			     EMAC_MII_DATA_PA(mii_id) |
++			     EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
++			     EMAC_MII_DATA_TA,
++			     priv->PHY_baseaddr + EMAC_MII_DATA_REG);
++	} else {
++		/* start a read op */
++		__raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
++			     EMAC_MII_DATA_PA(mii_id) |
++			     EMAC_MII_DATA_RA(regnum) |
++			     EMAC_MII_DATA_TA, priv->PHY_baseaddr +
++			     EMAC_MII_DATA_REG);
++	}
++
++	if (pfe_eth_gemac_phy_timeout(priv, EMAC_MDIO_TIMEOUT)) {
++		netdev_err(priv->ndev, "%s: phy MDIO read timeout\n", __func__);
++		return -1;
++	}
++
++	value = EMAC_MII_DATA(__raw_readl(priv->PHY_baseaddr +
++						EMAC_MII_DATA_REG));
++	netif_info(priv, hw, priv->ndev, "%s: phy %x reg %x val %x\n", __func__,
++		   mii_id, regnum, value);
++	return value;
++}
++
++static int pfe_eth_mdio_init(struct pfe_eth_priv_s *priv,
++			     struct ls1012a_mdio_platform_data *minfo)
++{
++	struct mii_bus *bus;
++	int rc;
++
++	netif_info(priv, drv, priv->ndev, "%s\n", __func__);
++	pr_info("%s\n", __func__);
++
++	bus = mdiobus_alloc();
++	if (!bus) {
++		netdev_err(priv->ndev, "mdiobus_alloc() failed\n");
++		rc = -ENOMEM;
++		goto err0;
++	}
++
++	bus->name = "ls1012a MDIO Bus";
++	bus->read = &pfe_eth_mdio_read;
++	bus->write = &pfe_eth_mdio_write;
++	bus->reset = &pfe_eth_mdio_reset;
++	snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", priv->id);
++	bus->priv = priv;
++
++	bus->phy_mask = minfo->phy_mask;
++	priv->mdc_div = minfo->mdc_div;
++
++	if (!priv->mdc_div)
++		priv->mdc_div = 64;
++
++	bus->irq[0] = minfo->irq[0];
++
++	bus->parent = priv->pfe->dev;
++
++	netif_info(priv, drv, priv->ndev, "%s: mdc_div: %d, phy_mask: %x\n",
++		   __func__, priv->mdc_div, bus->phy_mask);
++	rc = mdiobus_register(bus);
++	if (rc) {
++		netdev_err(priv->ndev, "mdiobus_register(%s) failed\n",
++			   bus->name);
++		goto err1;
++	}
++
++	priv->mii_bus = bus;
++	pfe_eth_mdio_reset(bus);
++
++	return 0;
++
++err1:
++	mdiobus_free(bus);
++err0:
++	return rc;
++}
++
++/* pfe_eth_mdio_exit
++ */
++static void pfe_eth_mdio_exit(struct mii_bus *bus)
++{
++	if (!bus)
++		return;
++
++	netif_info((struct pfe_eth_priv_s *)bus->priv, drv, ((struct
++			pfe_eth_priv_s *)(bus->priv))->ndev, "%s\n", __func__);
++
++	mdiobus_unregister(bus);
++	mdiobus_free(bus);
++}
++
++/* pfe_get_phydev_speed
++ */
++static int pfe_get_phydev_speed(struct phy_device *phydev)
++{
++	switch (phydev->speed) {
++	case 10:
++			return SPEED_10M;
++	case 100:
++			return SPEED_100M;
++	case 1000:
++	default:
++			return SPEED_1000M;
++	}
++}
++
++/* pfe_set_rgmii_speed
++ */
++#define RGMIIPCR	0x434
++/* RGMIIPCR bit definitions*/
++#define SCFG_RGMIIPCR_EN_AUTO           (0x00000008)
++#define SCFG_RGMIIPCR_SETSP_1000M       (0x00000004)
++#define SCFG_RGMIIPCR_SETSP_100M        (0x00000000)
++#define SCFG_RGMIIPCR_SETSP_10M         (0x00000002)
++#define SCFG_RGMIIPCR_SETFD             (0x00000001)
++
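++/*
++ * Illustrative composition (field semantics assumed from the names):
++ * a fixed 1 Gbps full-duplex setting would be
++ * SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETFD (== 0x00000005);
++ * pfe_set_rgmii_speed() below rewrites only the SETSP speed bits.
++ */
++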
++static void pfe_set_rgmii_speed(struct phy_device *phydev)
++{
++	u32 rgmii_pcr;
++
++	regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
++	rgmii_pcr  &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
++
++	switch (phydev->speed) {
++	case 10:
++			rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
++			break;
++	case 1000:
++			rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
++			break;
++	case 100:
++	default:
++			/* Default is 100M */
++			break;
++	}
++	regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
++}
++
++/* pfe_get_phydev_duplex
++ */
++static int pfe_get_phydev_duplex(struct phy_device *phydev)
++{
++	/*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
++	return DUPLEX_FULL;
++}
++
++/* pfe_eth_adjust_link
++ */
++static void pfe_eth_adjust_link(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	unsigned long flags;
++	struct phy_device *phydev = priv->phydev;
++	int new_state = 0;
++
++	netif_info(priv, drv, ndev, "%s\n", __func__);
++
++	spin_lock_irqsave(&priv->lock, flags);
++
++	if (phydev->link) {
++		/*
++		 * Now we make sure that we can be in full duplex mode.
++		 * If not, we operate in half-duplex mode.
++		 */
++		if (phydev->duplex != priv->oldduplex) {
++			new_state = 1;
++			gemac_set_duplex(priv->EMAC_baseaddr,
++					 pfe_get_phydev_duplex(phydev));
++			priv->oldduplex = phydev->duplex;
++		}
++
++		if (phydev->speed != priv->oldspeed) {
++			new_state = 1;
++			gemac_set_speed(priv->EMAC_baseaddr,
++					pfe_get_phydev_speed(phydev));
++			if (priv->einfo->mii_config == PHY_INTERFACE_MODE_RGMII_TXID)
++				pfe_set_rgmii_speed(phydev);
++			priv->oldspeed = phydev->speed;
++		}
++
++		if (!priv->oldlink) {
++			new_state = 1;
++			priv->oldlink = 1;
++		}
++
++	} else if (priv->oldlink) {
++		new_state = 1;
++		priv->oldlink = 0;
++		priv->oldspeed = 0;
++		priv->oldduplex = -1;
++	}
++
++	if (new_state && netif_msg_link(priv))
++		phy_print_status(phydev);
++
++	spin_unlock_irqrestore(&priv->lock, flags);
++}
++
++/* pfe_phy_exit
++ */
++static void pfe_phy_exit(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	netif_info(priv, drv, ndev, "%s\n", __func__);
++
++	phy_disconnect(priv->phydev);
++	priv->phydev = NULL;
++}
++
++/* pfe_eth_stop
++ */
++static void pfe_eth_stop(struct net_device *ndev, int wake)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	netif_info(priv, drv, ndev, "%s\n", __func__);
++
++	if (wake) {
++		gemac_tx_disable(priv->EMAC_baseaddr);
++	} else {
++		gemac_disable(priv->EMAC_baseaddr);
++		gpi_disable(priv->GPI_baseaddr);
++
++		if (priv->phydev)
++			phy_stop(priv->phydev);
++	}
++}
++
++/* pfe_eth_start
++ */
++static int pfe_eth_start(struct pfe_eth_priv_s *priv)
++{
++	netif_info(priv, drv, priv->ndev, "%s\n", __func__);
++
++	if (priv->phydev)
++		phy_start(priv->phydev);
++
++	gpi_enable(priv->GPI_baseaddr);
++	gemac_enable(priv->EMAC_baseaddr);
++
++	return 0;
++}
++
++/*
++ * Configure on chip serdes through mdio
++ */
++static void ls1012a_configure_serdes(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = pfe->eth.eth_priv[0];
++	int sgmii_2500 = 0;
++	struct mii_bus *bus = priv->mii_bus;
++
++	if (priv->einfo->mii_config == PHY_INTERFACE_MODE_SGMII_2500)
++		sgmii_2500 = 1;
++
++	netif_info(priv, drv, ndev, "%s\n", __func__);
++	/* PCS configuration done with corresponding GEMAC */
++
++	pfe_eth_mdio_read(bus, 0, 0);
++	pfe_eth_mdio_read(bus, 0, 1);
++
++	/* These settings were taken from the validation team */
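++	/*
++	 * Register 0 is the standard MII control register: 0x8000 issues a
++	 * soft reset and 0x1140 re-enables autoneg at 1G full duplex. The
++	 * advertisement register (0x4) and vendor registers (0x12-0x14)
++	 * below are assumed PCS tuning values, taken as-is from validation.
++	 */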
++	pfe_eth_mdio_write(bus, 0, 0x0, 0x8000);
++	if (sgmii_2500) {
++		pfe_eth_mdio_write(bus, 0, 0x14, 0x9);
++		pfe_eth_mdio_write(bus, 0, 0x4, 0x4001);
++		pfe_eth_mdio_write(bus, 0, 0x12, 0xa120);
++		pfe_eth_mdio_write(bus, 0, 0x13, 0x7);
++	} else {
++		pfe_eth_mdio_write(bus, 0, 0x14, 0xb);
++		pfe_eth_mdio_write(bus, 0, 0x4, 0x1a1);
++		pfe_eth_mdio_write(bus, 0, 0x12, 0x400);
++		pfe_eth_mdio_write(bus, 0, 0x13, 0x0);
++	}
++
++	pfe_eth_mdio_write(bus, 0, 0x0, 0x1140);
++}
++
++/*
++ * pfe_phy_init
++ *
++ */
++static int pfe_phy_init(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	struct phy_device *phydev;
++	char phy_id[MII_BUS_ID_SIZE + 3];
++	char bus_id[MII_BUS_ID_SIZE];
++	phy_interface_t interface;
++
++	priv->oldlink = 0;
++	priv->oldspeed = 0;
++	priv->oldduplex = -1;
++
++	snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
++	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
++		 priv->einfo->phy_id);
++
++	netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
++	interface = priv->einfo->mii_config;
++	if ((interface == PHY_INTERFACE_MODE_SGMII) ||
++	    (interface == PHY_INTERFACE_MODE_SGMII_2500)) {
++		/* Configure SGMII PCS */
++		if (pfe->scfg) {
++			/* Config MDIO from serdes */
++			regmap_write(pfe->scfg, 0x484, 0x00000000);
++		}
++		ls1012a_configure_serdes(ndev);
++	}
++
++	if (pfe->scfg) {
++		/* Config MDIO from PAD */
++		regmap_write(pfe->scfg, 0x484, 0x80000000);
++	}
++
++	pr_info("%s interface %x\n", __func__, interface);
++	phydev = phy_connect(ndev, phy_id, &pfe_eth_adjust_link, interface);
++
++	if (IS_ERR(phydev)) {
++		netdev_err(ndev, "phy_connect() failed\n");
++		return PTR_ERR(phydev);
++	}
++
++	priv->phydev = phydev;
++	phydev->irq = PHY_POLL;
++
++	return 0;
++}
++
++/* pfe_gemac_init
++ */
++static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
++{
++	struct gemac_cfg cfg;
++
++	netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
++
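++	/*
++	 * Start from a 1G full-duplex default; pfe_eth_adjust_link() will
++	 * reprogram speed/duplex once the PHY reports the negotiated link.
++	 */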
++	cfg.speed = SPEED_1000M;
++	cfg.duplex = DUPLEX_FULL;
++
++	gemac_set_config(priv->EMAC_baseaddr, &cfg);
++	gemac_allow_broadcast(priv->EMAC_baseaddr);
++	gemac_enable_1536_rx(priv->EMAC_baseaddr);
++	gemac_enable_rx_jmb(priv->EMAC_baseaddr);
++	gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
++	gemac_enable_pause_rx(priv->EMAC_baseaddr);
++	gemac_set_bus_width(priv->EMAC_baseaddr, 64);
++
++	/*GEM will perform checksum verifications*/
++	if (priv->ndev->features & NETIF_F_RXCSUM)
++		gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
++	else
++		gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
++
++	return 0;
++}
++
++/* pfe_eth_event_handler
++ */
++static int pfe_eth_event_handler(void *data, int event, int qno)
++{
++	struct pfe_eth_priv_s *priv = data;
++
++	switch (event) {
++	case EVENT_RX_PKT_IND:
++
++		if (qno == 0) {
++			if (napi_schedule_prep(&priv->high_napi)) {
++				netif_info(priv, intr, priv->ndev,
++					   "%s: schedule high prio poll\n"
++					   , __func__);
++
++#ifdef PFE_ETH_NAPI_STATS
++				priv->napi_counters[NAPI_SCHED_COUNT]++;
++#endif
++
++				__napi_schedule(&priv->high_napi);
++			}
++		} else if (qno == 1) {
++			if (napi_schedule_prep(&priv->low_napi)) {
++				netif_info(priv, intr, priv->ndev,
++					   "%s: schedule low prio poll\n"
++					   , __func__);
++
++#ifdef PFE_ETH_NAPI_STATS
++				priv->napi_counters[NAPI_SCHED_COUNT]++;
++#endif
++				__napi_schedule(&priv->low_napi);
++			}
++		} else if (qno == 2) {
++			if (napi_schedule_prep(&priv->lro_napi)) {
++				netif_info(priv, intr, priv->ndev,
++					   "%s: schedule lro prio poll\n"
++					   , __func__);
++
++#ifdef PFE_ETH_NAPI_STATS
++				priv->napi_counters[NAPI_SCHED_COUNT]++;
++#endif
++				__napi_schedule(&priv->lro_napi);
++			}
++		}
++
++		break;
++
++	case EVENT_TXDONE_IND:
++		pfe_eth_flush_tx(priv);
++		hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
++		break;
++	case EVENT_HIGH_RX_WM:
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++/* pfe_eth_open
++ */
++static int pfe_eth_open(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	struct hif_client_s *client;
++	int rc;
++
++	netif_info(priv, ifup, ndev, "%s\n", __func__);
++
++	/* Register client driver with HIF */
++	client = &priv->client;
++	memset(client, 0, sizeof(*client));
++	client->id = PFE_CL_GEM0 + priv->id;
++	client->tx_qn = emac_txq_cnt;
++	client->rx_qn = EMAC_RXQ_CNT;
++	client->priv = priv;
++	client->pfe = priv->pfe;
++	client->event_handler = pfe_eth_event_handler;
++
++	client->tx_qsize = EMAC_TXQ_DEPTH;
++	client->rx_qsize = EMAC_RXQ_DEPTH;
++
++	rc = hif_lib_client_register(client);
++	if (rc) {
++		netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
++			   __func__, client->id);
++		goto err0;
++	}
++
++	netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
++		   client);
++
++	pfe_gemac_init(priv);
++
++	if (!is_valid_ether_addr(ndev->dev_addr)) {
++		netdev_err(ndev, "%s: invalid MAC address\n", __func__);
++		rc = -EADDRNOTAVAIL;
++		goto err1;
++	}
++
++	gemac_set_laddrN(priv->EMAC_baseaddr,
++			 (struct pfe_mac_addr *)ndev->dev_addr, 1);
++
++	napi_enable(&priv->high_napi);
++	napi_enable(&priv->low_napi);
++	napi_enable(&priv->lro_napi);
++
++	rc = pfe_eth_start(priv);
++
++	netif_tx_wake_all_queues(ndev);
++
++	return rc;
++
++err1:
++	hif_lib_client_unregister(&priv->client);
++
++err0:
++	return rc;
++}
++
++/*
++ *  pfe_eth_shutdown
++ */
++int pfe_eth_shutdown(struct net_device *ndev, int wake)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	int i, qstatus;
++	unsigned long next_poll = jiffies + 1, end = jiffies +
++				(TX_POLL_TIMEOUT_MS * HZ) / 1000;
++	int tx_pkts, prv_tx_pkts;
++
++	netif_info(priv, ifdown, ndev, "%s\n", __func__);
++
++	for (i = 0; i < emac_txq_cnt; i++)
++		hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
++
++	netif_tx_stop_all_queues(ndev);
++
++	do {
++		tx_pkts = 0;
++		pfe_eth_flush_tx(priv);
++
++		for (i = 0; i < emac_txq_cnt; i++)
++			tx_pkts += hif_lib_tx_pending(&priv->client, i);
++
++		if (tx_pkts) {
++			/* Don't wait forever; break if we cross the max timeout */
++			if (time_after(jiffies, end)) {
++				pr_err(
++					"(%s)Tx is not complete after %dmsec\n",
++					ndev->name, TX_POLL_TIMEOUT_MS);
++				break;
++			}
++
++			pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
++				, __func__, ndev->name, tx_pkts);
++			if (need_resched())
++				schedule();
++		}
++
++	} while (tx_pkts);
++
++	end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
++
++	prv_tx_pkts = tmu_pkts_processed(priv->id);
++	/*
++	 * Wait until the TMU has transmitted all pending packets: poll
++	 * tmu_qstatus and the TMU packet counter every 10ms, and consider
++	 * the TMU busy as long as a queue is pending or packets are still
++	 * being processed.
++	 */
++	while (1) {
++		if (time_after(jiffies, next_poll)) {
++			tx_pkts = tmu_pkts_processed(priv->id);
++			qstatus = tmu_qstatus(priv->id) & 0x7ffff;
++
++			if (!qstatus && (tx_pkts == prv_tx_pkts))
++				break;
++			/* Don't wait forever, break if we cross max
++			 * timeout(TX_POLL_TIMEOUT_MS)
++			 */
++			if (time_after(jiffies, end)) {
++				pr_err("TMU%d is busy after %dmsec\n",
++				       priv->id, TX_POLL_TIMEOUT_MS);
++				break;
++			}
++			prv_tx_pkts = tx_pkts;
++			next_poll++;
++		}
++		if (need_resched())
++			schedule();
++	}
++	/* Wait for some more time to complete transmitting packet if any */
++	next_poll = jiffies + 1;
++	while (1) {
++		if (time_after(jiffies, next_poll))
++			break;
++		if (need_resched())
++			schedule();
++	}
++
++	pfe_eth_stop(ndev, wake);
++
++	napi_disable(&priv->lro_napi);
++	napi_disable(&priv->low_napi);
++	napi_disable(&priv->high_napi);
++
++	hif_lib_client_unregister(&priv->client);
++
++	return 0;
++}
++
++/* pfe_eth_close
++ *
++ */
++static int pfe_eth_close(struct net_device *ndev)
++{
++	pfe_eth_shutdown(ndev, 0);
++
++	return 0;
++}
++
++/* pfe_eth_suspend
++ *
++ * return value : 1 if netdevice is configured to wakeup system
++ *                0 otherwise
++ */
++int pfe_eth_suspend(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	int retval = 0;
++
++	if (priv->wol) {
++		gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
++		retval = 1;
++	}
++	pfe_eth_shutdown(ndev, priv->wol);
++
++	return retval;
++}
++
++/* pfe_eth_resume
++ *
++ */
++int pfe_eth_resume(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	if (priv->wol)
++		gemac_set_wol(priv->EMAC_baseaddr, 0);
++	gemac_tx_enable(priv->EMAC_baseaddr);
++
++	return pfe_eth_open(ndev);
++}
++
++/* pfe_eth_get_queuenum
++ */
++static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv,
++				struct sk_buff *skb)
++{
++	int queuenum = 0;
++	unsigned long flags;
++
++	/* Get the Fast Path queue number */
++	/*
++	 * Use conntrack mark (if conntrack exists), then packet mark (if any),
++	 * then fallback to default
++	 */
++#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
++	if (skb->nfct) {
++		enum ip_conntrack_info cinfo;
++		struct nf_conn *ct;
++
++		ct = nf_ct_get(skb, &cinfo);
++
++		if (ct) {
++			u32 connmark;
++
++			connmark = ct->mark;
++
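++		/*
++		 * A set top bit appears to denote per-port marks: GEMACs
++		 * other than the first take their queue number from the
++		 * upper 16 bits of the conntrack mark.
++		 */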
++			if ((connmark & 0x80000000) && priv->id != 0)
++				connmark >>= 16;
++
++			queuenum = connmark & EMAC_QUEUENUM_MASK;
++		}
++	} else { /* continued after #endif ... */
++#endif
++		if (skb->mark) {
++			queuenum = skb->mark & EMAC_QUEUENUM_MASK;
++		} else {
++			spin_lock_irqsave(&priv->lock, flags);
++			queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
++			spin_unlock_irqrestore(&priv->lock, flags);
++		}
++#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
++	}
++#endif
++	return queuenum;
++}
++
++/* pfe_eth_might_stop_tx
++ *
++ */
++static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
++				 struct netdev_queue *tx_queue,
++				 unsigned int n_desc,
++				 unsigned int n_segs)
++{
++	ktime_t kt;
++
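++	/*
++	 * Stop the queue when any of the three TX resources runs low: HIF
++	 * descriptors, client queue slots or per-queue TX credits. A short
++	 * hrtimer then re-wakes the queue instead of waiting for the next
++	 * TX-done event.
++	 */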
++	if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
++		     (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
++		     (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
++#ifdef PFE_ETH_TX_STATS
++		if (__hif_tx_avail(&pfe->hif) < n_desc) {
++			priv->stop_queue_hif[queuenum]++;
++		} else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
++			priv->stop_queue_hif_client[queuenum]++;
++		} else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
++			n_segs) {
++			priv->stop_queue_credit[queuenum]++;
++		}
++		priv->stop_queue_total[queuenum]++;
++#endif
++		netif_tx_stop_queue(tx_queue);
++
++		kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
++				NSEC_PER_MSEC);
++		hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
++			      HRTIMER_MODE_REL);
++		return -1;
++	} else {
++		return 0;
++	}
++}
++
++#define SA_MAX_OP 2
++/* pfe_hif_send_packet
++ *
++ * At this level if TX fails we drop the packet
++ */
++static void pfe_hif_send_packet(struct sk_buff *skb,
++				struct pfe_eth_priv_s *priv, int queuenum)
++{
++	struct skb_shared_info *sh = skb_shinfo(skb);
++	unsigned int nr_frags;
++	u32 ctrl = 0;
++
++	netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
++
++	if (skb_is_gso(skb)) {
++		priv->stats.tx_dropped++;
++		return;
++	}
++
++	if (skb->ip_summed == CHECKSUM_PARTIAL)
++		ctrl = HIF_CTRL_TX_CHECKSUM;
++
++	nr_frags = sh->nr_frags;
++
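++	/*
++	 * Scattered skbs go out as a HIF buffer chain: the linear head is
++	 * flagged HIF_FIRST_BUFFER, each page fragment follows in order,
++	 * and the final fragment carries HIF_LAST_BUFFER | HIF_DATA_VALID
++	 * so the completion path knows when the skb can be freed.
++	 */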
++	if (nr_frags) {
++		skb_frag_t *f;
++		int i;
++
++		__hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
++				   skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
++				   skb);
++
++		for (i = 0; i < nr_frags - 1; i++) {
++			f = &sh->frags[i];
++			__hif_lib_xmit_pkt(&priv->client, queuenum,
++					   skb_frag_address(f),
++					   skb_frag_size(f),
++					   0x0, 0x0, skb);
++		}
++
++		f = &sh->frags[i];
++
++		__hif_lib_xmit_pkt(&priv->client, queuenum,
++				   skb_frag_address(f), skb_frag_size(f),
++				   0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
++				   skb);
++
++		netif_info(priv, tx_queued, priv->ndev,
++			   "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
++			   __func__, skb, nr_frags, skb->len);
++	} else {
++		__hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
++				   skb->len, ctrl, HIF_FIRST_BUFFER |
++				   HIF_LAST_BUFFER | HIF_DATA_VALID,
++				   skb);
++		netif_info(priv, tx_queued, priv->ndev,
++			   "%s: pkt sent successfully skb:%p len:%d\n",
++			   __func__, skb, skb->len);
++	}
++	hif_tx_dma_start();
++	priv->stats.tx_packets++;
++	priv->stats.tx_bytes += skb->len;
++	hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
++}
++
++/* pfe_eth_flush_txQ
++ */
++static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
++				from_tx, int n_desc)
++{
++	struct sk_buff *skb;
++	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
++								tx_q_num);
++	unsigned int flags;
++
++	netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
++
++	if (!from_tx)
++		__netif_tx_lock_bh(tx_queue);
++
++	/* Clean HIF and client queue */
++	while ((skb = hif_lib_tx_get_next_complete(&priv->client,
++						   tx_q_num, &flags,
++						   HIF_TX_DESC_NT))) {
++		if (flags & HIF_DATA_VALID)
++			dev_kfree_skb_any(skb);
++	}
++	if (!from_tx)
++		__netif_tx_unlock_bh(tx_queue);
++}
++
++/* pfe_eth_flush_tx
++ */
++static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
++{
++	int ii;
++
++	netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
++
++	for (ii = 0; ii < emac_txq_cnt; ii++)
++		pfe_eth_flush_txQ(priv, ii, 0, 0);
++}
++
++void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
++				*n_segs)
++{
++	struct skb_shared_info *sh = skb_shinfo(skb);
++
++	/* Scattered data */
++	if (sh->nr_frags) {
++		*n_desc = sh->nr_frags + 1;
++		*n_segs = 1;
++	/* Regular case */
++	} else {
++		*n_desc = 1;
++		*n_segs = 1;
++	}
++}
++
++/* pfe_eth_send_packet
++ */
++static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	int tx_q_num = skb_get_queue_mapping(skb);
++	int n_desc, n_segs;
++	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
++								tx_q_num);
++
++	netif_info(priv, tx_queued, ndev, "%s\n", __func__);
++
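++	/*
++	 * The TX path needs PFE_PKT_HEADER_SZ bytes of headroom (plus
++	 * alignment), presumably for the prepended PFE packet header;
++	 * reallocate the head when there is not enough room.
++	 */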
++	if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
++			sizeof(unsigned long)))) {
++		netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
++			   __func__);
++
++		if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
++					long)), 0, GFP_ATOMIC)) {
++			/* No need to retransmit; there is no way to recover */
++			kfree_skb(skb);
++			priv->stats.tx_dropped++;
++			return NETDEV_TX_OK;
++		}
++	}
++
++	pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
++
++	hif_tx_lock(&pfe->hif);
++	if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
++					   n_segs))) {
++#ifdef PFE_ETH_TX_STATS
++		if (priv->was_stopped[tx_q_num]) {
++			priv->clean_fail[tx_q_num]++;
++			priv->was_stopped[tx_q_num] = 0;
++		}
++#endif
++		hif_tx_unlock(&pfe->hif);
++		return NETDEV_TX_BUSY;
++	}
++
++	pfe_hif_send_packet(skb, priv, tx_q_num);
++
++	hif_tx_unlock(&pfe->hif);
++
++	tx_queue->trans_start = jiffies;
++
++#ifdef PFE_ETH_TX_STATS
++	priv->was_stopped[tx_q_num] = 0;
++#endif
++
++	return NETDEV_TX_OK;
++}
++
++/* pfe_eth_select_queue
++ *
++ */
++static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
++				void *accel_priv,
++				select_queue_fallback_t fallback)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	return pfe_eth_get_queuenum(priv, skb);
++}
++
++/* pfe_eth_get_stats
++ */
++static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++
++	netif_info(priv, drv, ndev, "%s\n", __func__);
++
++	return &priv->stats;
++}
++
++/* pfe_eth_set_mac_address
++ */
++static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	struct sockaddr *sa = addr;
++
++	netif_info(priv, drv, ndev, "%s\n", __func__);
++
++	if (!is_valid_ether_addr(sa->sa_data))
++		return -EADDRNOTAVAIL;
++
++	memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
++
++	gemac_set_laddrN(priv->EMAC_baseaddr,
++			 (struct pfe_mac_addr *)ndev->dev_addr, 1);
++
++	return 0;
++}
++
++/* pfe_eth_enet_addr_byte_mac
++ */
++int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
++			       struct pfe_mac_addr *enet_addr)
++{
++	if (!enet_byte_addr || !enet_addr) {
++		return -1;
++
++	} else {
++		enet_addr->bottom = enet_byte_addr[0] |
++			(enet_byte_addr[1] << 8) |
++			(enet_byte_addr[2] << 16) |
++			(enet_byte_addr[3] << 24);
++		enet_addr->top = enet_byte_addr[4] |
++			(enet_byte_addr[5] << 8);
++		return 0;
++	}
++}
++
++/* pfe_eth_set_multi
++ */
++static void pfe_eth_set_multi(struct net_device *ndev)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	struct pfe_mac_addr    hash_addr; /* hash register structure */
++	struct pfe_mac_addr    spec_addr; /* specific MAC address register */
++	int		result; /* index into the hash register to set */
++	int		uc_count = 0;
++	struct netdev_hw_addr *ha;
++
++	if (ndev->flags & IFF_PROMISC) {
++		netif_info(priv, drv, ndev, "entering promiscuous mode\n");
++
++		priv->promisc = 1;
++		gemac_enable_copy_all(priv->EMAC_baseaddr);
++	} else {
++		priv->promisc = 0;
++		gemac_disable_copy_all(priv->EMAC_baseaddr);
++	}
++
++	/* Enable broadcast frame reception if required. */
++	if (ndev->flags & IFF_BROADCAST) {
++		gemac_allow_broadcast(priv->EMAC_baseaddr);
++	} else {
++		netif_info(priv, drv, ndev,
++			   "disabling broadcast frame reception\n");
++
++		gemac_no_broadcast(priv->EMAC_baseaddr);
++	}
++
++	if (ndev->flags & IFF_ALLMULTI) {
++		/* Set the hash to rx all multicast frames */
++		hash_addr.bottom = 0xFFFFFFFF;
++		hash_addr.top = 0xFFFFFFFF;
++		gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
++		netdev_for_each_uc_addr(ha, ndev) {
++			if (uc_count >= MAX_UC_SPEC_ADDR_REG)
++				break;
++			pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
++			gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
++					 uc_count + 2);
++			uc_count++;
++		}
++	} else if ((netdev_mc_count(ndev) > 0)  || (netdev_uc_count(ndev))) {
++		u8 *addr;
++
++		hash_addr.bottom = 0;
++		hash_addr.top = 0;
++
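++		/* Build the 64-bit hash: bits 0-31 in 'bottom', 32-63 in 'top' */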
++		netdev_for_each_mc_addr(ha, ndev) {
++			addr = ha->addr;
++
++			netif_info(priv, drv, ndev,
++				   "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
++				addr[0], addr[1], addr[2],
++				addr[3], addr[4], addr[5]);
++
++			result = pfe_eth_get_hash(addr);
++
++			if (result < EMAC_HASH_REG_BITS) {
++				if (result < 32)
++					hash_addr.bottom |= (1 << result);
++				else
++					hash_addr.top |= (1 << (result - 32));
++			} else {
++				break;
++			}
++		}
++
++		uc_count = -1;
++		netdev_for_each_uc_addr(ha, ndev) {
++			addr = ha->addr;
++
++			if (++uc_count < MAX_UC_SPEC_ADDR_REG)   {
++				netdev_info(ndev,
++					    "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
++					    addr[0], addr[1], addr[2],
++					    addr[3], addr[4], addr[5]);
++				pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
++				gemac_set_laddrN(priv->EMAC_baseaddr,
++						 &spec_addr, uc_count + 2);
++			} else {
++				netif_info(priv, drv, ndev,
++					   "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
++					   addr[0], addr[1], addr[2],
++					   addr[3], addr[4], addr[5]);
++
++				result = pfe_eth_get_hash(addr);
++				if (result >= EMAC_HASH_REG_BITS) {
++					break;
++
++				} else {
++					if (result < 32)
++						hash_addr.bottom |= (1 <<
++								result);
++					else
++						hash_addr.top |= (1 <<
++								(result - 32));
++				}
++			}
++		}
++
++		gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
++	}
++
++	if (netdev_uc_count(ndev) < MAX_UC_SPEC_ADDR_REG) {
++		/*
++		 * Flush any specific address HW registers that are no
++		 * longer in use
++		 */
++		for (uc_count = netdev_uc_count(ndev); uc_count <
++			MAX_UC_SPEC_ADDR_REG; uc_count++)
++			gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
++	}
++
++	if (ndev->flags & IFF_LOOPBACK)
++		gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
++}
++
++/* pfe_eth_set_features
++ */
++static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
++					features)
++{
++	struct pfe_eth_priv_s *priv = netdev_priv(ndev);
++	int rc = 0;
++
++	if (features & NETIF_F_RXCSUM)
++		gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
++	else
++		gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
++	return rc;
++}
++
++/* pfe_eth_fast_tx_timeout
++ */
++static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
++{
++	struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
++							pfe_eth_fast_timer,
++							timer);
++	struct pfe_eth_priv_s *priv =  container_of(fast_tx_timeout->base,
++							struct pfe_eth_priv_s,
++							fast_tx_timeout);
++	struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
++						fast_tx_timeout->queuenum);
++
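++	/*
++	 * 'base' was set to &priv->fast_tx_timeout[0] at init time, which
++	 * is what lets container_of() above recover the owning priv.
++	 */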
++	if (netif_tx_queue_stopped(tx_queue)) {
++#ifdef PFE_ETH_TX_STATS
++		priv->was_stopped[fast_tx_timeout->queuenum] = 1;
++#endif
++		netif_tx_wake_queue(tx_queue);
++	}
++
++	return HRTIMER_NORESTART;
++}
++
++/* pfe_eth_fast_tx_timeout_init
++ */
++static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
++{
++	int i;
++
++	for (i = 0; i < emac_txq_cnt; i++) {
++		priv->fast_tx_timeout[i].queuenum = i;
++		hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
++			     HRTIMER_MODE_REL);
++		priv->fast_tx_timeout[i].timer.function =
++				pfe_eth_fast_tx_timeout;
++		priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
++	}
++}
++
++static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
++				      struct pfe_eth_priv_s *priv,
++				      unsigned int qno)
++{
++	void *buf_addr;
++	unsigned int rx_ctrl;
++	unsigned int desc_ctrl = 0;
++	struct hif_ipsec_hdr *ipsec_hdr = NULL;
++	struct sk_buff *skb;
++	struct sk_buff *skb_frag, *skb_frag_last = NULL;
++	int length = 0, offset;
++
++	skb = priv->skb_inflight[qno];
++
++	if (skb) {
++		skb_frag_last = skb_shinfo(skb)->frag_list;
++		if (skb_frag_last) {
++			while (skb_frag_last->next)
++				skb_frag_last = skb_frag_last->next;
++		}
++	}
++
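++	/*
++	 * Reassemble one packet from HIF buffers: a partially received
++	 * packet is parked in skb_inflight[] between polls, and follow-up
++	 * buffers are chained on its frag_list until a descriptor flagged
++	 * CL_DESC_LAST completes it.
++	 */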
++	while (!(desc_ctrl & CL_DESC_LAST)) {
++		buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
++					       &offset, &rx_ctrl, &desc_ctrl,
++					       (void **)&ipsec_hdr);
++		if (!buf_addr)
++			goto incomplete;
++
++#ifdef PFE_ETH_NAPI_STATS
++		priv->napi_counters[NAPI_DESC_COUNT]++;
++#endif
++
++		/* First frag */
++		if (desc_ctrl & CL_DESC_FIRST) {
++			skb = build_skb(buf_addr, 0);
++			if (unlikely(!skb))
++				goto pkt_drop;
++
++			skb_reserve(skb, offset);
++			skb_put(skb, length);
++			skb->dev = ndev;
++
++			if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
++					HIF_CTRL_RX_CHECKSUMMED))
++				skb->ip_summed = CHECKSUM_UNNECESSARY;
++			else
++				skb_checksum_none_assert(skb);
++
++		} else {
++			/* Next frags */
++			if (unlikely(!skb)) {
++				pr_err("%s: NULL skb_inflight\n",
++				       __func__);
++				goto pkt_drop;
++			}
++
++			skb_frag = build_skb(buf_addr, 0);
++
++			if (unlikely(!skb_frag)) {
++				kfree(buf_addr);
++				goto pkt_drop;
++			}
++
++			skb_reserve(skb_frag, offset);
++			skb_put(skb_frag, length);
++
++			skb_frag->dev = ndev;
++
++			if (skb_shinfo(skb)->frag_list)
++				skb_frag_last->next = skb_frag;
++			else
++				skb_shinfo(skb)->frag_list = skb_frag;
++
++			skb->truesize += skb_frag->truesize;
++			skb->data_len += length;
++			skb->len += length;
++			skb_frag_last = skb_frag;
++		}
++	}
++
++	priv->skb_inflight[qno] = NULL;
++	return skb;
++
++incomplete:
++	priv->skb_inflight[qno] = skb;
++	return NULL;
++
++pkt_drop:
++	priv->skb_inflight[qno] = NULL;
++
++	if (skb)
++		kfree_skb(skb);
++	else
++		kfree(buf_addr);
++
++	priv->stats.rx_errors++;
++
++	return NULL;
++}
++
++/* pfe_eth_poll
++ */
++static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
++			unsigned int qno, int budget)
++{
++	struct net_device *ndev = priv->ndev;
++	struct sk_buff *skb;
++	int work_done = 0;
++	unsigned int len;
++
++	netif_info(priv, intr, priv->ndev, "%s\n", __func__);
++
++#ifdef PFE_ETH_NAPI_STATS
++	priv->napi_counters[NAPI_POLL_COUNT]++;
++#endif
++
++	do {
++		skb = pfe_eth_rx_skb(ndev, priv, qno);
++
++		if (!skb)
++			break;
++
++		len = skb->len;
++
++		/* Packet will be processed */
++		skb->protocol = eth_type_trans(skb, ndev);
++
++		netif_receive_skb(skb);
++
++		priv->stats.rx_packets++;
++		priv->stats.rx_bytes += len;
++
++		work_done++;
++
++#ifdef PFE_ETH_NAPI_STATS
++		priv->napi_counters[NAPI_PACKET_COUNT]++;
++#endif
++
++	} while (work_done < budget);
++
++	/*
++	 * If we consumed less than the full budget, all Rx work is done:
++	 * exit polling mode. No netif_running(dev) check is required here,
++	 * as this is checked in net/core/dev.c.
++	 */
++	if (work_done < budget) {
++		napi_complete(napi);
++
++		hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
++					    qno);
++	}
++#ifdef PFE_ETH_NAPI_STATS
++	else
++		priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
++#endif
++
++	return work_done;
++}
++
++/*
++ * pfe_eth_lro_poll
++ */
++static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
++{
++	struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
++							lro_napi);
++
++	netif_info(priv, intr, priv->ndev, "%s\n", __func__);
++
++	return pfe_eth_poll(priv, napi, 2, budget);
++}
++
++/* pfe_eth_low_poll
++ */
++static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
++{
++	struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
++							low_napi);
++
++	netif_info(priv, intr, priv->ndev, "%s\n", __func__);
++
++	return pfe_eth_poll(priv, napi, 1, budget);
++}
++
++/* pfe_eth_high_poll
++ */
++static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
++{
++	struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
++							high_napi);
++
++	netif_info(priv, intr, priv->ndev, "%s\n", __func__);
++
++	return pfe_eth_poll(priv, napi, 0, budget);
++}
++
++static const struct net_device_ops pfe_netdev_ops = {
++	.ndo_open = pfe_eth_open,
++	.ndo_stop = pfe_eth_close,
++	.ndo_start_xmit = pfe_eth_send_packet,
++	.ndo_select_queue = pfe_eth_select_queue,
++	.ndo_get_stats = pfe_eth_get_stats,
++	.ndo_set_mac_address = pfe_eth_set_mac_address,
++	.ndo_set_rx_mode = pfe_eth_set_multi,
++	.ndo_set_features = pfe_eth_set_features,
++	.ndo_validate_addr = eth_validate_addr,
++};
++
++/* pfe_eth_init_one
++ */
++static int pfe_eth_init_one(struct pfe *pfe, int id)
++{
++	struct net_device *ndev = NULL;
++	struct pfe_eth_priv_s *priv = NULL;
++	struct ls1012a_eth_platform_data *einfo;
++	struct ls1012a_mdio_platform_data *minfo;
++	struct ls1012a_pfe_platform_data *pfe_info;
++	int err;
++
++	/* Extract platform data */
++	pfe_info = (struct ls1012a_pfe_platform_data *)
++					pfe->dev->platform_data;
++	if (!pfe_info) {
++		pr_err(
++			"%s: pfe missing additional platform data\n"
++			, __func__);
++		err = -ENODEV;
++		goto err0;
++	}
++
++	einfo = (struct ls1012a_eth_platform_data *)
++				pfe_info->ls1012a_eth_pdata;
++
++	/* einfo should never be NULL, but there is no harm in checking */
++	if (!einfo) {
++		pr_err(
++			"%s: pfe missing additional gemacs platform data\n"
++			, __func__);
++		err = -ENODEV;
++		goto err0;
++	}
++
++	minfo = (struct ls1012a_mdio_platform_data *)
++				pfe_info->ls1012a_mdio_pdata;
++
++	/* minfo should never be NULL, but there is no harm in checking */
++	if (!minfo) {
++		pr_err(
++			"%s: pfe missing additional mdios platform data\n",
++			 __func__);
++		err = -ENODEV;
++		goto err0;
++	}
++
++	/* Create an ethernet device instance */
++	ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
++
++	if (!ndev) {
++		pr_err("%s: gemac %d device allocation failed\n",
++		       __func__, einfo[id].gem_id);
++		err = -ENOMEM;
++		goto err0;
++	}
++
++	priv = netdev_priv(ndev);
++	priv->ndev = ndev;
++	priv->id = einfo[id].gem_id;
++	priv->pfe = pfe;
++
++	SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
++
++	pfe->eth.eth_priv[id] = priv;
++
++	/* Record this GEMAC's platform info and register bases in priv */
++	priv->einfo = &einfo[id];
++	priv->EMAC_baseaddr = cbus_emac_base[id];
++	priv->PHY_baseaddr = cbus_emac_base[0];
++	priv->GPI_baseaddr = cbus_gpi_base[id];
++
++#define HIF_GEMAC_TMUQ_BASE	6
++	priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
++	priv->high_tmu_q = priv->low_tmu_q + 1;
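++	/* Each GEMAC owns a pair of HIF TMU queues, starting at queue 6 */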
++
++	spin_lock_init(&priv->lock);
++
++	pfe_eth_fast_tx_timeout_init(priv);
++
++	/* Copy the station address into the dev structure */
++	memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
++
++	/* Initialize mdio */
++	if (minfo[id].enabled) {
++		err = pfe_eth_mdio_init(priv, &minfo[id]);
++		if (err) {
++			netdev_err(ndev, "%s: pfe_eth_mdio_init() failed\n",
++				   __func__);
++			goto err2;
++		}
++	}
++
++	ndev->mtu = 1500;
++
++	/* Set MTU limits */
++	ndev->min_mtu = ETH_MIN_MTU;
++	ndev->max_mtu = JUMBO_FRAME_SIZE;
++
++	/* Supported features (checksum offload has been validated) */
++	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
++		NETIF_F_IPV6_CSUM | NETIF_F_SG;
++
++	/* enabled by default */
++	ndev->features = ndev->hw_features;
++
++	priv->usr_features = ndev->features;
++
++	ndev->netdev_ops = &pfe_netdev_ops;
++
++	ndev->ethtool_ops = &pfe_ethtool_ops;
++
++	/* Enable basic messages by default */
++	priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
++				NETIF_MSG_PROBE;
++
++	netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
++		       HIF_RX_POLL_WEIGHT - 16);
++	netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
++		       HIF_RX_POLL_WEIGHT - 16);
++	netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
++		       HIF_RX_POLL_WEIGHT - 16);
++
++	err = register_netdev(ndev);
++
++	if (err) {
++		netdev_err(ndev, "register_netdev() failed\n");
++		goto err3;
++	}
++	device_init_wakeup(&ndev->dev, WAKE_MAGIC);
++
++	if (!(priv->einfo->phy_flags & GEMAC_NO_PHY)) {
++		err = pfe_phy_init(ndev);
++		if (err) {
++			netdev_err(ndev, "%s: pfe_phy_init() failed\n",
++				   __func__);
++			goto err4;
++		}
++	}
++
++	netif_carrier_on(ndev);
++
++	/* Create all the sysfs files */
++	if (pfe_eth_sysfs_init(ndev))
++		goto err4;
++
++	netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
++		   __func__, priv->EMAC_baseaddr);
++
++	return 0;
++err4:
++	unregister_netdev(ndev);
++err3:
++	pfe_eth_mdio_exit(priv->mii_bus);
++err2:
++	free_netdev(priv->ndev);
++err0:
++	return err;
++}
++
++/* pfe_eth_init
++ */
++int pfe_eth_init(struct pfe *pfe)
++{
++	int ii = 0;
++	int err;
++
++	pr_info("%s\n", __func__);
++
++	cbus_emac_base[0] = EMAC1_BASE_ADDR;
++	cbus_emac_base[1] = EMAC2_BASE_ADDR;
++
++	cbus_gpi_base[0] = EGPI1_BASE_ADDR;
++	cbus_gpi_base[1] = EGPI2_BASE_ADDR;
++
++	for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
++		err = pfe_eth_init_one(pfe, ii);
++		if (err)
++			goto err0;
++	}
++
++	return 0;
++
++err0:
++	while (ii--)
++		pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
++
++	return err;
++}
++
++/* pfe_eth_exit_one
++ */
++static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
++{
++	netif_info(priv, probe, priv->ndev, "%s\n", __func__);
++
++	pfe_eth_sysfs_exit(priv->ndev);
++
++	unregister_netdev(priv->ndev);
++
++	if (!(priv->einfo->phy_flags & GEMAC_NO_PHY))
++		pfe_phy_exit(priv->ndev);
++
++	if (priv->mii_bus)
++		pfe_eth_mdio_exit(priv->mii_bus);
++
++	free_netdev(priv->ndev);
++}
++
++/* pfe_eth_exit
++ */
++void pfe_eth_exit(struct pfe *pfe)
++{
++	int ii;
++
++	pr_info("%s\n", __func__);
++
++	for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
++		pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
++}
+diff --git a/drivers/staging/fsl_ppfe/pfe_eth.h b/drivers/staging/fsl_ppfe/pfe_eth.h
+new file mode 100644
+index 00000000..721bef3e
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_eth.h
+@@ -0,0 +1,184 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_ETH_H_
++#define _PFE_ETH_H_
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++#include <linux/phy.h>
++#include <linux/clk.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++
++#define PFE_ETH_NAPI_STATS
++#define PFE_ETH_TX_STATS
++
++#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
++#define LRO_LEN_COUNT_MAX	32
++#define LRO_NB_COUNT_MAX	32
++
++#define PFE_PAUSE_FLAG_ENABLE		1
++#define PFE_PAUSE_FLAG_AUTONEG		2
++
++/* GEMAC configured by SW */
++/* GEMAC configured by phy lines (not for MII/GMII) */
++
++#define GEMAC_SW_FULL_DUPLEX    BIT(9)
++#define GEMAC_SW_SPEED_10M      (0 << 12)
++#define GEMAC_SW_SPEED_100M     BIT(12)
++#define GEMAC_SW_SPEED_1G       (2 << 12)
++
++#define GEMAC_NO_PHY            BIT(0)
++
++struct ls1012a_eth_platform_data {
++	/* device specific information */
++	u32 device_flags;
++	char name[16];
++
++	/* board specific information */
++	u32 mii_config;
++	u32 phy_flags;
++	u32 gem_id;
++	u32 bus_id;
++	u32 phy_id;
++	u32 mdio_muxval;
++	u8 mac_addr[ETH_ALEN];
++};
++
++struct ls1012a_mdio_platform_data {
++	int enabled;
++	int irq[32];
++	u32 phy_mask;
++	int mdc_div;
++};
++
++struct ls1012a_pfe_platform_data {
++	struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
++	struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
++};
++
++#define NUM_GEMAC_SUPPORT	2
++#define DRV_NAME		"pfe-eth"
++#define DRV_VERSION		"1.0"
++
++#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS	3
++#define TX_POLL_TIMEOUT_MS	1000
++
++#define EMAC_TXQ_CNT	16
++#define EMAC_TXQ_DEPTH	(HIF_TX_DESC_NT)
++
++#define JUMBO_FRAME_SIZE	10258
++/*
++ * Client Tx queue threshold, for txQ flush condition.
++ * It must be smaller than the queue size (in case we ever change it in the
++ * future).
++ */
++#define HIF_CL_TX_FLUSH_MARK	32
++
++/*
++ * Max number of TX resources (HIF descriptors or skbs) that will be released
++ * in a single go during batch recycling.
++ * Should be lower than the flush mark so the SW can provide the HW with a
++ * continuous stream of packets instead of bursts.
++ */
++#define TX_FREE_MAX_COUNT 16
++#define EMAC_RXQ_CNT	3
++#define EMAC_RXQ_DEPTH	HIF_RX_DESC_NT
++/* make sure clients can receive a full burst of packets */
++#define EMAC_RMON_TXBYTES_POS	0x00
++#define EMAC_RMON_RXBYTES_POS	0x14
++
++#define EMAC_QUEUENUM_MASK      (emac_txq_cnt - 1)
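++/* emac_txq_cnt must be a power of two for this mask to yield valid queues */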
++#define EMAC_MDIO_TIMEOUT	1000
++#define MAX_UC_SPEC_ADDR_REG 31
++
++struct pfe_eth_fast_timer {
++	int queuenum;
++	struct hrtimer timer;
++	void *base;
++};
++
++struct pfe_eth_priv_s {
++	struct pfe		*pfe;
++	struct hif_client_s	client;
++	struct napi_struct	lro_napi;
++	struct napi_struct	low_napi;
++	struct napi_struct	high_napi;
++	int			low_tmu_q;
++	int			high_tmu_q;
++	struct net_device_stats stats;
++	struct net_device	*ndev;
++	int			id;
++	int			promisc;
++	unsigned int		msg_enable;
++	unsigned int		usr_features;
++
++	spinlock_t		lock; /* protect member variables */
++	unsigned int		event_status;
++	int			irq;
++	void			*EMAC_baseaddr;
++	/* This points to the EMAC base from where we access PHY */
++	void			*PHY_baseaddr;
++	void			*GPI_baseaddr;
++	/* PHY stuff */
++	struct phy_device	*phydev;
++	int			oldspeed;
++	int			oldduplex;
++	int			oldlink;
++	/* mdio info */
++	int			mdc_div;
++	struct mii_bus		*mii_bus;
++	struct clk		*gemtx_clk;
++	int			wol;
++	int			pause_flag;
++
++	int			default_priority;
++	struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
++
++	struct ls1012a_eth_platform_data *einfo;
++	struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
++
++#ifdef PFE_ETH_TX_STATS
++	unsigned int stop_queue_total[EMAC_TXQ_CNT];
++	unsigned int stop_queue_hif[EMAC_TXQ_CNT];
++	unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
++	unsigned int stop_queue_credit[EMAC_TXQ_CNT];
++	unsigned int clean_fail[EMAC_TXQ_CNT];
++	unsigned int was_stopped[EMAC_TXQ_CNT];
++#endif
++
++#ifdef PFE_ETH_NAPI_STATS
++	unsigned int napi_counters[NAPI_MAX_COUNT];
++#endif
++	unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
++};
++
++struct pfe_eth {
++	struct pfe_eth_priv_s *eth_priv[3];
++};
++
++int pfe_eth_init(struct pfe *pfe);
++void pfe_eth_exit(struct pfe *pfe);
++int pfe_eth_suspend(struct net_device *dev);
++int pfe_eth_resume(struct net_device *dev);
++int pfe_eth_mdio_reset(struct mii_bus *bus);
++
++#endif /* _PFE_ETH_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_firmware.c b/drivers/staging/fsl_ppfe/pfe_firmware.c
+new file mode 100644
+index 00000000..47462b9f
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
+@@ -0,0 +1,314 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++/*
++ * @file
++ * Contains all the functions to handle parsing and loading of PE firmware
++ * files.
++ */
++#include <linux/firmware.h>
++
++#include "pfe_mod.h"
++#include "pfe_firmware.h"
++#include "pfe/pfe.h"
++
++static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
++						 const char *section)
++{
++	struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
++	struct elf32_shdr *shdr;
++	struct elf32_shdr *shdr_shstr;
++	Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
++	Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
++	Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
++	Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
++	Elf32_Off shstr_offset;
++	Elf32_Word sh_name;
++	const char *name;
++	int i;
++
++	/* Section header strings */
++	shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
++					e_shentsize);
++	shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
++
++	for (i = 0; i < e_shnum; i++) {
++		shdr = (struct elf32_shdr *)(fw->data + e_shoff
++					     + i * e_shentsize);
++
++		sh_name = be32_to_cpu(shdr->sh_name);
++
++		name = (const char *)(fw->data + shstr_offset + sh_name);
++
++		if (!strcmp(name, section))
++			return shdr;
++	}
++
++	pr_err("%s: didn't find section %s\n", __func__, section);
++
++	return NULL;
++}
++
++#if defined(CFG_DIAGS)
++static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
++				*diags_info)
++{
++	struct elf32_shdr *shdr;
++	unsigned long offset, size;
++
++	shdr = get_elf_section_header(fw, ".pfe_diags_str");
++	if (shdr) {
++		offset = be32_to_cpu(shdr->sh_offset);
++		size = be32_to_cpu(shdr->sh_size);
++		diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
++		diags_info->diags_str_size = size;
++		diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
++		if (!diags_info->diags_str_array)
++			return -ENOMEM;
++		memcpy(diags_info->diags_str_array, fw->data + offset, size);
++
++		return 0;
++	} else {
++		return -1;
++	}
++}
++#endif
++
++static void pfe_check_version_info(const struct firmware *fw)
++{
++	static char *version;
++
++	struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
++
++	if (shdr) {
++		if (!version) {
++			/*
++			 * this is the first fw we load, use its version
++			 * string as reference (whatever it is)
++			 */
++			version = (char *)(fw->data +
++					be32_to_cpu(shdr->sh_offset));
++
++			pr_info("PFE binary version: %s\n", version);
++		} else {
++			/*
++			 * at least one firmware has already been loaded, so
++			 * this one can be checked against the reference
++			 */
++			if (strcmp(version, (char *)(fw->data +
++				be32_to_cpu(shdr->sh_offset)))) {
++				pr_info(
++				"WARNING: PFE firmware binaries from incompatible version\n");
++			}
++		}
++	} else {
++		/*
++		 * version cannot be verified, a potential issue that should
++		 * be reported
++		 */
++		pr_info(
++			 "WARNING: PFE firmware binaries from incompatible version\n");
++	}
++}
++
++/* PFE elf firmware loader.
++ * Loads an elf firmware image into a list of PE's (specified using a bitmask)
++ *
++ * @param pe_mask	Mask of PE ids to load firmware to
++ * @param fw		Pointer to the firmware image
++ *
++ * @return		0 on success, a negative value on error
++ *
++ */
++int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
++{
++	struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
++	Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
++	struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
++					be32_to_cpu(elf_hdr->e_shoff));
++	int id, section;
++	int rc;
++
++	pr_info("%s\n", __func__);
++
++	/* Some sanity checks */
++	if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
++		pr_err("%s: incorrect elf magic number\n", __func__);
++		return -EINVAL;
++	}
++
++	if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
++		pr_err("%s: incorrect elf class(%x)\n", __func__,
++		       elf_hdr->e_ident[EI_CLASS]);
++		return -EINVAL;
++	}
++
++	if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
++		pr_err("%s: incorrect elf data(%x)\n", __func__,
++		       elf_hdr->e_ident[EI_DATA]);
++		return -EINVAL;
++	}
++
++	if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
++		pr_err("%s: incorrect elf file type(%x)\n", __func__,
++		       be16_to_cpu(elf_hdr->e_type));
++		return -EINVAL;
++	}
++
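++	/* Only load sections the PE actually occupies (write/alloc/exec) */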
++	for (section = 0; section < sections; section++, shdr++) {
++		if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
++			SHF_EXECINSTR)))
++			continue;
++
++		for (id = 0; id < MAX_PE; id++)
++			if (pe_mask & (1 << id)) {
++				rc = pe_load_elf_section(id, fw->data, shdr,
++							 pfe->dev);
++				if (rc < 0)
++					goto err;
++			}
++	}
++
++	pfe_check_version_info(fw);
++
++	return 0;
++
++err:
++	return rc;
++}
++
++/* PFE firmware initialization.
++ * Loads different firmware files from filesystem.
++ * Initializes PE IMEM/DMEM and UTIL-PE DDR.
++ * Initializes control path symbol addresses (by looking them up in the elf
++ * firmware files).
++ * Takes PEs out of reset.
++ *
++ * @return	0 on success, a negative value on error
++ *
++ */
++int pfe_firmware_init(struct pfe *pfe)
++{
++	const struct firmware *class_fw, *tmu_fw;
++	int rc = 0;
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	const char *util_fw_name;
++	const struct firmware *util_fw;
++#endif
++
++	pr_info("%s\n", __func__);
++
++	if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
++		pr_err("%s: request firmware %s failed\n", __func__,
++		       CLASS_FIRMWARE_FILENAME);
++		rc = -ETIMEDOUT;
++		goto err0;
++	}
++
++	if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
++		pr_err("%s: request firmware %s failed\n", __func__,
++		       TMU_FIRMWARE_FILENAME);
++		rc = -ETIMEDOUT;
++		goto err1;
++	}
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	util_fw_name = UTIL_FIRMWARE_FILENAME;
++
++	if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
++		pr_err("%s: request firmware %s failed\n", __func__,
++		       util_fw_name);
++		rc = -ETIMEDOUT;
++		goto err2;
++	}
++#endif
++	rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
++	if (rc < 0) {
++		pr_err("%s: class firmware load failed\n", __func__);
++		goto err3;
++	}
++
++#if defined(CFG_DIAGS)
++	rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
++	if (rc < 0) {
++		pr_warn(
++			"PFE diags won't be available for class PEs\n");
++		rc = 0;
++	}
++#endif
++
++	rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
++	if (rc < 0) {
++		pr_err("%s: tmu firmware load failed\n", __func__);
++		goto err3;
++	}
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
++	if (rc < 0) {
++		pr_err("%s: util firmware load failed\n", __func__);
++		goto err3;
++	}
++
++#if defined(CFG_DIAGS)
++	rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
++	if (rc < 0) {
++		pr_warn(
++			"PFE diags won't be available for util PE\n");
++		rc = 0;
++	}
++#endif
++
++	util_enable();
++#endif
++
++	tmu_enable(0xf);
++	class_enable();
++
++err3:
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	release_firmware(util_fw);
++
++err2:
++#endif
++	release_firmware(tmu_fw);
++
++err1:
++	release_firmware(class_fw);
++
++err0:
++	return rc;
++}
++
++/* PFE firmware cleanup
++ * Puts PEs in reset
++ */
++void pfe_firmware_exit(struct pfe *pfe)
++{
++	pr_info("%s\n", __func__);
++
++	if (pe_reset_all(&pfe->ctrl) != 0)
++		pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
++
++	class_disable();
++	tmu_disable(0xf);
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	util_disable();
++#endif
++}
+diff --git a/drivers/staging/fsl_ppfe/pfe_firmware.h b/drivers/staging/fsl_ppfe/pfe_firmware.h
+new file mode 100644
+index 00000000..5ade848b
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_FIRMWARE_H_
++#define _PFE_FIRMWARE_H_
++
++#define CLASS_FIRMWARE_FILENAME		"ppfe_class_ls1012a.elf"
++#define TMU_FIRMWARE_FILENAME		"ppfe_tmu_ls1012a.elf"
++
++#define PFE_FW_CHECK_PASS		0
++#define PFE_FW_CHECK_FAIL		1
++#define NUM_PFE_FW				3
++
++int pfe_firmware_init(struct pfe *pfe);
++void pfe_firmware_exit(struct pfe *pfe);
++
++#endif /* _PFE_FIRMWARE_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_hal.c b/drivers/staging/fsl_ppfe/pfe_hal.c
+new file mode 100644
+index 00000000..0915034b
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_hal.c
+@@ -0,0 +1,1516 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include "pfe_mod.h"
++#include "pfe/pfe.h"
++
++void *cbus_base_addr;
++void *ddr_base_addr;
++unsigned long ddr_phys_base_addr;
++unsigned int ddr_size;
++
++static struct pe_info pe[MAX_PE];
++
++/* Initializes the PFE library.
++ * Must be called before using any of the library functions.
++ *
++ * @param[in] cbus_base		CBUS virtual base address (as mapped in
++ * the host CPU address space)
++ * @param[in] ddr_base		PFE DDR range virtual base address (as
++ * mapped in the host CPU address space)
++ * @param[in] ddr_phys_base	PFE DDR range physical base address (as
++ * mapped in platform)
++ * @param[in] size		PFE DDR range size (as defined by the host
++ * software)
++ */
++void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
++		  unsigned int size)
++{
++	cbus_base_addr = cbus_base;
++	ddr_base_addr = ddr_base;
++	ddr_phys_base_addr = ddr_phys_base;
++	ddr_size = size;
++
++	pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
++	pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
++	pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
++	pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
++	pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
++	pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
++
++	pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
++	pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
++	pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
++	pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
++	pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
++	pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
++
++	pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
++	pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
++	pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
++	pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
++	pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
++	pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
++
++	pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
++	pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
++	pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
++	pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
++	pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
++	pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
++
++	pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
++	pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
++	pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
++	pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
++	pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
++	pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
++
++	pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
++	pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
++	pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
++	pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
++	pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
++	pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
++
++	pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
++	pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
++	pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
++	pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
++	pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
++	pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
++
++	pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
++	pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
++	pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
++	pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
++	pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
++	pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
++
++	pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
++	pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
++	pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
++	pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
++	pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
++	pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
++	pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
++	pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
++	pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
++#endif
++}
++
++/* Writes a buffer to PE internal memory from the host
++ * through indirect access registers.
++ *
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., UTIL_ID)
++ * @param[in] src		Buffer source address
++ * @param[in] mem_access_addr	DMEM destination address (must be 32bit
++ * aligned)
++ * @param[in] len		Number of bytes to copy
++ */
++void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
++			unsigned int len)
++{
++	u32 offset = 0, val, addr;
++	unsigned int len32 = len >> 2;
++	int i;
++
++	addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
++		PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
++
++	for (i = 0; i < len32; i++, offset += 4, src += 4) {
++		val = *(u32 *)src;
++		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
++		writel(addr + offset, pe[id].mem_access_addr);
++	}
++
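++	/* Write the trailing 1-3 bytes with a partial byte-enable */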
++	len = (len & 0x3);
++	if (len) {
++		val = 0;
++
++		addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
++			PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
++
++		for (i = 0; i < len; i++, src++)
++			val |= (*(u8 *)src) << (8 * i);
++
++		writel(cpu_to_be32(val), pe[id].mem_access_wdata);
++		writel(addr, pe[id].mem_access_addr);
++	}
++}
++
++/* Writes a buffer to PE internal data memory (DMEM) from the host
++ * through indirect access registers.
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., UTIL_ID)
++ * @param[in] src		Buffer source address
++ * @param[in] dst		DMEM destination address (must be 32bit
++ * aligned)
++ * @param[in] len		Number of bytes to copy
++ */
++void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
++{
++	pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
++				PE_MEM_ACCESS_DMEM, src, len);
++}
++
++/* Writes a buffer to PE internal program memory (PMEM) from the host
++ * through indirect access registers.
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., TMU3_ID)
++ * @param[in] src		Buffer source address
++ * @param[in] dst		PMEM destination address (must be 32bit
++ * aligned)
++ * @param[in] len		Number of bytes to copy
++ */
++void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
++{
++	pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
++				- 1)) | PE_MEM_ACCESS_IMEM, src, len);
++}
++
++/* Reads PE internal program memory (IMEM) from the host
++ * through indirect access registers.
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., TMU3_ID)
++ * @param[in] addr		PMEM read address (must be aligned on size)
++ * @param[in] size		Number of bytes to read (maximum 4, must not
++ * cross 32bit boundaries)
++ * @return			the data read (in PE endianness, i.e. BE).
++ */
++u32 pe_pmem_read(int id, u32 addr, u8 size)
++{
++	u32 offset = addr & 0x3;
++	u32 mask = 0xffffffff >> ((4 - size) << 3);
++	u32 val;
++
++	addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
++		| PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
++
++	writel(addr, pe[id].mem_access_addr);
++	val = be32_to_cpu(readl(pe[id].mem_access_rdata));
++
++	return (val >> (offset << 3)) & mask;
++}
++
++/* Writes PE internal data memory (DMEM) from the host
++ * through indirect access registers.
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., UTIL_ID)
++ * @param[in] addr		DMEM write address (must be aligned on size)
++ * @param[in] val		Value to write (in PE endianness, i.e. BE)
++ * @param[in] size		Number of bytes to write (maximum 4, must not
++ * cross 32bit boundaries)
++ */
++void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
++{
++	u32 offset = addr & 0x3;
++
++	addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
++		PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
++
++	/* Indirect access interface is byte swapping data being written */
++	writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
++	writel(addr, pe[id].mem_access_addr);
++}
++
++/* Reads PE internal data memory (DMEM) from the host
++ * through indirect access registers.
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., UTIL_ID)
++ * @param[in] addr		DMEM read address (must be aligned on size)
++ * @param[in] size		Number of bytes to read (maximum 4, must not
++ * cross 32bit boundaries)
++ * @return			the data read (in PE endianness, i.e. BE).
++ */
++u32 pe_dmem_read(int id, u32 addr, u8 size)
++{
++	u32 offset = addr & 0x3;
++	u32 mask = 0xffffffff >> ((4 - size) << 3);
++	u32 val;
++
++	addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
++			PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
++
++	writel(addr, pe[id].mem_access_addr);
++
++	/* Indirect access interface is byte swapping data being read */
++	val = be32_to_cpu(readl(pe[id].mem_access_rdata));
++
++	return (val >> (offset << 3)) & mask;
++}
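++
++/*
++ * Illustrative sketch (not part of the driver): a DMEM round trip through
++ * the indirect access interface above.  The DMEM offset 0x100 is a
++ * hypothetical location chosen only for the example.
++ *
++ *	u32 v;
++ *
++ *	pe_dmem_write(CLASS0_ID, cpu_to_be32(0x12345678), 0x100, 4);
++ *	v = be32_to_cpu(pe_dmem_read(CLASS0_ID, 0x100, 4));
++ *	v now holds 0x12345678
++ */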
++
++/* This function is used to write to CLASS internal bus peripherals (ccu,
++ * pe-lem) from the host
++ * through indirect access registers.
++ * @param[in]	val	value to write
++ * @param[in]	addr	Address to write to (must be aligned on size)
++ * @param[in]	size	Number of bytes to write (1, 2 or 4)
++ *
++ */
++void class_bus_write(u32 val, u32 addr, u8 size)
++{
++	u32 offset = addr & 0x3;
++
++	writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
++
++	addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
++			(size << 24);
++
++	writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
++	writel(addr, CLASS_BUS_ACCESS_ADDR);
++}
++
++/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
++ * through indirect access registers.
++ * @param[in] addr	Address to read from (must be aligned on size)
++ * @param[in] size	Number of bytes to read (1, 2 or 4)
++ * @return		the read data
++ *
++ */
++u32 class_bus_read(u32 addr, u8 size)
++{
++	u32 offset = addr & 0x3;
++	u32 mask = 0xffffffff >> ((4 - size) << 3);
++	u32 val;
++
++	writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
++
++	addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
++
++	writel(addr, CLASS_BUS_ACCESS_ADDR);
++	val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
++
++	return (val >> (offset << 3)) & mask;
++}
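++
++/*
++ * Illustrative sketch (not part of the driver): a read-modify-write of a
++ * 32-bit CLASS peripheral register.  'ccu_reg_addr' is a hypothetical
++ * class-bus address.
++ *
++ *	u32 v = class_bus_read(ccu_reg_addr, 4);
++ *
++ *	class_bus_write(v | 0x1, ccu_reg_addr, 4);
++ */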
++
++/* Writes data to the cluster memory (PE_LMEM)
++ * @param[in] dst	PE LMEM destination address (must be 32bit aligned)
++ * @param[in] src	Buffer source address
++ * @param[in] len	Number of bytes to copy
++ */
++void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
++{
++	u32 len32 = len >> 2;
++	int i;
++
++	for (i = 0; i < len32; i++, src += 4, dst += 4)
++		class_bus_write(*(u32 *)src, dst, 4);
++
++	if (len & 0x2) {
++		class_bus_write(*(u16 *)src, dst, 2);
++		src += 2;
++		dst += 2;
++	}
++
++	if (len & 0x1) {
++		class_bus_write(*(u8 *)src, dst, 1);
++		src++;
++		dst++;
++	}
++}
++
++/* Writes value to the cluster memory (PE_LMEM)
++ * @param[in] dst	PE LMEM destination address (must be 32bit aligned)
++ * @param[in] val	Value to write
++ * @param[in] len	Number of bytes to write
++ */
++void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
++{
++	u32 len32 = len >> 2;
++	int i;
++
++	val = val | (val << 8) | (val << 16) | (val << 24);
++
++	for (i = 0; i < len32; i++, dst += 4)
++		class_bus_write(val, dst, 4);
++
++	if (len & 0x2) {
++		class_bus_write(val, dst, 2);
++		dst += 2;
++	}
++
++	if (len & 0x1) {
++		class_bus_write(val, dst, 1);
++		dst++;
++	}
++}
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++
++/* Writes UTIL program memory (DDR) from the host.
++ *
++ * @param[in] addr	Address to write (virtual, must be aligned on size)
++ * @param[in] val		Value to write (in PE endianness, i.e. BE)
++ * @param[in] size		Number of bytes to write (2 or 4)
++ */
++static void util_pmem_write(u32 val, void *addr, u8 size)
++{
++	void *addr64 = (void *)((unsigned long)addr & ~0x7);
++	unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
++
++	/*
++	 * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
++	 * location
++	 */
++	if (size == 4)
++		writel(be32_to_cpu(val), addr64 + off);
++	else
++		writew(be16_to_cpu((u16)val), addr64 + off);
++}
++
++/* Writes a buffer to UTIL program memory (DDR) from the host.
++ *
++ * @param[in] dst	Address to write (virtual, must be at least 16bit
++ * aligned)
++ * @param[in] src	Buffer to write (in PE endianness, i.e. BE, must have
++ * same alignment as dst)
++ * @param[in] len	Number of bytes to write (must be at least 16bit
++ * aligned)
++ */
++static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
++{
++	unsigned int len32;
++	int i;
++
++	if ((unsigned long)src & 0x2) {
++		util_pmem_write(*(u16 *)src, dst, 2);
++		src += 2;
++		dst += 2;
++		len -= 2;
++	}
++
++	len32 = len >> 2;
++
++	for (i = 0; i < len32; i++, dst += 4, src += 4)
++		util_pmem_write(*(u32 *)src, dst, 4);
++
++	if (len & 0x2)
++		util_pmem_write(*(u16 *)src, dst, len & 0x2);
++}
++#endif
++
++/* Loads an elf section into pmem
++ * Code needs to be at least 16bit aligned and only PROGBITS sections are
++ * supported
++ *
++ * @param[in] id	PE identification (CLASS0_ID, ..., TMU0_ID, ...,
++ * TMU3_ID)
++ * @param[in] data	pointer to the elf firmware
++ * @param[in] shdr	pointer to the elf section header
++ *
++ */
++static int pe_load_pmem_section(int id, const void *data,
++				struct elf32_shdr *shdr)
++{
++	u32 offset = be32_to_cpu(shdr->sh_offset);
++	u32 addr = be32_to_cpu(shdr->sh_addr);
++	u32 size = be32_to_cpu(shdr->sh_size);
++	u32 type = be32_to_cpu(shdr->sh_type);
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	if (id == UTIL_ID) {
++		pr_err("%s: unsupported pmem section for UTIL\n",
++		       __func__);
++		return -EINVAL;
++	}
++#endif
++
++	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
++		pr_err(
++			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
++			, __func__, addr, (unsigned long)data + offset);
++
++		return -EINVAL;
++	}
++
++	if (addr & 0x1) {
++		pr_err("%s: load address(%x) is not 16bit aligned\n",
++		       __func__, addr);
++		return -EINVAL;
++	}
++
++	if (size & 0x1) {
++		pr_err("%s: load size(%x) is not 16bit aligned\n",
++		       __func__, size);
++		return -EINVAL;
++	}
++
++	switch (type) {
++	case SHT_PROGBITS:
++		pe_pmem_memcpy_to32(id, addr, data + offset, size);
++
++		break;
++
++	default:
++		pr_err("%s: unsupported section type(%x)\n", __func__,
++		       type);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/* Loads an elf section into dmem
++ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
++ * initialized to 0
++ *
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., UTIL_ID)
++ * @param[in] data		pointer to the elf firmware
++ * @param[in] shdr		pointer to the elf section header
++ *
++ */
++static int pe_load_dmem_section(int id, const void *data,
++				struct elf32_shdr *shdr)
++{
++	u32 offset = be32_to_cpu(shdr->sh_offset);
++	u32 addr = be32_to_cpu(shdr->sh_addr);
++	u32 size = be32_to_cpu(shdr->sh_size);
++	u32 type = be32_to_cpu(shdr->sh_type);
++	u32 size32 = size >> 2;
++	int i;
++
++	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
++		pr_err(
++			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
++			__func__, addr, (unsigned long)data + offset);
++
++		return -EINVAL;
++	}
++
++	if (addr & 0x3) {
++		pr_err("%s: load address(%x) is not 32bit aligned\n",
++		       __func__, addr);
++		return -EINVAL;
++	}
++
++	switch (type) {
++	case SHT_PROGBITS:
++		pe_dmem_memcpy_to32(id, addr, data + offset, size);
++		break;
++
++	case SHT_NOBITS:
++		for (i = 0; i < size32; i++, addr += 4)
++			pe_dmem_write(id, 0, addr, 4);
++
++		if (size & 0x3)
++			pe_dmem_write(id, 0, addr, size & 0x3);
++
++		break;
++
++	default:
++		pr_err("%s: unsupported section type(%x)\n", __func__,
++		       type);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/* Loads an elf section into DDR
++ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
++ * initialized to 0
++ *
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., UTIL_ID)
++ * @param[in] data		pointer to the elf firmware
++ * @param[in] shdr		pointer to the elf section header
++ *
++ */
++static int pe_load_ddr_section(int id, const void *data,
++			       struct elf32_shdr *shdr,
++			       struct device *dev) {
++	u32 offset = be32_to_cpu(shdr->sh_offset);
++	u32 addr = be32_to_cpu(shdr->sh_addr);
++	u32 size = be32_to_cpu(shdr->sh_size);
++	u32 type = be32_to_cpu(shdr->sh_type);
++	u32 flags = be32_to_cpu(shdr->sh_flags);
++
++	switch (type) {
++	case SHT_PROGBITS:
++		if (flags & SHF_EXECINSTR) {
++			if (id <= CLASS_MAX_ID) {
++				/* Do the loading only once in DDR */
++				if (id == CLASS0_ID) {
++					pr_err(
++						"%s: load address(%x) and elf file address(%lx) rcvd\n",
++						__func__, addr,
++						(unsigned long)data + offset);
++					if (((unsigned long)(data + offset)
++						& 0x3) != (addr & 0x3)) {
++						pr_err(
++							"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
++							, __func__, addr,
++						(unsigned long)data + offset);
++
++						return -EINVAL;
++					}
++
++					if (addr & 0x1) {
++						pr_err(
++							"%s: load address(%x) is not 16bit aligned\n"
++							, __func__, addr);
++						return -EINVAL;
++					}
++
++					if (size & 0x1) {
++						pr_err(
++							"%s: load length(%x) is not 16bit aligned\n"
++							, __func__, size);
++						return -EINVAL;
++					}
++					memcpy(DDR_PHYS_TO_VIRT(
++						DDR_PFE_TO_PHYS(addr)),
++						data + offset, size);
++				}
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++			} else if (id == UTIL_ID) {
++				if (((unsigned long)(data + offset) & 0x3)
++					!= (addr & 0x3)) {
++					pr_err(
++						"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
++						, __func__, addr,
++						(unsigned long)data + offset);
++
++					return -EINVAL;
++				}
++
++				if (addr & 0x1) {
++					pr_err(
++						"%s: load address(%x) is not 16bit aligned\n"
++						, __func__, addr);
++					return -EINVAL;
++				}
++
++				if (size & 0x1) {
++					pr_err(
++						"%s: load length(%x) is not 16bit aligned\n"
++						, __func__, size);
++					return -EINVAL;
++				}
++
++				util_pmem_memcpy(DDR_PHYS_TO_VIRT(
++							DDR_PFE_TO_PHYS(addr)),
++							data + offset, size);
++			}
++#endif
++			} else {
++				pr_err(
++					"%s: unsupported ddr section type(%x) for PE(%d)\n"
++						, __func__, type, id);
++				return -EINVAL;
++			}
++
++		} else {
++			memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
++				+ offset, size);
++		}
++
++		break;
++
++	case SHT_NOBITS:
++		memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
++
++		break;
++
++	default:
++		pr_err("%s: unsupported section type(%x)\n", __func__,
++		       type);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/* Loads an elf section into pe lmem
++ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
++ * initialized to 0
++ *
++ * @param[in] id		PE identification (CLASS0_ID,..., CLASS5_ID)
++ * @param[in] data		pointer to the elf firmware
++ * @param[in] shdr		pointer to the elf section header
++ *
++ */
++static int pe_load_pe_lmem_section(int id, const void *data,
++				   struct elf32_shdr *shdr)
++{
++	u32 offset = be32_to_cpu(shdr->sh_offset);
++	u32 addr = be32_to_cpu(shdr->sh_addr);
++	u32 size = be32_to_cpu(shdr->sh_size);
++	u32 type = be32_to_cpu(shdr->sh_type);
++
++	if (id > CLASS_MAX_ID) {
++		pr_err(
++			"%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
++			 __func__, type, id);
++		return -EINVAL;
++	}
++
++	if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
++		pr_err(
++			"%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
++			__func__, addr, (unsigned long)data + offset);
++
++		return -EINVAL;
++	}
++
++	if (addr & 0x3) {
++		pr_err("%s: load address(%x) is not 32bit aligned\n",
++		       __func__, addr);
++		return -EINVAL;
++	}
++
++	switch (type) {
++	case SHT_PROGBITS:
++		class_pe_lmem_memcpy_to32(addr, data + offset, size);
++		break;
++
++	case SHT_NOBITS:
++		class_pe_lmem_memset(addr, 0, size);
++		break;
++
++	default:
++		pr_err("%s: unsupported section type(%x)\n", __func__,
++		       type);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/* Loads an elf section into a PE
++ * For now it only supports loading a section to dmem (all PEs), pmem (class
++ * and tmu PEs), or DDR (util PE code)
++ *
++ * @param[in] id		PE identification (CLASS0_ID, ..., TMU0_ID,
++ * ..., UTIL_ID)
++ * @param[in] data		pointer to the elf firmware
++ * @param[in] shdr		pointer to the elf section header
++ *
++ */
++int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
++			struct device *dev) {
++	u32 addr = be32_to_cpu(shdr->sh_addr);
++	u32 size = be32_to_cpu(shdr->sh_size);
++
++	if (IS_DMEM(addr, size))
++		return pe_load_dmem_section(id, data, shdr);
++	else if (IS_PMEM(addr, size))
++		return pe_load_pmem_section(id, data, shdr);
++	else if (IS_PFE_LMEM(addr, size))
++		return 0;
++	else if (IS_PHYS_DDR(addr, size))
++		return pe_load_ddr_section(id, data, shdr, dev);
++	else if (IS_PE_LMEM(addr, size))
++		return pe_load_pe_lmem_section(id, data, shdr);
++
++	pr_err("%s: unsupported memory range(%x)\n", __func__,
++	       addr);
++	return 0;
++}
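++
++/*
++ * Illustrative sketch (not part of the driver), assuming a big-endian ELF
++ * firmware image laid out as this loader expects; 'fw_data' and 'dev' are
++ * hypothetical.  Each section header is handed to pe_load_elf_section(),
++ * which dispatches on the section's address range as above.
++ *
++ *	struct elf32_hdr *ehdr = (struct elf32_hdr *)fw_data;
++ *	struct elf32_shdr *shdr = (struct elf32_shdr *)(fw_data +
++ *					be32_to_cpu(ehdr->e_shoff));
++ *	int i;
++ *
++ *	for (i = 0; i < be16_to_cpu(ehdr->e_shnum); i++)
++ *		if (pe_load_elf_section(CLASS0_ID, fw_data, &shdr[i], dev) < 0)
++ *			return -EIO;
++ */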
++
++/**************************** BMU ***************************/
++
++/* Initializes a BMU block.
++ * @param[in] base	BMU block base address
++ * @param[in] cfg	BMU configuration
++ */
++void bmu_init(void *base, struct BMU_CFG *cfg)
++{
++	bmu_disable(base);
++
++	bmu_set_config(base, cfg);
++
++	bmu_reset(base);
++}
++
++/* Resets a BMU block.
++ * @param[in] base	BMU block base address
++ */
++void bmu_reset(void *base)
++{
++	writel(CORE_SW_RESET, base + BMU_CTRL);
++
++	/* Wait for self clear */
++	while (readl(base + BMU_CTRL) & CORE_SW_RESET)
++		;
++}
++
++/* Enables a BMU block.
++ * @param[in] base	BMU block base address
++ */
++void bmu_enable(void *base)
++{
++	writel(CORE_ENABLE, base + BMU_CTRL);
++}
++
++/* Disables a BMU block.
++ * @param[in] base	BMU block base address
++ */
++void bmu_disable(void *base)
++{
++	writel(CORE_DISABLE, base + BMU_CTRL);
++}
++
++/* Sets the configuration of a BMU block.
++ * @param[in] base	BMU block base address
++ * @param[in] cfg	BMU configuration
++ */
++void bmu_set_config(void *base, struct BMU_CFG *cfg)
++{
++	writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
++	writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
++	writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
++
++	/* Interrupts are never used */
++	writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
++	writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
++	writel(0x0, base + BMU_INT_ENABLE);
++}
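++
++/*
++ * Illustrative sketch (not part of the driver): bringing up a BMU with a
++ * hypothetical configuration.  'buf_base' is an assumed PFE buffer base
++ * address, and the count/size/watermark values are placeholders rather
++ * than tuned settings.
++ *
++ *	struct BMU_CFG cfg = {
++ *		.baseaddr = buf_base,
++ *		.count = 1024,
++ *		.size = 128,
++ *		.low_watermark = 0,
++ *		.high_watermark = 0,
++ *	};
++ *
++ *	bmu_init(BMU1_BASE_ADDR, &cfg);
++ *	bmu_enable(BMU1_BASE_ADDR);
++ */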
++
++/**************************** MTIP GEMAC ***************************/
++
++/* Enables the Rx Checksum Engine. With this enabled, frames with bad IP,
++ * TCP or UDP checksums are discarded
++ *
++ * @param[in] base	GEMAC base address.
++ */
++void gemac_enable_rx_checksum_offload(void *base)
++{
++	/*No configuration found to do this on the MTIP GEMAC */
++}
++
++/* Disable Rx Checksum Engine.
++ *
++ * @param[in] base	GEMAC base address.
++ */
++void gemac_disable_rx_checksum_offload(void *base)
++{
++	/*No configuration found to do this on the MTIP GEMAC */
++}
++
++/* GEMAC set speed.
++ * @param[in] base	GEMAC base address
++ * @param[in] gem_speed	GEMAC speed (10, 100 or 1000 Mbps)
++ */
++void gemac_set_speed(void *base, enum mac_speed gem_speed)
++{
++	u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
++	u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
++
++	switch (gem_speed) {
++	case SPEED_10M:
++			rcr |= EMAC_RCNTRL_RMII_10T;
++			break;
++
++	case SPEED_1000M:
++			ecr |= EMAC_ECNTRL_SPEED;
++			break;
++
++	case SPEED_100M:
++	default:
++			/*It is in 100M mode */
++			break;
++	}
++	writel(ecr, (base + EMAC_ECNTRL_REG));
++	writel(rcr, (base + EMAC_RCNTRL_REG));
++}
++
++/* GEMAC set duplex.
++ * @param[in] base	GEMAC base address
++ * @param[in] duplex	GEMAC duplex mode (Full, Half)
++ */
++void gemac_set_duplex(void *base, int duplex)
++{
++	if (duplex == DUPLEX_HALF) {
++		writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
++			+ EMAC_TCNTRL_REG);
++		writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
++			+ EMAC_RCNTRL_REG));
++	} else {
++		writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
++			+ EMAC_TCNTRL_REG);
++		writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
++			+ EMAC_RCNTRL_REG));
++	}
++}
++
++/* GEMAC set mode.
++ * @param[in] base	GEMAC base address
++ * @param[in] mode	GEMAC operation mode (MII, RMII, RGMII, SGMII)
++ */
++void gemac_set_mode(void *base, int mode)
++{
++	u32 val = readl(base + EMAC_RCNTRL_REG);
++
++	/*Remove loopback*/
++	val &= ~EMAC_RCNTRL_LOOP;
++
++	/*Enable flow control and MII mode*/
++	val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
++
++	writel(val, base + EMAC_RCNTRL_REG);
++}
++
++/* GEMAC enable function.
++ * @param[in] base	GEMAC base address
++ */
++void gemac_enable(void *base)
++{
++	writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
++		EMAC_ECNTRL_REG);
++}
++
++/* GEMAC disable function.
++ * @param[in] base	GEMAC base address
++ */
++void gemac_disable(void *base)
++{
++	writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
++		EMAC_ECNTRL_REG);
++}
++
++/* GEMAC TX disable function.
++ * @param[in] base	GEMAC base address
++ */
++void gemac_tx_disable(void *base)
++{
++	writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
++		EMAC_TCNTRL_REG);
++}
++
++void gemac_tx_enable(void *base)
++{
++	writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
++			EMAC_TCNTRL_REG);
++}
++
++/* Sets the hash register of the MAC.
++ * This register is used for matching unicast and multicast frames.
++ *
++ * @param[in] base	GEMAC base address.
++ * @param[in] hash	64-bit hash to be configured.
++ */
++void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
++{
++	writel(hash->bottom,  base + EMAC_GALR);
++	writel(hash->top, base + EMAC_GAUR);
++}
++
++void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
++		      unsigned int entry_index)
++{
++	if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
++		return;
++
++	entry_index = entry_index - 1;
++	if (entry_index < 1) {
++		writel(htonl(address->bottom),  base + EMAC_PHY_ADDR_LOW);
++		writel((htonl(address->top) | 0x8808), base +
++			EMAC_PHY_ADDR_HIGH);
++	} else {
++		writel(htonl(address->bottom),  base + ((entry_index - 1) * 8)
++			+ EMAC_SMAC_0_0);
++		writel((htonl(address->top) | 0x8808), base + ((entry_index -
++			1) * 8) + EMAC_SMAC_0_1);
++	}
++}
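++
++/*
++ * Illustrative sketch (not part of the driver): programming the primary
++ * station address.  Entry index 1 selects the EMAC_PHY_ADDR_LOW/HIGH pair;
++ * the address words below are placeholders.
++ *
++ *	struct pfe_mac_addr addr;
++ *
++ *	addr.bottom = 0x00112233;
++ *	addr.top = 0x44550000;
++ *	gemac_set_laddrN(base, &addr, 1);
++ */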
++
++void gemac_clear_laddrN(void *base, unsigned int entry_index)
++{
++	if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
++		return;
++
++	entry_index = entry_index - 1;
++	if (entry_index < 1) {
++		writel(0, base + EMAC_PHY_ADDR_LOW);
++		writel(0, base + EMAC_PHY_ADDR_HIGH);
++	} else {
++		writel(0,  base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
++		writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
++	}
++}
++
++/* Sets the loopback mode of the MAC.  This can be either no loopback for
++ * normal operation, local loopback through the MAC internal loopback module,
++ * or PHY loopback for external loopback through a PHY.  This asserts the
++ * external loop pin.
++ *
++ * @param[in] base	GEMAC base address.
++ * @param[in] gem_loop	Loopback mode to be enabled. LB_LOCAL - MAC
++ * Loopback,
++ *			LB_EXT - PHY Loopback.
++ */
++void gemac_set_loop(void *base, enum mac_loop gem_loop)
++{
++	pr_info("%s()\n", __func__);
++	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
++		EMAC_RCNTRL_REG));
++}
++
++/* GEMAC allow frames
++ * @param[in] base	GEMAC base address
++ */
++void gemac_enable_copy_all(void *base)
++{
++	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
++		EMAC_RCNTRL_REG));
++}
++
++/* GEMAC do not allow frames
++ * @param[in] base	GEMAC base address
++ */
++void gemac_disable_copy_all(void *base)
++{
++	writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
++		EMAC_RCNTRL_REG));
++}
++
++/* GEMAC allow broadcast function.
++ * @param[in] base	GEMAC base address
++ */
++void gemac_allow_broadcast(void *base)
++{
++	writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
++		EMAC_RCNTRL_REG);
++}
++
++/* GEMAC no broadcast function.
++ * @param[in] base	GEMAC base address
++ */
++void gemac_no_broadcast(void *base)
++{
++	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
++		EMAC_RCNTRL_REG);
++}
++
++/* GEMAC enable 1536 rx function.
++ * @param[in]	base	GEMAC base address
++ */
++void gemac_enable_1536_rx(void *base)
++{
++	/* Set 1536 as Maximum frame length */
++	writel(readl(base + EMAC_RCNTRL_REG) | (1536 << 16), base +
++		EMAC_RCNTRL_REG);
++}
++
++/* GEMAC enable jumbo function.
++ * @param[in]	base	GEMAC base address
++ */
++void gemac_enable_rx_jmb(void *base)
++{
++	writel(readl(base + EMAC_RCNTRL_REG) | (JUMBO_FRAME_SIZE << 16), base
++		+ EMAC_RCNTRL_REG);
++}
++
++/* GEMAC enable stacked vlan function.
++ * @param[in]	base	GEMAC base address
++ */
++void gemac_enable_stacked_vlan(void *base)
++{
++	/* MTIP doesn't support stacked vlan */
++}
++
++/* GEMAC enable pause rx function.
++ * @param[in] base	GEMAC base address
++ */
++void gemac_enable_pause_rx(void *base)
++{
++	writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
++	       base + EMAC_RCNTRL_REG);
++}
++
++/* GEMAC disable pause rx function.
++ * @param[in] base	GEMAC base address
++ */
++void gemac_disable_pause_rx(void *base)
++{
++	writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
++	       base + EMAC_RCNTRL_REG);
++}
++
++/* GEMAC enable pause tx function.
++ * @param[in] base GEMAC base address
++ */
++void gemac_enable_pause_tx(void *base)
++{
++	writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
++}
++
++/* GEMAC disable pause tx function.
++ * @param[in] base GEMAC base address
++ */
++void gemac_disable_pause_tx(void *base)
++{
++	writel(0x0, base + EMAC_RX_SECTION_EMPTY);
++}
++
++/* GEMAC wol configuration
++ * @param[in] base	GEMAC base address
++ * @param[in] wol_conf	WoL register configuration
++ */
++void gemac_set_wol(void *base, u32 wol_conf)
++{
++	u32  val = readl(base + EMAC_ECNTRL_REG);
++
++	if (wol_conf)
++		val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
++	else
++		val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
++	writel(val, base + EMAC_ECNTRL_REG);
++}
++
++/* Sets the GEMAC bus width
++ * @param[in] base	GEMAC base address
++ * @param[in] width	GEMAC bus width to be set; possible values are 32/64/128
++ */
++void gemac_set_bus_width(void *base, int width)
++{
++}
++
++/* Sets Gemac configuration.
++ * @param[in] base	GEMAC base address
++ * @param[in] cfg	GEMAC configuration
++ */
++void gemac_set_config(void *base, struct gemac_cfg *cfg)
++{
++	/*GEMAC config taken from VLSI */
++	writel(0x00000004, base + EMAC_TFWR_STR_FWD);
++	writel(0x00000005, base + EMAC_RX_SECTION_FULL);
++	writel(0x00003fff, base + EMAC_TRUNC_FL);
++	writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
++	writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
++
++	gemac_set_mode(base, cfg->mode);
++
++	gemac_set_speed(base, cfg->speed);
++
++	gemac_set_duplex(base, cfg->duplex);
++}
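++
++/*
++ * Illustrative sketch (not part of the driver): configuring a GEMAC for
++ * gigabit full duplex.  DUPLEX_FULL is the standard kernel constant; the
++ * mode field is effectively ignored here since gemac_set_mode() above
++ * forces MII mode.
++ *
++ *	struct gemac_cfg cfg = {
++ *		.mode = 0,
++ *		.speed = SPEED_1000M,
++ *		.duplex = DUPLEX_FULL,
++ *	};
++ *
++ *	gemac_set_config(EMAC1_BASE_ADDR, &cfg);
++ *	gemac_enable(EMAC1_BASE_ADDR);
++ */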
++
++/**************************** GPI ***************************/
++
++/* Initializes a GPI block.
++ * @param[in] base	GPI base address
++ * @param[in] cfg	GPI configuration
++ */
++void gpi_init(void *base, struct gpi_cfg *cfg)
++{
++	gpi_reset(base);
++
++	gpi_disable(base);
++
++	gpi_set_config(base, cfg);
++}
++
++/* Resets a GPI block.
++ * @param[in] base	GPI base address
++ */
++void gpi_reset(void *base)
++{
++	writel(CORE_SW_RESET, base + GPI_CTRL);
++}
++
++/* Enables a GPI block.
++ * @param[in] base	GPI base address
++ */
++void gpi_enable(void *base)
++{
++	writel(CORE_ENABLE, base + GPI_CTRL);
++}
++
++/* Disables a GPI block.
++ * @param[in] base	GPI base address
++ */
++void gpi_disable(void *base)
++{
++	writel(CORE_DISABLE, base + GPI_CTRL);
++}
++
++/* Sets the configuration of a GPI block.
++ * @param[in] base	GPI base address
++ * @param[in] cfg	GPI configuration
++ */
++void gpi_set_config(void *base, struct gpi_cfg *cfg)
++{
++	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL),	base
++		+ GPI_LMEM_ALLOC_ADDR);
++	writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL),	base
++		+ GPI_LMEM_FREE_ADDR);
++	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL),	base
++		+ GPI_DDR_ALLOC_ADDR);
++	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),	base
++		+ GPI_DDR_FREE_ADDR);
++	writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
++	writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
++	writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
++	writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
++	writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
++	writel((DDR_HDR_SIZE << 16) |	LMEM_HDR_SIZE,	base + GPI_HDR_SIZE);
++	writel((DDR_BUF_SIZE << 16) |	LMEM_BUF_SIZE,	base + GPI_BUF_SIZE);
++
++	writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
++		GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
++	writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
++	writel(cfg->aseq_len,	base + GPI_DTX_ASEQ);
++	writel(1, base + GPI_TOE_CHKSUM_EN);
++
++	if (cfg->mtip_pause_reg) {
++		writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
++		writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
++	}
++}
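++
++/*
++ * Illustrative sketch (not part of the driver): initializing and enabling
++ * an egress GPI.  The retry count, threshold and sequence length values
++ * are placeholders, not tuned settings.
++ *
++ *	struct gpi_cfg cfg = {
++ *		.lmem_rtry_cnt = 0x40,
++ *		.tmlf_txthres = 0xbc,
++ *		.aseq_len = 0x40,
++ *		.mtip_pause_reg = 0,
++ *	};
++ *
++ *	gpi_init(EGPI1_BASE_ADDR, &cfg);
++ *	gpi_enable(EGPI1_BASE_ADDR);
++ */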
++
++/**************************** CLASSIFIER ***************************/
++
++/* Initializes CLASSIFIER block.
++ * @param[in] cfg	CLASSIFIER configuration
++ */
++void class_init(struct class_cfg *cfg)
++{
++	class_reset();
++
++	class_disable();
++
++	class_set_config(cfg);
++}
++
++/* Resets CLASSIFIER block.
++ *
++ */
++void class_reset(void)
++{
++	writel(CORE_SW_RESET, CLASS_TX_CTRL);
++}
++
++/* Enables all CLASS-PE's cores.
++ *
++ */
++void class_enable(void)
++{
++	writel(CORE_ENABLE, CLASS_TX_CTRL);
++}
++
++/* Disables all CLASS-PE's cores.
++ *
++ */
++void class_disable(void)
++{
++	writel(CORE_DISABLE, CLASS_TX_CTRL);
++}
++
++/*
++ * Sets the configuration of the CLASSIFIER block.
++ * @param[in] cfg	CLASSIFIER configuration
++ */
++void class_set_config(struct class_cfg *cfg)
++{
++	u32 val;
++
++	/* Initialize route table */
++	if (!cfg->resume)
++		memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
++		cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
++
++#if !defined(LS1012A_PFE_RESET_WA)
++	writel(cfg->pe_sys_clk_ratio,	CLASS_PE_SYS_CLK_RATIO);
++#endif
++
++	writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE,	CLASS_HDR_SIZE);
++	writel(LMEM_BUF_SIZE,				CLASS_LMEM_BUF_SIZE);
++	writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
++		CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
++		CLASS_ROUTE_HASH_ENTRY_SIZE);
++	writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
++	       CLASS_HIF_PARSE);
++
++	val = HASH_CRC_PORT_IP | QB2BUS_LE;
++
++#if defined(CONFIG_IP_ALIGNED)
++	val |= IP_ALIGNED;
++#endif
++
++	/*
++	 *  Class PE packet steering will only work if TOE mode, bridge fetch or
++	 * route fetch are enabled (see class/qb_fet.v). Route fetch would
++	 * trigger additional memory copies (likely from DDR because of hash
++	 * table size, which cannot be reduced because PE software still
++	 * relies on hash value computed in HW), so when not in TOE mode we
++	 * simply enable HW bridge fetch even though we don't use it.
++	 */
++	if (cfg->toe_mode)
++		val |= CLASS_TOE;
++	else
++		val |= HW_BRIDGE_FETCH;
++
++	writel(val, CLASS_ROUTE_MULTI);
++
++	writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
++	       CLASS_ROUTE_TABLE_BASE);
++	writel(CLASS_PE0_RO_DM_ADDR0_VAL,		CLASS_PE0_RO_DM_ADDR0);
++	writel(CLASS_PE0_RO_DM_ADDR1_VAL,		CLASS_PE0_RO_DM_ADDR1);
++	writel(CLASS_PE0_QB_DM_ADDR0_VAL,		CLASS_PE0_QB_DM_ADDR0);
++	writel(CLASS_PE0_QB_DM_ADDR1_VAL,		CLASS_PE0_QB_DM_ADDR1);
++	writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR),	CLASS_TM_INQ_ADDR);
++
++	writel(23, CLASS_AFULL_THRES);
++	writel(23, CLASS_TSQ_FIFO_THRES);
++
++	writel(24, CLASS_MAX_BUF_CNT);
++	writel(24, CLASS_TSQ_MAX_CNT);
++}
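++
++/*
++ * Illustrative sketch (not part of the driver): a minimal CLASS bring-up
++ * with a hypothetical route table in DDR.  'route_base', the hash size and
++ * the clock ratio below are placeholders.
++ *
++ *	struct class_cfg cfg = {
++ *		.resume = 0,
++ *		.route_table_baseaddr = route_base,
++ *		.route_table_hash_bits = 15,
++ *		.pe_sys_clk_ratio = 1,
++ *		.toe_mode = 0,
++ *	};
++ *
++ *	class_init(&cfg);
++ *	class_enable();
++ */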
++
++/**************************** TMU ***************************/
++
++void tmu_reset(void)
++{
++	writel(SW_RESET, TMU_CTRL);
++}
++
++/* Initializes TMU block.
++ * @param[in] cfg	TMU configuration
++ */
++void tmu_init(struct tmu_cfg *cfg)
++{
++	int q, phyno;
++
++	tmu_disable(0xF);
++	mdelay(10);
++
++#if !defined(LS1012A_PFE_RESET_WA)
++	/* keep in soft reset */
++	writel(SW_RESET, TMU_CTRL);
++#endif
++	writel(0x3, TMU_SYS_GENERIC_CONTROL);
++	writel(750, TMU_INQ_WATERMARK);
++	writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
++		GPI_INQ_PKTPTR),	TMU_PHY0_INQ_ADDR);
++	writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
++		GPI_INQ_PKTPTR),	TMU_PHY1_INQ_ADDR);
++	writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
++		GPI_INQ_PKTPTR),	TMU_PHY3_INQ_ADDR);
++	writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
++	writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
++	writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
++	       TMU_BMU_INQ_ADDR);
++
++	writel(0x3FF,	TMU_TDQ0_SCH_CTRL);	/*
++						 * enabling all 10
++						 * schedulers [9:0] of each TDQ
++						 */
++	writel(0x3FF,	TMU_TDQ1_SCH_CTRL);
++	writel(0x3FF,	TMU_TDQ3_SCH_CTRL);
++
++#if !defined(LS1012A_PFE_RESET_WA)
++	writel(cfg->pe_sys_clk_ratio,	TMU_PE_SYS_CLK_RATIO);
++#endif
++
++#if !defined(LS1012A_PFE_RESET_WA)
++	writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr),	TMU_LLM_BASE_ADDR);
++	/* Extra packet pointers will be stored from this address onwards */
++
++	writel(cfg->llm_queue_len,	TMU_LLM_QUE_LEN);
++	writel(5,			TMU_TDQ_IIFG_CFG);
++	writel(DDR_BUF_SIZE,		TMU_BMU_BUF_SIZE);
++
++	writel(0x0,			TMU_CTRL);
++
++	/* MEM init */
++	pr_info("%s: mem init\n", __func__);
++	writel(MEM_INIT,	TMU_CTRL);
++
++	while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
++		;
++
++	/* LLM init */
++	pr_info("%s: lmem init\n", __func__);
++	writel(LLM_INIT,	TMU_CTRL);
++
++	while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
++		;
++#endif
++	/* set up each queue for tail drop */
++	for (phyno = 0; phyno < 4; phyno++) {
++		if (phyno == 2)
++			continue;
++		for (q = 0; q < 16; q++) {
++			u32 qdepth;
++
++			writel((phyno << 8) | q, TMU_TEQ_CTRL);
++			writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
++
++			if (phyno == 3)
++				qdepth = DEFAULT_TMU3_QDEPTH;
++			else
++				qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
++						DEFAULT_MAX_QDEPTH;
++
++			/* LOG: 68855 */
++			/*
++			 * The following is a workaround for the reordered
++			 * packet and BMU2 buffer leakage issue.
++			 */
++			if (CHIP_REVISION() == 0)
++				qdepth = 31;
++
++			writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
++			writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
++		}
++	}
++
++#ifdef CFG_LRO
++	/* Set TMU-3 queue 5 (LRO) in no-drop mode */
++	writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
++	writel(0, TMU_TEQ_QCFG);
++#endif
++
++	writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
++
++	writel(0x0, TMU_CTRL);
++}
++
++/* Enables TMU-PE cores.
++ * @param[in] pe_mask	TMU PE mask
++ */
++void tmu_enable(u32 pe_mask)
++{
++	writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
++}
++
++/* Disables TMU cores.
++ * @param[in] pe_mask	TMU PE mask
++ */
++void tmu_disable(u32 pe_mask)
++{
++	writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
++}
++
++/* Returns the TMU queue status
++ * @param[in] if_id	gem interface id or TMU index
++ * @return		bit mask of busy queues; zero means all
++ * queues are empty
++ */
++u32 tmu_qstatus(u32 if_id)
++{
++	return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
++		offsetof(struct pe_status, tmu_qstatus), 4));
++}
++
++u32 tmu_pkts_processed(u32 if_id)
++{
++	return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
++		offsetof(struct pe_status, rx), 4));
++}
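++
++/*
++ * Illustrative sketch (not part of the driver): a bounded wait for a TMU
++ * to drain its queues, e.g. before taking an interface down.  'if_id' is
++ * the interface whose TMU is being polled.
++ *
++ *	int retries = 100;
++ *
++ *	while (tmu_qstatus(if_id) && --retries)
++ *		mdelay(1);
++ */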
++
++/**************************** UTIL ***************************/
++
++/* Resets UTIL block.
++ */
++void util_reset(void)
++{
++	writel(CORE_SW_RESET, UTIL_TX_CTRL);
++}
++
++/* Initializes UTIL block.
++ * @param[in] cfg	UTIL configuration
++ */
++void util_init(struct util_cfg *cfg)
++{
++	writel(cfg->pe_sys_clk_ratio,   UTIL_PE_SYS_CLK_RATIO);
++}
++
++/* Enables UTIL-PE core.
++ *
++ */
++void util_enable(void)
++{
++	writel(CORE_ENABLE, UTIL_TX_CTRL);
++}
++
++/* Disables UTIL-PE core.
++ *
++ */
++void util_disable(void)
++{
++	writel(CORE_DISABLE, UTIL_TX_CTRL);
++}
++
++/**************************** HIF ***************************/
++/* Initializes HIF copy block.
++ *
++ */
++void hif_init(void)
++{
++	/*Initialize HIF registers*/
++	writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
++	       HIF_POLL_CTRL);
++}
++
++/* Enable hif tx DMA and interrupt
++ *
++ */
++void hif_tx_enable(void)
++{
++	writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
++	writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
++	       HIF_INT_ENABLE);
++}
++
++/* Disable hif tx DMA and interrupt
++ *
++ */
++void hif_tx_disable(void)
++{
++	u32	hif_int;
++
++	writel(0, HIF_TX_CTRL);
++
++	hif_int = readl(HIF_INT_ENABLE);
++	hif_int &= ~HIF_TXPKT_INT_EN;
++	writel(hif_int, HIF_INT_ENABLE);
++}
++
++/* Enable hif rx DMA and interrupt
++ *
++ */
++void hif_rx_enable(void)
++{
++	hif_rx_dma_start();
++	writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
++	       HIF_INT_ENABLE);
++}
++
++/* Disable hif rx DMA and interrupt
++ *
++ */
++void hif_rx_disable(void)
++{
++	u32	hif_int;
++
++	writel(0, HIF_RX_CTRL);
++
++	hif_int = readl(HIF_INT_ENABLE);
++	hif_int &= ~HIF_RXPKT_INT_EN;
++	writel(hif_int, HIF_INT_ENABLE);
++}
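++
++/*
++ * Illustrative sketch (not part of the driver): the bring-up order implied
++ * by the helpers above.
++ *
++ *	hif_init();
++ *	hif_tx_enable();
++ *	hif_rx_enable();
++ */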
+diff --git a/drivers/staging/fsl_ppfe/pfe_hif.c b/drivers/staging/fsl_ppfe/pfe_hif.c
+new file mode 100644
+index 00000000..6835e140
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_hif.c
+@@ -0,0 +1,1072 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/kernel.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/kthread.h>
++#include <linux/slab.h>
++
++#include <linux/io.h>
++#include <asm/irq.h>
++
++#include "pfe_mod.h"
++
++#define HIF_INT_MASK	(HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
++
++unsigned char napi_first_batch;
++
++static void pfe_tx_do_cleanup(unsigned long data);
++
++static int pfe_hif_alloc_descr(struct pfe_hif *hif)
++{
++	void *addr;
++	dma_addr_t dma_addr;
++	int err = 0;
++
++	pr_info("%s\n", __func__);
++	addr = dma_alloc_coherent(pfe->dev,
++				  HIF_RX_DESC_NT * sizeof(struct hif_desc) +
++				  HIF_TX_DESC_NT * sizeof(struct hif_desc),
++				  &dma_addr, GFP_KERNEL);
++
++	if (!addr) {
++		pr_err("%s: Could not allocate buffer descriptors!\n"
++			, __func__);
++		err = -ENOMEM;
++		goto err0;
++	}
++
++	hif->descr_baseaddr_p = dma_addr;
++	hif->descr_baseaddr_v = addr;
++	hif->rx_ring_size = HIF_RX_DESC_NT;
++	hif->tx_ring_size = HIF_TX_DESC_NT;
++
++	return 0;
++
++err0:
++	return err;
++}
++
++#if defined(LS1012A_PFE_RESET_WA)
++static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
++{
++	int ii;
++	struct hif_desc	*desc = hif->rx_base;
++
++	/*Mark all descriptors as LAST_BD */
++	for (ii = 0; ii < hif->rx_ring_size; ii++) {
++		desc->ctrl |= BD_CTRL_LAST_BD;
++		desc++;
++	}
++}
++
++struct class_rx_hdr_t {
++	u32     next_ptr;       /* ptr to the start of the first DDR buffer */
++	u16     length;         /* total packet length */
++	u16     phyno;          /* input physical port number */
++	u32     status;         /* gemac status bits */
++	u32     status2;            /* reserved for software usage */
++};
++
++/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
++ * except overflow
++ */
++#define STATUS_BAD_FRAME_ERR            BIT(16)
++#define STATUS_LENGTH_ERR               BIT(17)
++#define STATUS_CRC_ERR                  BIT(18)
++#define STATUS_TOO_SHORT_ERR            BIT(19)
++#define STATUS_TOO_LONG_ERR             BIT(20)
++#define STATUS_CODE_ERR                 BIT(21)
++#define STATUS_MC_HASH_MATCH            BIT(22)
++#define STATUS_CUMULATIVE_ARC_HIT       BIT(23)
++#define STATUS_UNICAST_HASH_MATCH       BIT(24)
++#define STATUS_IP_CHECKSUM_CORRECT      BIT(25)
++#define STATUS_TCP_CHECKSUM_CORRECT     BIT(26)
++#define STATUS_UDP_CHECKSUM_CORRECT     BIT(27)
++#define STATUS_OVERFLOW_ERR             BIT(28) /* GPI error */
++#define MIN_PKT_SIZE			64
++
++static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
++{
++	int i;
++
++	for (i = 0; i < len; i += sizeof(u32))	{
++		*dst = htonl(*src);
++		dst++; src++;
++	}
++}
++
++static void send_dummy_pkt_to_hif(void)
++{
++	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
++	u32 physaddr;
++	struct class_rx_hdr_t local_hdr;
++	static u32 dummy_pkt[] =  {
++		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
++		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
++		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
++		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
++
++	ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
++	if (!ddr_ptr)
++		return;
++
++	lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
++	if (!lmem_ptr)
++		return;
++
++	pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
++	physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
++
++	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
++
++	local_hdr.phyno = htons(0); /* RX_PHY_0 */
++	local_hdr.length = htons(MIN_PKT_SIZE);
++
++	local_hdr.next_ptr = htonl((u32)physaddr);
++	/*Mark checksum is correct */
++	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
++				STATUS_UDP_CHECKSUM_CORRECT |
++				STATUS_TCP_CHECKSUM_CORRECT |
++				STATUS_UNICAST_HASH_MATCH |
++				STATUS_CUMULATIVE_ARC_HIT));
++	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
++		     sizeof(local_hdr));
++
++	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
++		     0x40);
++
++	writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
++}
++
++void pfe_hif_rx_idle(struct pfe_hif *hif)
++{
++	int hif_stop_loop = 10;
++	u32 rx_status;
++
++	pfe_hif_disable_rx_desc(hif);
++	pr_info("Bringing hif to idle state...");
++	writel(0, HIF_INT_ENABLE);
++	/*If HIF Rx BDP is busy, send a dummy packet */
++	do {
++		rx_status = readl(HIF_RX_STATUS);
++		if (rx_status & BDP_CSR_RX_DMA_ACTV)
++			send_dummy_pkt_to_hif();
++
++		usleep_range(100, 150);
++	} while (--hif_stop_loop);
++
++	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
++		pr_info("Failed\n");
++	else
++		pr_info("Done\n");
++}
++#endif
++
++static void pfe_hif_free_descr(struct pfe_hif *hif)
++{
++	pr_info("%s\n", __func__);
++
++	dma_free_coherent(pfe->dev,
++			  hif->rx_ring_size * sizeof(struct hif_desc) +
++			  hif->tx_ring_size * sizeof(struct hif_desc),
++			  hif->descr_baseaddr_v, hif->descr_baseaddr_p);
++}
++
++void pfe_hif_desc_dump(struct pfe_hif *hif)
++{
++	struct hif_desc	*desc;
++	unsigned long desc_p;
++	int ii = 0;
++
++	pr_info("%s\n", __func__);
++
++	desc = hif->rx_base;
++	desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
++			hif->descr_baseaddr_p);
++
++	pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
++	for (ii = 0; ii < hif->rx_ring_size; ii++) {
++		pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
++			readl(&desc->status), readl(&desc->ctrl),
++			readl(&desc->data), readl(&desc->next));
++			desc++;
++	}
++
++	desc = hif->tx_base;
++	desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
++			hif->descr_baseaddr_p);
++
++	pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
++	for (ii = 0; ii < hif->tx_ring_size; ii++) {
++		pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
++			readl(&desc->status), readl(&desc->ctrl),
++			readl(&desc->data), readl(&desc->next));
++		desc++;
++	}
++}
++
++/* pfe_hif_release_buffers */
++static void pfe_hif_release_buffers(struct pfe_hif *hif)
++{
++	struct hif_desc	*desc;
++	int i = 0;
++
++	hif->rx_base = hif->descr_baseaddr_v;
++
++	pr_info("%s\n", __func__);
++
++	/*Free Rx buffers */
++	desc = hif->rx_base;
++	for (i = 0; i < hif->rx_ring_size; i++) {
++		if (readl(&desc->data)) {
++			if ((i < hif->shm->rx_buf_pool_cnt) &&
++			    (!hif->shm->rx_buf_pool[i])) {
++				/*
++				 * dma_unmap_single(hif->dev, desc->data,
++				 * hif->rx_buf_len[i], DMA_FROM_DEVICE);
++				 */
++				dma_unmap_single(hif->dev,
++						 DDR_PFE_TO_PHYS(
++						 readl(&desc->data)),
++						 hif->rx_buf_len[i],
++						 DMA_FROM_DEVICE);
++				hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
++			} else {
++				pr_err("%s: buffer pool already full\n"
++					, __func__);
++			}
++		}
++
++		writel(0, &desc->data);
++		writel(0, &desc->status);
++		writel(0, &desc->ctrl);
++		desc++;
++	}
++}
++
++/*
++ * pfe_hif_init_buffers
++ * This function initializes the HIF Rx/Tx ring descriptors and
++ * initializes the Rx queue with buffers.
++ */
++static int pfe_hif_init_buffers(struct pfe_hif *hif)
++{
++	struct hif_desc	*desc, *first_desc_p;
++	u32 data;
++	int i = 0;
++
++	pr_info("%s\n", __func__);
++
++	/* Check that enough Rx buffers are available in the shared memory */
++	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
++		return -ENOMEM;
++
++	hif->rx_base = hif->descr_baseaddr_v;
++	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
++
++	/*Initialize Rx descriptors */
++	desc = hif->rx_base;
++	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
++
++	for (i = 0; i < hif->rx_ring_size; i++) {
++		/* Initialize Rx buffers from the shared memory */
++
++		data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
++				pfe_pkt_size, DMA_FROM_DEVICE);
++		hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
++		hif->rx_buf_len[i] = pfe_pkt_size;
++		hif->shm->rx_buf_pool[i] = NULL;
++
++		if (likely(dma_mapping_error(hif->dev, data) == 0)) {
++			writel(DDR_PHYS_TO_PFE(data), &desc->data);
++		} else {
++			pr_err("%s : low on mem\n",  __func__);
++
++			goto err;
++		}
++
++		writel(0, &desc->status);
++
++		/*
++		 * Ensure everything else is written to DDR before
++		 * writing bd->ctrl
++		 */
++		wmb();
++
++		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
++			| BD_CTRL_DIR | BD_CTRL_DESC_EN
++			| BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
++
++		/* Chain descriptors */
++		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
++		desc++;
++	}
++
++	/* Overwrite last descriptor to chain it to first one*/
++	desc--;
++	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
++
++	hif->rxtoclean_index = 0;
++
++	/*Initialize Rx buffer descriptor ring base address */
++	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
++
++	hif->tx_base = hif->rx_base + hif->rx_ring_size;
++	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
++				hif->rx_ring_size;
++	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
++
++	/*Initialize tx descriptors */
++	desc = hif->tx_base;
++
++	for (i = 0; i < hif->tx_ring_size; i++) {
++		/* Chain descriptors */
++		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
++		writel(0, &desc->ctrl);
++		desc++;
++	}
++
++	/* Overwrite last descriptor to chain it to first one */
++	desc--;
++	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
++	hif->txavail = hif->tx_ring_size;
++	hif->txtosend = 0;
++	hif->txtoclean = 0;
++	hif->txtoflush = 0;
++
++	/*Initialize Tx buffer descriptor ring base address */
++	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
++
++	return 0;
++
++err:
++	pfe_hif_release_buffers(hif);
++	return -ENOMEM;
++}
++
++/*
++ * pfe_hif_client_register
++ *
++ * This function is used to register a client driver with the HIF driver.
++ *
++ * Return value:
++ * 0 - on successful registration
++ */
++static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
++				   struct hif_client_shm *client_shm)
++{
++	struct hif_client *client = &hif->client[client_id];
++	u32 i, cnt;
++	struct rx_queue_desc *rx_qbase;
++	struct tx_queue_desc *tx_qbase;
++	struct hif_rx_queue *rx_queue;
++	struct hif_tx_queue *tx_queue;
++	int err = 0;
++
++	pr_info("%s\n", __func__);
++
++	spin_lock_bh(&hif->tx_lock);
++
++	if (test_bit(client_id, &hif->shm->g_client_status[0])) {
++		pr_err("%s: client %d already registered\n",
++		       __func__, client_id);
++		err = -1;
++		goto unlock;
++	}
++
++	memset(client, 0, sizeof(struct hif_client));
++
++	/* Initialize client Rx queues baseaddr, size */
++
++	cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
++	/* Check if client is requesting more queues than supported */
++	if (cnt > HIF_CLIENT_QUEUES_MAX)
++		cnt = HIF_CLIENT_QUEUES_MAX;
++
++	client->rx_qn = cnt;
++	rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
++	for (i = 0; i < cnt; i++) {
++		rx_queue = &client->rx_q[i];
++		rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
++		rx_queue->size = client_shm->rx_qsize;
++		rx_queue->write_idx = 0;
++	}
++
++	/* Initialize client Tx queues baseaddr, size */
++	cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
++
++	/* Check if client is requesting more queues than supported */
++	if (cnt > HIF_CLIENT_QUEUES_MAX)
++		cnt = HIF_CLIENT_QUEUES_MAX;
++
++	client->tx_qn = cnt;
++	tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
++	for (i = 0; i < cnt; i++) {
++		tx_queue = &client->tx_q[i];
++		tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
++		tx_queue->size = client_shm->tx_qsize;
++		tx_queue->ack_idx = 0;
++	}
++
++	set_bit(client_id, &hif->shm->g_client_status[0]);
++
++unlock:
++	spin_unlock_bh(&hif->tx_lock);
++
++	return err;
++}
++
++/*
++ * pfe_hif_client_unregister
++ *
++ * This function is used to unregister a client from the HIF driver.
++ *
++ */
++static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
++{
++	pr_info("%s\n", __func__);
++
++	/*
++	 * Mark client as no longer available (which prevents further packet
++	 * reception for this client)
++	 */
++	spin_lock_bh(&hif->tx_lock);
++
++	if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
++		pr_err("%s: client %d not registered\n", __func__,
++		       client_id);
++
++		spin_unlock_bh(&hif->tx_lock);
++		return;
++	}
++
++	clear_bit(client_id, &hif->shm->g_client_status[0]);
++
++	spin_unlock_bh(&hif->tx_lock);
++}
++
++/*
++ * client_put_rxpacket-
++ * This function puts the Rx pkt in the given client Rx queue.
++ * It actually swaps the Rx pkt into the client Rx descriptor buffer
++ * and returns the free buffer taken from it.
++ *
++ * A NULL return value means the client Rx queue is full and the
++ * packet could not be sent to the client queue.
++ */
++static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
++				 u32 flags, u32 client_ctrl, u32 *rem_len)
++{
++	void *free_pkt = NULL;
++	struct rx_queue_desc *desc = queue->base + queue->write_idx;
++
++	if (readl(&desc->ctrl) & CL_DESC_OWN) {
++		if (page_mode) {
++			int rem_page_size = PAGE_SIZE -
++					PRESENT_OFST_IN_PAGE(pkt);
++			int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
++					pfe_pkt_headroom);
++			*rem_len = (rem_page_size - cur_pkt_size);
++			if (*rem_len) {
++				free_pkt = pkt + cur_pkt_size;
++				get_page(virt_to_page(free_pkt));
++			} else {
++				free_pkt = (void
++				*)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
++				*rem_len = pfe_pkt_size;
++			}
++		} else {
++			free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
++					GFP_DMA_PFE);
++			*rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
++		}
++
++		if (free_pkt) {
++			desc->data = pkt;
++			desc->client_ctrl = client_ctrl;
++			/*
++			 * Ensure everything else is written to DDR before
++			 * writing bd->ctrl
++			 */
++			smp_wmb();
++			writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
++			queue->write_idx = (queue->write_idx + 1)
++					    & (queue->size - 1);
++
++			free_pkt += pfe_pkt_headroom;
++		}
++	}
++
++	return free_pkt;
++}
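++
++/*
++ * Illustrative caller-side sketch (not part of the driver) of the swap
++ * contract above: the received packet is handed to the client queue and a
++ * replacement buffer comes back for the HIF Rx ring.  A NULL return means
++ * the client queue is full, so the caller leaves the descriptor in place
++ * and retries on the next poll.
++ *
++ *	free_buf = client_put_rxpacket(queue, pkt, len, flags, ctrl, &sz);
++ *	if (!free_buf)
++ *		break;
++ */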
++
++/*
++ * pfe_hif_rx_process-
++ * This function does pfe hif rx queue processing.
++ * Dequeue packet from Rx queue and send it to corresponding client queue
++ */
++static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
++{
++	struct hif_desc	*desc;
++	struct hif_hdr *pkt_hdr;
++	struct __hif_hdr hif_hdr;
++	void *free_buf;
++	int rtc, len, rx_processed = 0;
++	struct __hif_desc local_desc;
++	int flags;
++	unsigned int desc_p;
++	unsigned int buf_size = 0;
++
++	spin_lock_bh(&hif->lock);
++
++	rtc = hif->rxtoclean_index;
++
++	while (rx_processed < budget) {
++		desc = hif->rx_base + rtc;
++
++		__memcpy12(&local_desc, desc);
++
++		/* ACK pending Rx interrupt */
++		if (local_desc.ctrl & BD_CTRL_DESC_EN) {
++			writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
++
++			if (rx_processed == 0) {
++				if (napi_first_batch == 1) {
++					desc_p = hif->descr_baseaddr_p +
++					((unsigned long int)(desc) -
++					(unsigned long
++					int)hif->descr_baseaddr_v);
++					napi_first_batch = 0;
++				}
++			}
++
++			__memcpy12(&local_desc, desc);
++
++			if (local_desc.ctrl & BD_CTRL_DESC_EN)
++				break;
++		}
++
++		napi_first_batch = 0;
++
++#ifdef HIF_NAPI_STATS
++		hif->napi_counters[NAPI_DESC_COUNT]++;
++#endif
++		len = BD_BUF_LEN(local_desc.ctrl);
++		/*
++		 * dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
++		 * hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
++		 */
++		dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
++				 hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
++
++		pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
++
++		/* Parse the HIF header at the start of a new packet */
++		if (!hif->started) {
++			hif->started = 1;
++
++			__memcpy8(&hif_hdr, pkt_hdr);
++
++			hif->qno = hif_hdr.hdr.q_num;
++			hif->client_id = hif_hdr.hdr.client_id;
++			hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
++						hif_hdr.hdr.client_ctrl;
++			flags = CL_DESC_FIRST;
++
++		} else {
++			flags = 0;
++		}
++
++		if (local_desc.ctrl & BD_CTRL_LIFM)
++			flags |= CL_DESC_LAST;
++
++		/* Check that the client id is valid and still registered */
++		if ((hif->client_id >= HIF_CLIENTS_MAX) ||
++		    !(test_bit(hif->client_id,
++			&hif->shm->g_client_status[0]))) {
++			printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
++					   __func__,
++					   hif->client_id,
++					   hif->qno);
++
++			free_buf = pkt_hdr;
++
++			goto pkt_drop;
++		}
++
++		/* Check for a valid queue number */
++		if (hif->client[hif->client_id].rx_qn <= hif->qno) {
++			pr_info("%s: packet with invalid queue: %d\n"
++				, __func__, hif->qno);
++			hif->qno = 0;
++		}
++
++		free_buf =
++		client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
++				    (void *)pkt_hdr, len, flags,
++			hif->client_ctrl, &buf_size);
++
++		hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
++					hif->qno);
++
++		if (unlikely(!free_buf)) {
++#ifdef HIF_NAPI_STATS
++			hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
++#endif
++			/*
++			 * If we want to keep in polling mode to retry later,
++			 * we need to tell napi that we consumed
++			 * the full budget or we will hit a livelock scenario.
++			 * The core code keeps this napi instance
++			 * at the head of the list and none of the other
++			 * instances get to run
++			 */
++			rx_processed = budget;
++
++			if (flags & CL_DESC_FIRST)
++				hif->started = 0;
++
++			break;
++		}
++
++pkt_drop:
++		/*Fill free buffer in the descriptor */
++		hif->rx_buf_addr[rtc] = free_buf;
++		hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
++		writel((DDR_PHYS_TO_PFE
++			((u32)dma_map_single(hif->dev,
++			free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
++			&desc->data);
++		/*
++		 * Ensure everything else is written to DDR before
++		 * writing bd->ctrl
++		 */
++		wmb();
++		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
++			BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
++			&desc->ctrl);
++
++		rtc = (rtc + 1) & (hif->rx_ring_size - 1);
++
++		if (local_desc.ctrl & BD_CTRL_LIFM) {
++			if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
++				rx_processed++;
++
++#ifdef HIF_NAPI_STATS
++				hif->napi_counters[NAPI_PACKET_COUNT]++;
++#endif
++			}
++			hif->started = 0;
++		}
++	}
++
++	hif->rxtoclean_index = rtc;
++	spin_unlock_bh(&hif->lock);
++
++	/* we made some progress, re-start rx dma in case it stopped */
++	hif_rx_dma_start();
++
++	return rx_processed;
++}
++
++/*
++ * client_ack_txpacket-
++ * This function acks the Tx packet in the given client Tx queue by resetting
++ * the ownership bit in the descriptor.
++ */
++static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
++			       unsigned int q_no)
++{
++	struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
++	struct tx_queue_desc *desc = queue->base + queue->ack_idx;
++
++	if (readl(&desc->ctrl) & CL_DESC_OWN) {
++		writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
++		queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
++
++		return 0;
++
++	} else {
++		/*This should not happen */
++		pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
++		       hif->txtosend, hif->txtoclean, hif->txavail,
++			client_id, q_no, queue, queue->ack_idx);
++		WARN(1, "%s: doesn't own this descriptor", __func__);
++		return 1;
++	}
++}
++
++void __hif_tx_done_process(struct pfe_hif *hif, int count)
++{
++	struct hif_desc *desc;
++	struct hif_desc_sw *desc_sw;
++	int ttc, tx_avl;
++	int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
++
++	ttc = hif->txtoclean;
++	tx_avl = hif->txavail;
++
++	while ((tx_avl < hif->tx_ring_size) && count--) {
++		desc = hif->tx_base + ttc;
++
++		if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
++			break;
++
++		desc_sw = &hif->tx_sw_queue[ttc];
++
++		if (desc_sw->data) {
++			dma_unmap_single(hif->dev, desc_sw->data,
++					 desc_sw->len, DMA_TO_DEVICE);
++		}
++
++		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
++			pr_err("Invalid cl id %d\n", desc_sw->client_id);
++
++		pkts_done[desc_sw->client_id]++;
++
++		client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
++
++		ttc = (ttc + 1) & (hif->tx_ring_size - 1);
++		tx_avl++;
++	}
++
++	if (pkts_done[0])
++		hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
++	if (pkts_done[1])
++		hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
++
++	hif->txtoclean = ttc;
++	hif->txavail = tx_avl;
++
++	if (!count) {
++		tasklet_schedule(&hif->tx_cleanup_tasklet);
++	} else {
++		/*Enable Tx done interrupt */
++		writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
++		       HIF_INT_ENABLE);
++	}
++}
++
++static void pfe_tx_do_cleanup(unsigned long data)
++{
++	struct pfe_hif *hif = (struct pfe_hif *)data;
++
++	writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
++
++	hif_tx_done_process(hif, 64);
++}
++
++/*
++ * __hif_xmit_pkt -
++ * This function puts one packet in the HIF Tx queue
++ */
++void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
++			q_no, void *data, u32 len, unsigned int flags)
++{
++	struct hif_desc	*desc;
++	struct hif_desc_sw *desc_sw;
++
++	desc = hif->tx_base + hif->txtosend;
++	desc_sw = &hif->tx_sw_queue[hif->txtosend];
++
++	desc_sw->len = len;
++	desc_sw->client_id = client_id;
++	desc_sw->q_no = q_no;
++	desc_sw->flags = flags;
++
++	if (flags & HIF_DONT_DMA_MAP) {
++		desc_sw->data = 0;
++		writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
++	} else {
++		desc_sw->data = dma_map_single(hif->dev, data, len,
++						DMA_TO_DEVICE);
++		writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
++	}
++
++	hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
++	hif->txavail--;
++
++	if ((!((flags & HIF_DATA_VALID) && (flags &
++				HIF_LAST_BUFFER))))
++		goto skip_tx;
++
++	/*
++	 * Ensure everything else is written to DDR before
++	 * writing bd->ctrl
++	 */
++	wmb();
++
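++	/*
++	 * Arm every descriptor of the (possibly multi-buffer) frame in one
++	 * pass, from the oldest unflushed descriptor up to txtosend, so the
++	 * DMA engine never sees a partially built chain.
++	 */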
++	do {
++		desc_sw = &hif->tx_sw_queue[hif->txtoflush];
++		desc = hif->tx_base + hif->txtoflush;
++
++		if (desc_sw->flags & HIF_LAST_BUFFER) {
++			writel((BD_CTRL_LIFM |
++			       BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
++			       | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
++				BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
++				&desc->ctrl);
++		} else {
++			writel((BD_CTRL_DESC_EN |
++				BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
++		}
++		hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
++	} while (hif->txtoflush != hif->txtosend);
++
++skip_tx:
++	return;
++}
++
++static irqreturn_t wol_isr(int irq, void *dev_id)
++{
++	pr_info("WoL\n");
++	gemac_set_wol(EMAC1_BASE_ADDR, 0);
++	gemac_set_wol(EMAC2_BASE_ADDR, 0);
++	return IRQ_HANDLED;
++}
++
++/*
++ * hif_isr-
++ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
++ */
++static irqreturn_t hif_isr(int irq, void *dev_id)
++{
++	struct pfe_hif *hif = (struct pfe_hif *)dev_id;
++	int int_status;
++	int int_enable_mask;
++
++	/*Read hif interrupt source register */
++	int_status = readl_relaxed(HIF_INT_SRC);
++	int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
++
++	if ((int_status & HIF_INT) == 0)
++		return IRQ_NONE;
++
++	int_status &= ~(HIF_INT);
++
++	if (int_status & HIF_RXPKT_INT) {
++		int_status &= ~(HIF_RXPKT_INT);
++		int_enable_mask &= ~(HIF_RXPKT_INT);
++
++		napi_first_batch = 1;
++
++		if (napi_schedule_prep(&hif->napi)) {
++#ifdef HIF_NAPI_STATS
++			hif->napi_counters[NAPI_SCHED_COUNT]++;
++#endif
++			__napi_schedule(&hif->napi);
++		}
++	}
++
++	if (int_status & HIF_TXPKT_INT) {
++		int_status &= ~(HIF_TXPKT_INT);
++		int_enable_mask &= ~(HIF_TXPKT_INT);
++		/* Schedule tx cleanup tasklet */
++		tasklet_schedule(&hif->tx_cleanup_tasklet);
++	}
++
++	/*Disable interrupts, they will be enabled after they are serviced */
++	writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
++
++	if (int_status) {
++		pr_info("%s : Invalid interrupt : %d\n", __func__,
++			int_status);
++		writel(int_status, HIF_INT_SRC);
++	}
++
++	return IRQ_HANDLED;
++}
++
++void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
++{
++	unsigned int client_id = data1;
++
++	if (client_id >= HIF_CLIENTS_MAX) {
++		pr_err("%s: client id %d out of bounds\n", __func__,
++		       client_id);
++		return;
++	}
++
++	switch (req) {
++	case REQUEST_CL_REGISTER:
++			/* Request to register a client */
++			pr_info("%s: register client_id %d\n",
++				__func__, client_id);
++			pfe_hif_client_register(hif, client_id, (struct
++				hif_client_shm *)&hif->shm->client[client_id]);
++			break;
++
++	case REQUEST_CL_UNREGISTER:
++			pr_info("%s: unregister client_id %d\n",
++				__func__, client_id);
++
++			/* Request to unregister a client */
++			pfe_hif_client_unregister(hif, client_id);
++
++			break;
++
++	default:
++			pr_err("%s: unsupported request %d\n",
++			       __func__, req);
++			break;
++	}
++
++	/*
++	 * Process client Tx queues
++	 * Currently there is no check for pending Tx packets
++	 */
++}
++
++/*
++ * pfe_hif_rx_poll
++ *  This is the NAPI poll function that processes the HIF Rx queue.
++ */
++static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
++{
++	struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
++	int work_done;
++
++#ifdef HIF_NAPI_STATS
++	hif->napi_counters[NAPI_POLL_COUNT]++;
++#endif
++
++	work_done = pfe_hif_rx_process(hif, budget);
++
++	if (work_done < budget) {
++		napi_complete(napi);
++		writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
++		       HIF_INT_ENABLE);
++	}
++#ifdef HIF_NAPI_STATS
++	else
++		hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
++#endif
++
++	return work_done;
++}
++
++/*
++ * pfe_hif_init
++ * This function initializes the base addresses, IRQ, etc.
++ */
++int pfe_hif_init(struct pfe *pfe)
++{
++	struct pfe_hif *hif = &pfe->hif;
++	int err;
++
++	pr_info("%s\n", __func__);
++
++	hif->dev = pfe->dev;
++	hif->irq = pfe->hif_irq;
++
++	err = pfe_hif_alloc_descr(hif);
++	if (err)
++		goto err0;
++
++	if (pfe_hif_init_buffers(hif)) {
++		pr_err("%s: Could not initialize buffer descriptors\n",
++			__func__);
++		err = -ENOMEM;
++		goto err1;
++	}
++
++	/* Initialize NAPI for Rx processing */
++	init_dummy_netdev(&hif->dummy_dev);
++	netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
++		       HIF_RX_POLL_WEIGHT);
++	napi_enable(&hif->napi);
++
++	spin_lock_init(&hif->tx_lock);
++	spin_lock_init(&hif->lock);
++
++	hif_init();
++	hif_rx_enable();
++	hif_tx_enable();
++
++	/* Disable tx done interrupt */
++	writel(HIF_INT_MASK, HIF_INT_ENABLE);
++
++	gpi_enable(HGPI_BASE_ADDR);
++
++	err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
++	if (err) {
++		pr_err("%s: failed to get the hif IRQ = %d\n",
++		       __func__, hif->irq);
++		goto err1;
++	}
++
++	err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
++	if (err) {
++		pr_err("%s: failed to get the wol IRQ = %d\n",
++		       __func__, pfe->wol_irq);
++		goto err1;
++	}
++
++	tasklet_init(&hif->tx_cleanup_tasklet,
++		     (void(*)(unsigned long))pfe_tx_do_cleanup,
++		     (unsigned long)hif);
++
++	return 0;
++err1:
++	pfe_hif_free_descr(hif);
++err0:
++	return err;
++}
++
++/* pfe_hif_exit- */
++void pfe_hif_exit(struct pfe *pfe)
++{
++	struct pfe_hif *hif = &pfe->hif;
++
++	pr_info("%s\n", __func__);
++
++	tasklet_kill(&hif->tx_cleanup_tasklet);
++
++	spin_lock_bh(&hif->lock);
++	/* Make sure all clients are disabled */
++	hif->shm->g_client_status[0] = 0;
++	hif->shm->g_client_status[1] = 0;
++
++	spin_unlock_bh(&hif->lock);
++
++	/*Disable Rx/Tx */
++	gpi_disable(HGPI_BASE_ADDR);
++	hif_rx_disable();
++	hif_tx_disable();
++
++	napi_disable(&hif->napi);
++	netif_napi_del(&hif->napi);
++
++	free_irq(pfe->wol_irq, pfe);
++	free_irq(hif->irq, hif);
++
++	pfe_hif_release_buffers(hif);
++	pfe_hif_free_descr(hif);
++}
+diff --git a/drivers/staging/fsl_ppfe/pfe_hif.h b/drivers/staging/fsl_ppfe/pfe_hif.h
+new file mode 100644
+index 00000000..6e36f0c1
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_hif.h
+@@ -0,0 +1,211 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_HIF_H_
++#define _PFE_HIF_H_
++
++#include <linux/netdevice.h>
++
++#define HIF_NAPI_STATS
++
++#define HIF_CLIENT_QUEUES_MAX	16
++#define HIF_RX_POLL_WEIGHT	64
++
++#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
++#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
++#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
++					& HIF_RX_PKT_MIN_SIZE_MASK)
++#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
++					- 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
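++/* Worked example: ROUND_MIN_RX_SIZE(3000) = (3000 + 2047) & ~2047 = 4096 */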
++
++enum {
++	NAPI_SCHED_COUNT = 0,
++	NAPI_POLL_COUNT,
++	NAPI_PACKET_COUNT,
++	NAPI_DESC_COUNT,
++	NAPI_FULL_BUDGET_COUNT,
++	NAPI_CLIENT_FULL_COUNT,
++	NAPI_MAX_COUNT
++};
++
++/*
++ * The HIF_TX_DESC_NT value should always be greater than 4;
++ * otherwise HIF_TX_POLL_MARK will become zero.
++ */
++#define HIF_RX_DESC_NT		256
++#define HIF_TX_DESC_NT		2048
++
++#define HIF_FIRST_BUFFER	BIT(0)
++#define HIF_LAST_BUFFER		BIT(1)
++#define HIF_DONT_DMA_MAP	BIT(2)
++#define HIF_DATA_VALID		BIT(3)
++#define HIF_TSO			BIT(4)
++
++enum {
++	PFE_CL_GEM0 = 0,
++	PFE_CL_GEM1,
++	HIF_CLIENTS_MAX
++};
++
++/*structure to store client queue info */
++struct hif_rx_queue {
++	struct rx_queue_desc *base;
++	u32	size;
++	u32	write_idx;
++};
++
++struct hif_tx_queue {
++	struct tx_queue_desc *base;
++	u32	size;
++	u32	ack_idx;
++};
++
++/*Structure to store the client info */
++struct hif_client {
++	int	rx_qn;
++	struct hif_rx_queue	rx_q[HIF_CLIENT_QUEUES_MAX];
++	int	tx_qn;
++	struct hif_tx_queue	tx_q[HIF_CLIENT_QUEUES_MAX];
++};
++
++/*HIF hardware buffer descriptor */
++struct hif_desc {
++	u32 ctrl;
++	u32 status;
++	u32 data;
++	u32 next;
++};
++
++struct __hif_desc {
++	u32 ctrl;
++	u32 status;
++	u32 data;
++};
++
++struct hif_desc_sw {
++	dma_addr_t data;
++	u16 len;
++	u8 client_id;
++	u8 q_no;
++	u16 flags;
++};
++
++struct hif_hdr {
++	u8 client_id;
++	u8 q_num;
++	u16 client_ctrl;
++	u16 client_ctrl1;
++};
++
++struct __hif_hdr {
++	union {
++		struct hif_hdr hdr;
++		u32 word[2];
++	};
++};
++
++struct hif_ipsec_hdr {
++	u16	sa_handle[2];
++} __packed;
++
++/*  HIF_CTRL_TX... defines */
++#define HIF_CTRL_TX_CHECKSUM		BIT(2)
++
++/*  HIF_CTRL_RX... defines */
++#define HIF_CTRL_RX_OFFSET_OFST         (24)
++#define HIF_CTRL_RX_CHECKSUMMED		BIT(2)
++#define HIF_CTRL_RX_CONTINUED		BIT(1)
++
++struct pfe_hif {
++	/* To store registered clients in hif layer */
++	struct hif_client client[HIF_CLIENTS_MAX];
++	struct hif_shm *shm;
++	int	irq;
++
++	void	*descr_baseaddr_v;
++	unsigned long	descr_baseaddr_p;
++
++	struct hif_desc *rx_base;
++	u32	rx_ring_size;
++	u32	rxtoclean_index;
++	void	*rx_buf_addr[HIF_RX_DESC_NT];
++	int	rx_buf_len[HIF_RX_DESC_NT];
++	unsigned int qno;
++	unsigned int client_id;
++	unsigned int client_ctrl;
++	unsigned int started;
++
++	struct hif_desc *tx_base;
++	u32	tx_ring_size;
++	u32	txtosend;
++	u32	txtoclean;
++	u32	txavail;
++	u32	txtoflush;
++	struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
++
++/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
++	spinlock_t tx_lock;
++/* lock synchronizes hif rx queue processing */
++	spinlock_t lock;
++	struct net_device	dummy_dev;
++	struct napi_struct	napi;
++	struct device *dev;
++
++#ifdef HIF_NAPI_STATS
++	unsigned int napi_counters[NAPI_MAX_COUNT];
++#endif
++	struct tasklet_struct	tx_cleanup_tasklet;
++};
++
++void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
++			q_no, void *data, u32 len, unsigned int flags);
++int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
++		 void *data, unsigned int len);
++void __hif_tx_done_process(struct pfe_hif *hif, int count);
++void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
++				data2);
++int pfe_hif_init(struct pfe *pfe);
++void pfe_hif_exit(struct pfe *pfe);
++void pfe_hif_rx_idle(struct pfe_hif *hif);
++static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
++{
++	spin_lock_bh(&hif->tx_lock);
++	__hif_tx_done_process(hif, count);
++	spin_unlock_bh(&hif->tx_lock);
++}
++
++static inline void hif_tx_lock(struct pfe_hif *hif)
++{
++	spin_lock_bh(&hif->tx_lock);
++}
++
++static inline void hif_tx_unlock(struct pfe_hif *hif)
++{
++	spin_unlock_bh(&hif->tx_lock);
++}
++
++static inline int __hif_tx_avail(struct pfe_hif *hif)
++{
++	return hif->txavail;
++}
++
++#define __memcpy8(dst, src)		memcpy(dst, src, 8)
++#define __memcpy12(dst, src)		memcpy(dst, src, 12)
++#define __memcpy(dst, src, len)		memcpy(dst, src, len)
++
++#endif /* _PFE_HIF_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_hif_lib.c b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
+new file mode 100644
+index 00000000..837eaa24
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
+@@ -0,0 +1,601 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/workqueue.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/sched.h>
++#include <linux/skbuff.h>
++#include <linux/moduleparam.h>
++#include <linux/cpu.h>
++
++#include "pfe_mod.h"
++#include "pfe_hif.h"
++#include "pfe_hif_lib.h"
++
++unsigned int lro_mode;
++unsigned int page_mode;
++unsigned int tx_qos;
++unsigned int pfe_pkt_size;
++unsigned int pfe_pkt_headroom;
++unsigned int emac_txq_cnt;
++
++/*
++ * @pfe_hif_lib.c.
++ * Common functions used by HIF client drivers
++ */
++
++/*HIF shared memory Global variable */
++struct hif_shm ghif_shm;
++
++/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
++ * This function should be called after pfe_hif_exit
++ *
++ * @param[in] hif_shm		Shared memory address location in DDR
++ */
++static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
++{
++	int i;
++	void *pkt;
++
++	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
++		pkt = hif_shm->rx_buf_pool[i];
++		if (pkt) {
++			hif_shm->rx_buf_pool[i] = NULL;
++			pkt -= pfe_pkt_headroom;
++
++			if (page_mode)
++				put_page(virt_to_page(pkt));
++			else
++				kfree(pkt);
++		}
++	}
++}
++
++/* Initialize shared memory used between HIF driver and clients,
++ * allocate rx_buffer_pool required for HIF Rx descriptors.
++ * This function should be called before initializing HIF driver.
++ *
++ * @param[in] hif_shm		Shared memory address location in DDR
++ * @return			0 on success, <0 on failure to initialize
++ */
++static int pfe_hif_shm_init(struct hif_shm *hif_shm)
++{
++	int i;
++	void *pkt;
++
++	memset(hif_shm, 0, sizeof(struct hif_shm));
++	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
++
++	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
++		if (page_mode) {
++			pkt = (void *)__get_free_page(GFP_KERNEL |
++				GFP_DMA_PFE);
++		} else {
++			pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
++		}
++
++		if (pkt)
++			hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
++		else
++			goto err0;
++	}
++
++	return 0;
++
++err0:
++	pr_err("%s Low memory\n", __func__);
++	pfe_hif_shm_clean(hif_shm);
++	return -ENOMEM;
++}
++
++/* This function sends an indication to the HIF driver
++ *
++ * @param[in] hif	hif context
++ */
++static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
++					data2)
++{
++	hif_process_client_req(hif, req, data1, data2);
++}
++
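++/*
++ * The queue_mask bit acts as a latch: it stays set until the client re-arms
++ * it through hif_lib_event_handler_start(), so repeated indications for the
++ * same event/queue pair are coalesced into a single callback.
++ */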
++void hif_lib_indicate_client(int client_id, int event_type, int qno)
++{
++	struct hif_client_s *client = pfe->hif_client[client_id];
++
++	if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
++		HIF_CLIENT_QUEUES_MAX))
++		return;
++
++	if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
++		client->event_handler(client->priv, event_type, qno);
++}
++
++/* This function releases the Rx queue descriptor memory and pre-filled buffers
++ *
++ * @param[in] client	hif_client context
++ */
++static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
++{
++	struct rx_queue_desc *desc;
++	int qno, ii;
++	void *buf;
++
++	for (qno = 0; qno < client->rx_qn; qno++) {
++		desc = client->rx_q[qno].base;
++
++		for (ii = 0; ii < client->rx_q[qno].size; ii++) {
++			buf = (void *)desc->data;
++			if (buf) {
++				buf -= pfe_pkt_headroom;
++
++				if (page_mode)
++					free_page((unsigned long)buf);
++				else
++					kfree(buf);
++
++				desc->ctrl = 0;
++			}
++
++			desc++;
++		}
++	}
++
++	kfree(client->rx_qbase);
++}
++
++/* This function allocates memory for the rxq descriptors and pre-fills the rx
++ * queues with buffers.
++ * @param[in] client	client context
++ * @param[in] q_size	size of the rxQ, all queues are of same size
++ */
++static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
++						q_size)
++{
++	struct rx_queue_desc *desc;
++	struct hif_client_rx_queue *queue;
++	int ii, qno;
++
++	/*Allocate memory for the client queues */
++	client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
++				rx_queue_desc), GFP_KERNEL);
++	if (!client->rx_qbase)
++		goto err;
++
++	for (qno = 0; qno < client->rx_qn; qno++) {
++		queue = &client->rx_q[qno];
++
++		queue->base = client->rx_qbase + qno * q_size * sizeof(struct
++				rx_queue_desc);
++		queue->size = q_size;
++		queue->read_idx = 0;
++		queue->write_idx = 0;
++
++		pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
++			 queue->base, queue->size);
++	}
++
++	for (qno = 0; qno < client->rx_qn; qno++) {
++		queue = &client->rx_q[qno];
++		desc = queue->base;
++
++		for (ii = 0; ii < queue->size; ii++) {
++			desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
++					CL_DESC_OWN;
++			desc++;
++		}
++	}
++
++	return 0;
++
++err:
++	return 1;
++}
++
++
++static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
++{
++	pr_debug("%s\n", __func__);
++
++	/*
++	 * Check if there are any pending packets. The client must flush the
++	 * tx queues before unregistering by calling
++	 * hif_lib_tx_get_next_complete(), since HIF no longer indicates
++	 * completions once the client is unregistered.
++	 */
++	if (queue->tx_pending)
++		pr_err("%s: pending transmit packets\n", __func__);
++}
++
++static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
++{
++	int qno;
++
++	pr_debug("%s\n", __func__);
++
++	for (qno = 0; qno < client->tx_qn; qno++)
++		hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
++
++	kfree(client->tx_qbase);
++}
++
++static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
++						q_size)
++{
++	struct hif_client_tx_queue *queue;
++	int qno;
++
++	client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
++					tx_queue_desc), GFP_KERNEL);
++	if (!client->tx_qbase)
++		return 1;
++
++	for (qno = 0; qno < client->tx_qn; qno++) {
++		queue = &client->tx_q[qno];
++
++		queue->base = client->tx_qbase + qno * q_size * sizeof(struct
++				tx_queue_desc);
++		queue->size = q_size;
++		queue->read_idx = 0;
++		queue->write_idx = 0;
++		queue->tx_pending = 0;
++		queue->nocpy_flag = 0;
++		queue->prev_tmu_tx_pkts = 0;
++		queue->done_tmu_tx_pkts = 0;
++
++		pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
++			 queue->base, queue->size);
++	}
++
++	return 0;
++}
++
++static int hif_lib_event_dummy(void *priv, int event_type, int qno)
++{
++	return 0;
++}
++
++int hif_lib_client_register(struct hif_client_s *client)
++{
++	struct hif_shm *hif_shm;
++	struct hif_client_shm *client_shm;
++	int err, i;
++	/* int loop_cnt = 0; */
++
++	pr_debug("%s\n", __func__);
++
++	/*Allocate memory before spin_lock*/
++	if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
++		err = -ENOMEM;
++		goto err_rx;
++	}
++
++	if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
++		err = -ENOMEM;
++		goto err_tx;
++	}
++
++	spin_lock_bh(&pfe->hif.lock);
++	if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
++	    (pfe->hif_client[client->id])) {
++		err = -EINVAL;
++		goto err;
++	}
++
++	hif_shm = client->pfe->hif.shm;
++
++	if (!client->event_handler)
++		client->event_handler = hif_lib_event_dummy;
++
++	/*Initialize client specific shared memory */
++	client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
++	client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
++	client_shm->rx_qsize = client->rx_qsize;
++	client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
++	client_shm->tx_qsize = client->tx_qsize;
++	client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
++				(client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
++	/* spin_lock_init(&client->rx_lock); */
++
++	/* By default all events are unmasked */
++	for (i = 0; i < HIF_EVENT_MAX; i++)
++		client->queue_mask[i] = 0;
++
++	/*Indicate to HIF driver*/
++	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
++
++	pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
++		 __func__, client, client->id, client->tx_qsize,
++		 client->rx_qsize);
++
++	client->cpu_id = -1;
++
++	pfe->hif_client[client->id] = client;
++	spin_unlock_bh(&pfe->hif.lock);
++
++	return 0;
++
++err:
++	spin_unlock_bh(&pfe->hif.lock);
++	hif_lib_client_release_tx_buffers(client);
++
++err_tx:
++	hif_lib_client_release_rx_buffers(client);
++
++err_rx:
++	return err;
++}
++
++int hif_lib_client_unregister(struct hif_client_s *client)
++{
++	struct pfe *pfe = client->pfe;
++	u32 client_id = client->id;
++
++	pr_info("%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n",
++		__func__, client, client->id, client->tx_qsize,
++		client->rx_qsize);
++
++	spin_lock_bh(&pfe->hif.lock);
++	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
++
++	hif_lib_client_release_tx_buffers(client);
++	hif_lib_client_release_rx_buffers(client);
++	pfe->hif_client[client_id] = NULL;
++	spin_unlock_bh(&pfe->hif.lock);
++
++	return 0;
++}
++
++int hif_lib_event_handler_start(struct hif_client_s *client, int event,
++				int qno)
++{
++	struct hif_client_rx_queue *queue = &client->rx_q[qno];
++	struct rx_queue_desc *desc = queue->base + queue->read_idx;
++
++	if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
++		pr_debug("%s: Unsupported event : %d  queue number : %d\n",
++			 __func__, event, qno);
++		return -1;
++	}
++
++	test_and_clear_bit(qno, &client->queue_mask[event]);
++
++	switch (event) {
++	case EVENT_RX_PKT_IND:
++		if (!(desc->ctrl & CL_DESC_OWN))
++			hif_lib_indicate_client(client->id,
++						EVENT_RX_PKT_IND, qno);
++		break;
++
++	case EVENT_HIGH_RX_WM:
++	case EVENT_TXDONE_IND:
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++/*
++ * This function gets one packet from the specified client queue
++ * It also refills the rx buffer
++ */
++void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
++				*ofst, unsigned int *rx_ctrl,
++				unsigned int *desc_ctrl, void **priv_data)
++{
++	struct hif_client_rx_queue *queue = &client->rx_q[qno];
++	struct rx_queue_desc *desc;
++	void *pkt = NULL;
++
++	/*
++	 * The following lock protects rx queue access from
++	 * hif_lib_event_handler_start.
++	 * In general the lock is not required, because hif_lib_xmit_pkt and
++	 * hif_lib_event_handler_start are called from the napi poll loop,
++	 * which is not re-entrant. But if a client uses them differently,
++	 * the lock is required.
++	 */
++	/*spin_lock_irqsave(&client->rx_lock, flags); */
++	desc = queue->base + queue->read_idx;
++	if (!(desc->ctrl & CL_DESC_OWN)) {
++		pkt = desc->data - pfe_pkt_headroom;
++
++		*rx_ctrl = desc->client_ctrl;
++		*desc_ctrl = desc->ctrl;
++
++		if (desc->ctrl & CL_DESC_FIRST) {
++			u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
++
++			if (size) {
++				*len = CL_DESC_BUF_LEN(desc->ctrl) -
++						PFE_PKT_HEADER_SZ - size;
++				*ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
++								+ size;
++				*priv_data = desc->data + PFE_PKT_HEADER_SZ;
++			} else {
++				*len = CL_DESC_BUF_LEN(desc->ctrl) -
++						PFE_PKT_HEADER_SZ;
++				*ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ;
++				*priv_data = NULL;
++			}
++
++		} else {
++			*len = CL_DESC_BUF_LEN(desc->ctrl);
++			*ofst = pfe_pkt_headroom;
++		}
++
++		/*
++		 * Needed so we don't free a buffer/page
++		 * twice on module_exit
++		 */
++		desc->data = NULL;
++
++		/*
++		 * Ensure everything else is written to DDR before
++		 * writing bd->ctrl
++		 */
++		smp_wmb();
++
++		desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
++		queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
++	}
++
++	/*spin_unlock_irqrestore(&client->rx_lock, flags); */
++	return pkt;
++}
++
++static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
++					client_id, unsigned int qno,
++					u32 client_ctrl)
++{
++	/* Optimize the write since the destination may be non-cacheable */
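++	/*
++	 * The aligned path uses one 32-bit store, the unaligned path two
++	 * 16-bit stores; both pack the fields assuming a little-endian CPU,
++	 * and neither initializes client_ctrl1.
++	 */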
++	if (!((unsigned long)pkt_hdr & 0x3)) {
++		((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
++					client_id;
++	} else {
++		((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
++		((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
++	}
++}
++
++/*This function puts the given packet in the specific client queue */
++void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
++				*data, unsigned int len, u32 client_ctrl,
++				unsigned int flags, void *client_data)
++{
++	struct hif_client_tx_queue *queue = &client->tx_q[qno];
++	struct tx_queue_desc *desc = queue->base + queue->write_idx;
++
++	/* First buffer */
++	if (flags & HIF_FIRST_BUFFER) {
++		data -= sizeof(struct hif_hdr);
++		len += sizeof(struct hif_hdr);
++
++		hif_hdr_write(data, client->id, qno, client_ctrl);
++	}
++
++	desc->data = client_data;
++	desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
++
++	__hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
++
++	queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
++	queue->tx_pending++;
++	queue->jiffies_last_packet = jiffies;
++}
++
++void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
++				   unsigned int *flags, int count)
++{
++	struct hif_client_tx_queue *queue = &client->tx_q[qno];
++	struct tx_queue_desc *desc = queue->base + queue->read_idx;
++
++	pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
++		 queue->read_idx, queue->tx_pending);
++
++	if (!queue->tx_pending)
++		return NULL;
++
++	if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
++		u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
++			client->id, TMU_DM_TX_TRANS, 4));
++
++		if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
++			queue->done_tmu_tx_pkts = UINT_MAX -
++				queue->prev_tmu_tx_pkts + tmu_tx_pkts;
++		else
++			queue->done_tmu_tx_pkts = tmu_tx_pkts -
++						queue->prev_tmu_tx_pkts;
++
++		queue->prev_tmu_tx_pkts  = tmu_tx_pkts;
++
++		if (!queue->done_tmu_tx_pkts)
++			return NULL;
++	}
++
++	if (desc->ctrl & CL_DESC_OWN)
++		return NULL;
++
++	queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
++	queue->tx_pending--;
++
++	*flags = CL_DESC_GET_FLAGS(desc->ctrl);
++
++	if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
++		queue->done_tmu_tx_pkts--;
++
++	return desc->data;
++}
++
++static void hif_lib_tmu_credit_init(struct pfe *pfe)
++{
++	int i, q;
++
++	for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
++		for (q = 0; q < emac_txq_cnt; q++) {
++			pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
++					DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
++			pfe->tmu_credit.tx_credit[i][q] =
++					pfe->tmu_credit.tx_credit_max[i][q];
++		}
++}
++
++int pfe_hif_lib_init(struct pfe *pfe)
++{
++	int rc;
++
++	pr_info("%s\n", __func__);
++
++	if (lro_mode) {
++		page_mode = 1;
++		pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
++		pfe_pkt_headroom = 0;
++	} else {
++		page_mode = 0;
++		pfe_pkt_size = PFE_PKT_SIZE;
++		pfe_pkt_headroom = PFE_PKT_HEADROOM;
++	}
++
++	if (tx_qos)
++		emac_txq_cnt = EMAC_TXQ_CNT / 2;
++	else
++		emac_txq_cnt = EMAC_TXQ_CNT;
++
++	hif_lib_tmu_credit_init(pfe);
++	pfe->hif.shm = &ghif_shm;
++	rc = pfe_hif_shm_init(pfe->hif.shm);
++
++	return rc;
++}
++
++void pfe_hif_lib_exit(struct pfe *pfe)
++{
++	pr_info("%s\n", __func__);
++
++	pfe_hif_shm_clean(pfe->hif.shm);
++}
+diff --git a/drivers/staging/fsl_ppfe/pfe_hif_lib.h b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
+new file mode 100644
+index 00000000..49e7b5f1
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
+@@ -0,0 +1,239 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_HIF_LIB_H_
++#define _PFE_HIF_LIB_H_
++
++#include "pfe_hif.h"
++
++#define HIF_CL_REQ_TIMEOUT	10
++#define GFP_DMA_PFE 0
++
++enum {
++	REQUEST_CL_REGISTER = 0,
++	REQUEST_CL_UNREGISTER,
++	HIF_REQUEST_MAX
++};
++
++enum {
++	/* Event to indicate the client rx queue reached its watermark level */
++	EVENT_HIGH_RX_WM = 0,
++	/* Event to indicate that a packet was received for the client */
++	EVENT_RX_PKT_IND,
++	/* Event to indicate that packet tx is done for the client */
++	EVENT_TXDONE_IND,
++	HIF_EVENT_MAX
++};
++
++/* Structure to store client queue info */
++struct hif_client_rx_queue {
++	struct rx_queue_desc *base;
++	u32	size;
++	u32	read_idx;
++	u32	write_idx;
++};
++
++struct hif_client_tx_queue {
++	struct tx_queue_desc *base;
++	u32	size;
++	u32	read_idx;
++	u32	write_idx;
++	u32	tx_pending;
++	unsigned long jiffies_last_packet;
++	u32	nocpy_flag;
++	u32	prev_tmu_tx_pkts;
++	u32	done_tmu_tx_pkts;
++};
++
++struct hif_client_s {
++	int	id;
++	int	tx_qn;
++	int	rx_qn;
++	void	*rx_qbase;
++	void	*tx_qbase;
++	int	tx_qsize;
++	int	rx_qsize;
++	int	cpu_id;
++	struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
++	struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
++	int (*event_handler)(void *priv, int event, int data);
++	unsigned long queue_mask[HIF_EVENT_MAX];
++	struct pfe *pfe;
++	void *priv;
++};
++
++/*
++ * Client specific shared memory
++ * It contains number of Rx/Tx queues, base addresses and queue sizes
++ */
++struct hif_client_shm {
++	u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
++	unsigned long rx_qbase; /*Rx queue base address */
++	u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
++	unsigned long tx_qbase; /* Tx queue base address */
++	u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
++};
++
++/*Client shared memory ctrl bit description */
++#define CLIENT_CTRL_RX_Q_CNT_OFST	0
++#define CLIENT_CTRL_TX_Q_CNT_OFST	8
++#define CLIENT_CTRL_RX_Q_CNT(ctrl)	(((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
++						& 0xFF)
++#define CLIENT_CTRL_TX_Q_CNT(ctrl)	(((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
++						& 0xFF)
++
++/*
++ * Shared memory used to communicate between HIF driver and host/client drivers
++ * Before starting the hif driver, rx_buf_pool and rx_buf_pool_cnt should be
++ * initialized with host buffers and the buffer count in the pool.
++ * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
++ */
++struct hif_shm {
++	u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
++	/*Rx buffers required to initialize HIF rx descriptors */
++	void *rx_buf_pool[HIF_RX_DESC_NT];
++	unsigned long g_client_status[2]; /*Global client status bit mask */
++	/* Client specific shared memory */
++	struct hif_client_shm client[HIF_CLIENTS_MAX];
++};
++
++#define CL_DESC_OWN	BIT(31)
++/* This sets ownership to the HIF driver */
++#define CL_DESC_LAST	BIT(30)
++/* This indicates the last packet for multi-buffer handling */
++#define CL_DESC_FIRST	BIT(29)
++/* This indicates the first packet for multi-buffer handling */
++
++#define CL_DESC_BUF_LEN(x)		((x) & 0xFFFF)
++#define CL_DESC_FLAGS(x)		(((x) & 0xF) << 16)
++#define CL_DESC_GET_FLAGS(x)		(((x) >> 16) & 0xF)
++
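++/*
++ * Usage sketch (illustrative length only): a single-buffer frame handed to
++ * the HIF driver would carry
++ *	ctrl = CL_DESC_OWN | CL_DESC_FLAGS(HIF_FIRST_BUFFER |
++ *			HIF_LAST_BUFFER) | CL_DESC_BUF_LEN(1514);
++ * and CL_DESC_GET_FLAGS(ctrl) recovers the HIF_* flag bits.
++ */
++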
++struct rx_queue_desc {
++	void *data;
++	u32	ctrl; /* bits 0-15: len, 16-19: flags, 31: owner */
++	u32	client_ctrl;
++};
++
++struct tx_queue_desc {
++	void *data;
++	u32	ctrl; /* bits 0-15: len, 16-19: flags, 31: owner */
++};
++
++/* HIF Rx does not work properly for 2-byte aligned buffers, and the
++ * ip_header should be 4-byte aligned for better performance.
++ * "ip_header = 64 + 6 (hif_header) + 14 (MAC header)" will be 4-byte aligned.
++ */
++#define PFE_PKT_HEADER_SZ	sizeof(struct hif_hdr)
++/* must be big enough for headroom, pkt size and skb shared info */
++#define PFE_BUF_SIZE		2048
++#define PFE_PKT_HEADROOM	128
++
++#define SKB_SHARED_INFO_SIZE   (sizeof(struct skb_shared_info))
++#define PFE_PKT_SIZE		(PFE_BUF_SIZE - PFE_PKT_HEADROOM \
++				 - SKB_SHARED_INFO_SIZE)
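++/*
++ * Rough budget with the defaults above (a sketch; skb_shared_info size is
++ * build-dependent): 2048 - 128 bytes of headroom - roughly 320 bytes of
++ * skb_shared_info leaves about 1600 bytes of packet data per buffer.
++ */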
++#define MAX_L2_HDR_SIZE		14	/* Not correct for VLAN/PPPoE */
++#define MAX_L3_HDR_SIZE		20	/* Not correct for IPv6 */
++#define MAX_L4_HDR_SIZE		60	/* TCP with maximum options */
++#define MAX_HDR_SIZE		(MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
++				 + MAX_L4_HDR_SIZE)
++/* Used in page mode to clamp packet size to the maximum supported by the hif
++ * hw interface (<16KiB)
++ */
++#define MAX_PFE_PKT_SIZE	16380UL
++
++extern unsigned int pfe_pkt_size;
++extern unsigned int pfe_pkt_headroom;
++extern unsigned int page_mode;
++extern unsigned int lro_mode;
++extern unsigned int tx_qos;
++extern unsigned int emac_txq_cnt;
++
++int pfe_hif_lib_init(struct pfe *pfe);
++void pfe_hif_lib_exit(struct pfe *pfe);
++int hif_lib_client_register(struct hif_client_s *client);
++int hif_lib_client_unregister(struct  hif_client_s *client);
++void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
++				*data, unsigned int len, u32 client_ctrl,
++				unsigned int flags, void *client_data);
++int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
++		     unsigned int len, u32 client_ctrl, void *client_data);
++void hif_lib_indicate_client(int cl_id, int event, int data);
++int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
++					data);
++int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
++int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
++void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
++				   unsigned int *flags, int count);
++void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
++				*ofst, unsigned int *rx_ctrl,
++				unsigned int *desc_ctrl, void **priv_data);
++void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
++void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
++					enable);
++static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
++					qno)
++{
++	struct hif_client_tx_queue *queue = &client->tx_q[qno];
++
++	return (queue->size - queue->tx_pending);
++}
++
++static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
++						int qno)
++{
++	struct hif_client_tx_queue *queue = &client->tx_q[qno];
++
++	return queue->write_idx;
++}
++
++static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
++					qno)
++{
++	struct hif_client_tx_queue *queue = &client->tx_q[qno];
++
++	return queue->tx_pending;
++}
++
++#define hif_lib_tx_credit_avail(pfe, id, qno) \
++				((pfe)->tmu_credit.tx_credit[id][qno])
++
++#define hif_lib_tx_credit_max(pfe, id, qno) \
++				((pfe)->tmu_credit.tx_credit_max[id][qno])
++
++#define hif_lib_tx_credit_use(pfe, id, qno, credit)			\
++	({ typeof(pfe) pfe_ = pfe;					\
++		typeof(id) id_ = id;					\
++		typeof(qno) qno_ = qno;					\
++		typeof(credit) credit_ = credit;			\
++		do {							\
++			if (tx_qos) {					\
++				(pfe_)->tmu_credit.tx_credit[id_][qno_]\
++					 -= credit_;			\
++				(pfe_)->tmu_credit.tx_packets[id_][qno_]\
++					+= credit_;			\
++			}						\
++		} while (0);						\
++	})
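++
++/*
++ * Typical call site (a sketch; the queue and credit values depend on the
++ * caller): hif_lib_tx_credit_use(pfe, client_id, qno, 1);
++ */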
++
++#endif /* _PFE_HIF_LIB_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_hw.c b/drivers/staging/fsl_ppfe/pfe_hw.c
+new file mode 100644
+index 00000000..16ea2c65
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_hw.c
+@@ -0,0 +1,176 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include "pfe_mod.h"
++#include "pfe_hw.h"
++
++/* Functions to handle most of pfe hw register initialization */
++int pfe_hw_init(struct pfe *pfe, int resume)
++{
++	struct class_cfg class_cfg = {
++		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
++		.route_table_baseaddr = pfe->ddr_phys_baseaddr +
++					ROUTE_TABLE_BASEADDR,
++		.route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
++	};
++
++	struct tmu_cfg tmu_cfg = {
++		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
++		.llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
++		.llm_queue_len = TMU_LLM_QUEUE_LEN,
++	};
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	struct util_cfg util_cfg = {
++		.pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
++	};
++#endif
++
++	struct BMU_CFG bmu1_cfg = {
++		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
++						BMU1_LMEM_BASEADDR),
++		.count = BMU1_BUF_COUNT,
++		.size = BMU1_BUF_SIZE,
++		.low_watermark = 10,
++		.high_watermark = 15,
++	};
++
++	struct BMU_CFG bmu2_cfg = {
++		.baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
++						BMU2_DDR_BASEADDR),
++		.count = BMU2_BUF_COUNT,
++		.size = BMU2_BUF_SIZE,
++		.low_watermark = 250,
++		.high_watermark = 253,
++	};
++
++	struct gpi_cfg egpi1_cfg = {
++		.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
++		.tmlf_txthres = EGPI1_TMLF_TXTHRES,
++		.aseq_len = EGPI1_ASEQ_LEN,
++		.mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
++						EMAC_TCNTRL_REG),
++	};
++
++	struct gpi_cfg egpi2_cfg = {
++		.lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
++		.tmlf_txthres = EGPI2_TMLF_TXTHRES,
++		.aseq_len = EGPI2_ASEQ_LEN,
++		.mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
++						EMAC_TCNTRL_REG),
++	};
++
++	struct gpi_cfg hgpi_cfg = {
++		.lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
++		.tmlf_txthres = HGPI_TMLF_TXTHRES,
++		.aseq_len = HGPI_ASEQ_LEN,
++		.mtip_pause_reg = 0,
++	};
++
++	pr_info("%s\n", __func__);
++
++#if !defined(LS1012A_PFE_RESET_WA)
++	/* LS1012A needs this to make PE work correctly */
++	writel(0x3,     CLASS_PE_SYS_CLK_RATIO);
++	writel(0x3,     TMU_PE_SYS_CLK_RATIO);
++	writel(0x3,     UTIL_PE_SYS_CLK_RATIO);
++	usleep_range(10, 20);
++#endif
++
++	pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
++	pr_info("TMU version: %x\n", readl(TMU_VERSION));
++
++	pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
++		BMU_VERSION));
++	pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
++		BMU_VERSION));
++
++	pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
++		GPI_VERSION));
++	pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
++		GPI_VERSION));
++	pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
++		GPI_VERSION));
++
++	pr_info("HIF version: %x\n", readl(HIF_VERSION));
++	pr_info("HIF NOCPY version: %x\n", readl(HIF_NOCPY_VERSION));
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
++#endif
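++	/* Busy-wait for TMU ECC memory init to complete; no timeout applied */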
++	while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
++		;
++
++	hif_rx_disable();
++	hif_tx_disable();
++
++	bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
++
++	pr_info("bmu_init(1) done\n");
++
++	bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
++
++	pr_info("bmu_init(2) done\n");
++
++	class_cfg.resume = resume ? 1 : 0;
++
++	class_init(&class_cfg);
++
++	pr_info("class_init() done\n");
++
++	tmu_init(&tmu_cfg);
++
++	pr_info("tmu_init() done\n");
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	util_init(&util_cfg);
++
++	pr_info("util_init() done\n");
++#endif
++	gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
++
++	pr_info("gpi_init(1) done\n");
++
++	gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
++
++	pr_info("gpi_init(2) done\n");
++
++	gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
++
++	pr_info("gpi_init(hif) done\n");
++
++	bmu_enable(BMU1_BASE_ADDR);
++
++	pr_info("bmu_enable(1) done\n");
++
++	bmu_enable(BMU2_BASE_ADDR);
++
++	pr_info("bmu_enable(2) done\n");
++
++	return 0;
++}
++
++void pfe_hw_exit(struct pfe *pfe)
++{
++	pr_info("%s\n", __func__);
++
++	bmu_disable(BMU1_BASE_ADDR);
++	bmu_reset(BMU1_BASE_ADDR);
++
++	bmu_disable(BMU2_BASE_ADDR);
++	bmu_reset(BMU2_BASE_ADDR);
++}
+diff --git a/drivers/staging/fsl_ppfe/pfe_hw.h b/drivers/staging/fsl_ppfe/pfe_hw.h
+new file mode 100644
+index 00000000..53b5fe14
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_hw.h
+@@ -0,0 +1,27 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_HW_H_
++#define _PFE_HW_H_
++
++#define PE_SYS_CLK_RATIO	1	/* SYS/AXI = 250MHz, HFE = 500MHz */
++
++int pfe_hw_init(struct pfe *pfe, int resume);
++void pfe_hw_exit(struct pfe *pfe);
++
++#endif /* _PFE_HW_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
+new file mode 100644
+index 00000000..c579eb58
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
+@@ -0,0 +1,394 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/of_net.h>
++#include <linux/of_address.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/clk.h>
++#include <linux/mfd/syscon.h>
++#include <linux/regmap.h>
++
++#include "pfe_mod.h"
++
++struct ls1012a_pfe_platform_data pfe_platform_data;
++
++static int pfe_get_gemac_if_properties(struct device_node *parent, int port, int
++					if_cnt,
++					struct ls1012a_pfe_platform_data
++					*pdata)
++{
++	struct device_node *gem = NULL, *phy = NULL;
++	int size;
++	int ii = 0, phy_id = 0;
++	const u32 *addr;
++	const void *mac_addr;
++
++	for (ii = 0; ii < if_cnt; ii++) {
++		gem = of_get_next_child(parent, gem);
++		if (!gem)
++			goto err;
++		addr = of_get_property(gem, "reg", &size);
++		if (addr && (be32_to_cpup(addr) == port))
++			break;
++	}
++
++	if (ii >= if_cnt) {
++		pr_err("%s:%d Failed to find interface = %d\n",
++		       __func__, __LINE__, if_cnt);
++		goto err;
++	}
++
++	pdata->ls1012a_eth_pdata[port].gem_id = port;
++
++	mac_addr = of_get_mac_address(gem);
++
++	if (mac_addr) {
++		memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
++		       ETH_ALEN);
++	}
++
++	pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
++
++	if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
++		pr_err("%s:%d Incorrect Phy mode....\n", __func__,
++		       __LINE__);
++
++	addr = of_get_property(gem, "fsl,gemac-bus-id", &size);
++	if (!addr)
++		pr_err("%s:%d Invalid gemac-bus-id....\n", __func__,
++		       __LINE__);
++	else
++		pdata->ls1012a_eth_pdata[port].bus_id = be32_to_cpup(addr);
++
++	addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
++	if (!addr) {
++		pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
++		       __LINE__);
++	} else {
++		phy_id = be32_to_cpup(addr);
++		pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
++		pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
++	}
++
++	addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
++	if (!addr) {
++		pr_err("%s: Invalid mdio-mux-val....\n", __func__);
++	} else {
++		phy_id = be32_to_cpup(addr);
++		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
++	}
++
++	if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
++		pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
++			 pdata->ls1012a_eth_pdata[port].mdio_muxval;
++
++	addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
++	if (!addr)
++		pr_err("%s:%d Invalid pfe-phy-if-flags....\n",
++		       __func__, __LINE__);
++	else
++		pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
++
++	/* If PHY is enabled, read mdio properties */
++	if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
++		goto done;
++
++	phy = of_get_next_child(gem, NULL);
++
++	addr = of_get_property(phy, "reg", &size);
++
++	if (!addr)
++		pr_err("%s:%d Invalid phy enable flag....\n",
++		       __func__, __LINE__);
++	else
++		pdata->ls1012a_mdio_pdata[port].enabled = be32_to_cpup(addr);
++
++	pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
++
++done:
++
++	return 0;
++
++err:
++	return -1;
++}
++
++/*
++ *
++ * pfe_platform_probe -
++ *
++ *
++ */
++static int pfe_platform_probe(struct platform_device *pdev)
++{
++	struct resource res;
++	int ii, rc, interface_count = 0, size = 0;
++	const u32 *prop;
++	struct device_node  *np;
++	struct clk *pfe_clk;
++
++	np = pdev->dev.of_node;
++
++	if (!np) {
++		pr_err("Invalid device node\n");
++		return -EINVAL;
++	}
++
++	pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
++	if (!pfe) {
++		rc = -ENOMEM;
++		goto err_alloc;
++	}
++
++	platform_set_drvdata(pdev, pfe);
++
++	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++
++	if (of_address_to_resource(np, 1, &res)) {
++		rc = -ENOMEM;
++		pr_err("failed to get ddr resource\n");
++		goto err_ddr;
++	}
++
++	pfe->ddr_phys_baseaddr = res.start;
++	pfe->ddr_size = resource_size(&res);
++
++	pfe->ddr_baseaddr = phys_to_virt(res.start);
++	if (!pfe->ddr_baseaddr) {
++		pr_err("ioremap() ddr failed\n");
++		rc = -ENOMEM;
++		goto err_ddr;
++	}
++
++	pfe->scfg =
++		syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
++						"fsl,pfe-scfg");
++	if (IS_ERR(pfe->scfg)) {
++		dev_err(&pdev->dev, "No syscfg phandle specified\n");
++		rc = PTR_ERR(pfe->scfg);
++		goto err_ddr;
++	}
++
++	pfe->cbus_baseaddr = of_iomap(np, 0);
++	if (!pfe->cbus_baseaddr) {
++		rc = -ENOMEM;
++		pr_err("failed to get axi resource\n");
++		goto err_axi;
++	}
++
++	pfe->hif_irq = platform_get_irq(pdev, 0);
++	if (pfe->hif_irq < 0) {
++		pr_err("platform_get_irq for hif failed\n");
++		rc = pfe->hif_irq;
++		goto err_hif_irq;
++	}
++
++	pfe->wol_irq = platform_get_irq(pdev, 2);
++	if (pfe->wol_irq < 0) {
++		pr_err("platform_get_irq for WoL failed\n");
++		rc = pfe->wol_irq;
++		goto err_hif_irq;
++	}
++
++	/* Read interface count */
++	prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
++	if (!prop) {
++		pr_err("Failed to read number of interfaces\n");
++		rc = -ENXIO;
++		goto err_prop;
++	}
++
++	interface_count = be32_to_cpup(prop);
++	if (interface_count <= 0) {
++		pr_err("No ethernet interface count : %d\n",
++		       interface_count);
++		rc = -ENXIO;
++		goto err_prop;
++	}
++
++	pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
++
++	for (ii = 0; ii < interface_count; ii++) {
++		pfe_get_gemac_if_properties(np, ii, interface_count,
++					   &pfe_platform_data);
++	}
++
++	pfe->dev = &pdev->dev;
++
++	pfe->dev->platform_data = &pfe_platform_data;
++
++	/* declare WoL capabilities */
++	device_init_wakeup(&pdev->dev, true);
++
++	/* find the clocks */
++	pfe_clk = devm_clk_get(pfe->dev, "pfe");
++	if (IS_ERR(pfe_clk)) {
++		rc = PTR_ERR(pfe_clk);
++		goto err_hif_irq;
++	}
++
++	/* PFE clock is (platform clock / 2) */
++	/* save sys_clk value as KHz */
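++	/* e.g. a 500 MHz platform clock: 500000000 / (2 * 1000) = 250000 kHz */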
++	pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
++
++	rc = pfe_probe(pfe);
++	if (rc < 0)
++		goto err_probe;
++
++	return 0;
++
++err_probe:
++err_prop:
++err_hif_irq:
++	iounmap(pfe->cbus_baseaddr);
++
++err_axi:
++	iounmap(pfe->ddr_baseaddr);
++
++err_ddr:
++	platform_set_drvdata(pdev, NULL);
++
++	kfree(pfe);
++
++err_alloc:
++	return rc;
++}
++
++/*
++ * pfe_platform_remove -
++ */
++static int pfe_platform_remove(struct platform_device *pdev)
++{
++	struct pfe *pfe = platform_get_drvdata(pdev);
++	int rc;
++
++	pr_info("%s\n", __func__);
++
++	rc = pfe_remove(pfe);
++
++	iounmap(pfe->cbus_baseaddr);
++	iounmap(pfe->ddr_baseaddr);
++
++	platform_set_drvdata(pdev, NULL);
++
++	kfree(pfe);
++
++	return rc;
++}
++
++#ifdef CONFIG_PM
++#ifdef CONFIG_PM_SLEEP
++static int pfe_platform_suspend(struct device *dev)
++{
++	struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
++	struct net_device *netdev;
++	int i;
++
++	pfe->wake = 0;
++
++	for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
++		netdev = pfe->eth.eth_priv[i]->ndev;
++
++		netif_device_detach(netdev);
++
++		if (netif_running(netdev))
++			if (pfe_eth_suspend(netdev))
++				pfe->wake = 1;
++	}
++
++	/* Shutdown PFE only if we're not waking up the system */
++	if (!pfe->wake) {
++#if defined(LS1012A_PFE_RESET_WA)
++		pfe_hif_rx_idle(&pfe->hif);
++#endif
++		pfe_ctrl_suspend(&pfe->ctrl);
++		pfe_firmware_exit(pfe);
++
++		pfe_hif_exit(pfe);
++		pfe_hif_lib_exit(pfe);
++
++		pfe_hw_exit(pfe);
++	}
++
++	return 0;
++}
++
++static int pfe_platform_resume(struct device *dev)
++{
++	struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
++	struct net_device *netdev;
++	int i;
++
++	if (!pfe->wake) {
++		pfe_hw_init(pfe, 1);
++		pfe_hif_lib_init(pfe);
++		pfe_hif_init(pfe);
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++		util_enable();
++#endif
++		tmu_enable(0xf);
++		class_enable();
++		pfe_ctrl_resume(&pfe->ctrl);
++	}
++
++	for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
++		netdev = pfe->eth.eth_priv[i]->ndev;
++
++		if (pfe->eth.eth_priv[i]->mii_bus)
++			pfe_eth_mdio_reset(pfe->eth.eth_priv[i]->mii_bus);
++
++		if (netif_running(netdev))
++			pfe_eth_resume(netdev);
++
++		netif_device_attach(netdev);
++	}
++	return 0;
++}
++#else
++#define pfe_platform_suspend NULL
++#define pfe_platform_resume NULL
++#endif
++
++static const struct dev_pm_ops pfe_platform_pm_ops = {
++	SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
++};
++#endif
++
++static const struct of_device_id pfe_match[] = {
++	{
++		.compatible = "fsl,pfe",
++	},
++	{},
++};
++MODULE_DEVICE_TABLE(of, pfe_match);
++
++static struct platform_driver pfe_platform_driver = {
++	.probe = pfe_platform_probe,
++	.remove = pfe_platform_remove,
++	.driver = {
++		.name = "pfe",
++		.of_match_table = pfe_match,
++#ifdef CONFIG_PM
++		.pm = &pfe_platform_pm_ops,
++#endif
++	},
++};
++
++module_platform_driver(pfe_platform_driver);
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("PFE Ethernet driver");
++MODULE_AUTHOR("NXP DNCPE");
+diff --git a/drivers/staging/fsl_ppfe/pfe_mod.c b/drivers/staging/fsl_ppfe/pfe_mod.c
+new file mode 100644
+index 00000000..d5ba56a3
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_mod.c
+@@ -0,0 +1,141 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/dma-mapping.h>
++#include "pfe_mod.h"
++
++struct pfe *pfe;
++
++/*
++ * pfe_probe -
++ */
++int pfe_probe(struct pfe *pfe)
++{
++	int rc;
++
++	if (pfe->ddr_size < DDR_MAX_SIZE) {
++		pr_err("%s: required DDR memory (%x) exceeds platform DDR memory (%x)\n",
++		       __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
++		rc = -ENOMEM;
++		goto err_hw;
++	}
++
++	if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
++			(8 * SZ_1M - 1)) != 0) {
++		pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
++		       __func__, (int)pfe->ddr_phys_baseaddr +
++			BMU2_DDR_BASEADDR);
++		rc = -ENOMEM;
++		goto err_hw;
++	}
++
++	pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
++		(unsigned long)pfe->cbus_baseaddr,
++		(unsigned long)pfe->ddr_baseaddr,
++		pfe->ddr_phys_baseaddr, pfe->ddr_size);
++
++	pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
++		     pfe->ddr_phys_baseaddr, pfe->ddr_size);
++
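++	/*
++	 * Bring-up order matters: hw blocks first, then HIF shared memory,
++	 * the HIF rings/IRQ, firmware, ctrl and eth; the error path below
++	 * unwinds in reverse order.
++	 */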
++	rc = pfe_hw_init(pfe, 0);
++	if (rc < 0)
++		goto err_hw;
++
++	rc = pfe_hif_lib_init(pfe);
++	if (rc < 0)
++		goto err_hif_lib;
++
++	rc = pfe_hif_init(pfe);
++	if (rc < 0)
++		goto err_hif;
++
++	rc = pfe_firmware_init(pfe);
++	if (rc < 0)
++		goto err_firmware;
++
++	rc = pfe_ctrl_init(pfe);
++	if (rc < 0)
++		goto err_ctrl;
++
++	rc = pfe_eth_init(pfe);
++	if (rc < 0)
++		goto err_eth;
++
++	rc = pfe_sysfs_init(pfe);
++	if (rc < 0)
++		goto err_sysfs;
++
++	rc = pfe_debugfs_init(pfe);
++	if (rc < 0)
++		goto err_debugfs;
++
++	return 0;
++
++err_debugfs:
++	pfe_sysfs_exit(pfe);
++
++err_sysfs:
++	pfe_eth_exit(pfe);
++
++err_eth:
++	pfe_ctrl_exit(pfe);
++
++err_ctrl:
++	pfe_firmware_exit(pfe);
++
++err_firmware:
++	pfe_hif_exit(pfe);
++
++err_hif:
++	pfe_hif_lib_exit(pfe);
++
++err_hif_lib:
++	pfe_hw_exit(pfe);
++
++err_hw:
++	return rc;
++}
++
++/*
++ * pfe_remove -
++ */
++int pfe_remove(struct pfe *pfe)
++{
++	pr_info("%s\n", __func__);
++
++	pfe_debugfs_exit(pfe);
++
++	pfe_sysfs_exit(pfe);
++
++	pfe_eth_exit(pfe);
++
++	pfe_ctrl_exit(pfe);
++
++#if defined(LS1012A_PFE_RESET_WA)
++	pfe_hif_rx_idle(&pfe->hif);
++#endif
++	pfe_firmware_exit(pfe);
++
++	pfe_hif_exit(pfe);
++
++	pfe_hif_lib_exit(pfe);
++
++	pfe_hw_exit(pfe);
++
++	return 0;
++}
+diff --git a/drivers/staging/fsl_ppfe/pfe_mod.h b/drivers/staging/fsl_ppfe/pfe_mod.h
+new file mode 100644
+index 00000000..3012f17f
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_mod.h
+@@ -0,0 +1,112 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_MOD_H_
++#define _PFE_MOD_H_
++
++#include <linux/device.h>
++#include <linux/elf.h>
++
++struct pfe;
++
++#include "pfe_hw.h"
++#include "pfe_firmware.h"
++#include "pfe_ctrl.h"
++#include "pfe_hif.h"
++#include "pfe_hif_lib.h"
++#include "pfe_eth.h"
++#include "pfe_sysfs.h"
++#include "pfe_perfmon.h"
++#include "pfe_debugfs.h"
++
++#define PHYID_MAX_VAL 32
++
++struct pfe_tmu_credit {
++	/* Number of allowed TX packet in-flight, matches TMU queue size */
++	unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
++	unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
++	unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
++};
++
++struct pfe {
++	struct regmap	*scfg;
++	unsigned long ddr_phys_baseaddr;
++	void *ddr_baseaddr;
++	unsigned int ddr_size;
++	void *cbus_baseaddr;
++	void *apb_baseaddr;
++	unsigned long iram_phys_baseaddr;
++	void *iram_baseaddr;
++	unsigned long ipsec_phys_baseaddr;
++	void *ipsec_baseaddr;
++	int hif_irq;
++	int wol_irq;
++	int hif_client_irq;
++	struct device *dev;
++	struct dentry *dentry;
++	struct pfe_ctrl ctrl;
++	struct pfe_hif hif;
++	struct pfe_eth eth;
++	struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
++#if defined(CFG_DIAGS)
++	struct pfe_diags diags;
++#endif
++	struct pfe_tmu_credit tmu_credit;
++	struct pfe_cpumon cpumon;
++	struct pfe_memmon memmon;
++	int wake;
++	int mdio_muxval[PHYID_MAX_VAL];
++	struct clk *hfe_clock;
++};
++
++extern struct pfe *pfe;
++
++int pfe_probe(struct pfe *pfe);
++int pfe_remove(struct pfe *pfe);
++
++/* DDR Mapping in reserved memory */
++#define ROUTE_TABLE_BASEADDR	0
++#define ROUTE_TABLE_HASH_BITS	15	/* 32K entries */
++#define ROUTE_TABLE_SIZE	((1 << ROUTE_TABLE_HASH_BITS) \
++				  * CLASS_ROUTE_SIZE)
++#define BMU2_DDR_BASEADDR	(ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
++#define BMU2_BUF_COUNT		(4096 - 256)
++/* This is to get a total DDR size of 12MiB */
++#define BMU2_DDR_SIZE		(DDR_BUF_SIZE * BMU2_BUF_COUNT)
++#define UTIL_CODE_BASEADDR	(BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
++#define UTIL_CODE_SIZE		(128 * SZ_1K)
++#define UTIL_DDR_DATA_BASEADDR	(UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
++#define UTIL_DDR_DATA_SIZE	(64 * SZ_1K)
++#define CLASS_DDR_DATA_BASEADDR	(UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
++#define CLASS_DDR_DATA_SIZE	(32 * SZ_1K)
++#define TMU_DDR_DATA_BASEADDR	(CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
++#define TMU_DDR_DATA_SIZE	(32 * SZ_1K)
++#define TMU_LLM_BASEADDR	(TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
++#define TMU_LLM_QUEUE_LEN	(8 * 512)
++/* Must be power of two and at least 16 * 8 = 128 bytes */
++#define TMU_LLM_SIZE		(4 * 16 * TMU_LLM_QUEUE_LEN)
++/* (4 TMU's x 16 queues x queue_len) */
++
++#define DDR_MAX_SIZE		(TMU_LLM_BASEADDR + TMU_LLM_SIZE)
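
The regions above are laid out back to back, so DDR_MAX_SIZE is simply the end of the last region. As a sketch only (it assumes the CLASS_ROUTE_SIZE and DDR_BUF_SIZE values defined elsewhere in the driver), the constraints stated in the comments could be encoded at compile time:

/* Hypothetical compile-time checks; not part of the patch. */
static inline void pfe_ddr_map_check(void)
{
	/* TMU_LLM_SIZE must be a power of two (see comment above) */
	BUILD_BUG_ON(TMU_LLM_SIZE & (TMU_LLM_SIZE - 1));
	/* the whole map is expected to fit the 12 MiB reserved region */
	BUILD_BUG_ON(DDR_MAX_SIZE > 12 * SZ_1M);
}
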
++
++/* LMEM Mapping */
++#define BMU1_LMEM_BASEADDR	0
++#define BMU1_BUF_COUNT		256
++#define BMU1_LMEM_SIZE		(LMEM_BUF_SIZE * BMU1_BUF_COUNT)
++
++#endif /* _PFE_MOD_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_perfmon.h b/drivers/staging/fsl_ppfe/pfe_perfmon.h
+new file mode 100644
+index 00000000..84908121
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
+@@ -0,0 +1,38 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_PERFMON_H_
++#define _PFE_PERFMON_H_
++
++#include "pfe/pfe.h"
++
++#define	CT_CPUMON_INTERVAL	(1 * TIMER_TICKS_PER_SEC)
++
++struct pfe_cpumon {
++	u32 cpu_usage_pct[MAX_PE];
++	u32 class_usage_pct;
++};
++
++struct pfe_memmon {
++	u32 kernel_memory_allocated;
++};
++
++int pfe_perfmon_init(struct pfe *pfe);
++void pfe_perfmon_exit(struct pfe *pfe);
++
++#endif /* _PFE_PERFMON_H_ */
+diff --git a/drivers/staging/fsl_ppfe/pfe_sysfs.c b/drivers/staging/fsl_ppfe/pfe_sysfs.c
+new file mode 100644
+index 00000000..2a763844
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
+@@ -0,0 +1,818 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++
++#include "pfe_mod.h"
++
++#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
++#define NUM_QUEUES		16
++
++static char register_name[20][5] = {
++	"EPC", "ECAS", "EID", "ED",
++	"r0", "r1", "r2", "r3",
++	"r4", "r5", "r6", "r7",
++	"r8", "r9", "r10", "r11",
++	"r12", "r13", "r14", "r15",
++};
++
++static char exception_name[14][20] = {
++	"Reset",
++	"HardwareFailure",
++	"NMI",
++	"InstBreakpoint",
++	"DataBreakpoint",
++	"Unsupported",
++	"PrivilegeViolation",
++	"InstBusError",
++	"DataBusError",
++	"AlignmentError",
++	"ArithmeticError",
++	"SystemCall",
++	"MemoryManagement",
++	"Interrupt",
++};
++
++static unsigned long class_do_clear;
++static unsigned long tmu_do_clear;
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++static unsigned long util_do_clear;
++#endif
++
++static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr,
++				 unsigned long do_clear)
++{
++	ssize_t len = 0;
++	u32 val;
++	char statebuf[5];
++	struct pfe_cpumon *cpumon = &pfe->cpumon;
++	u32 debug_indicator;
++	u32 debug[20];
++
++	*(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
++	dmem_addr += 4;
++
++	statebuf[4] = '\0';
++	len += sprintf(buf + len, "state=%4s ", statebuf);
++
++	val = pe_dmem_read(id, dmem_addr, 4);
++	dmem_addr += 4;
++	len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
++
++	val = pe_dmem_read(id, dmem_addr, 4);
++	if (do_clear && val)
++		pe_dmem_write(id, 0, dmem_addr, 4);
++	dmem_addr += 4;
++	len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
++
++	val = pe_dmem_read(id, dmem_addr, 4);
++	if (do_clear && val)
++		pe_dmem_write(id, 0, dmem_addr, 4);
++	dmem_addr += 4;
++	if (id >= TMU0_ID && id <= TMU_MAX_ID)
++		len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
++	else
++		len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
++
++	val = pe_dmem_read(id, dmem_addr, 4);
++	if (do_clear && val)
++		pe_dmem_write(id, 0, dmem_addr, 4);
++	dmem_addr += 4;
++	if (val)
++		len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
++
++	len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
++
++	len += sprintf(buf + len, "\n");
++
++	debug_indicator = pe_dmem_read(id, dmem_addr, 4);
++	dmem_addr += 4;
++	if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
++		int j, last = 0;
++
++		for (j = 0; j < 16; j++) {
++			debug[j] = pe_dmem_read(id, dmem_addr, 4);
++			if (debug[j]) {
++				if (do_clear)
++					pe_dmem_write(id, 0, dmem_addr, 4);
++				last = j + 1;
++			}
++			dmem_addr += 4;
++		}
++		for (j = 0; j < last; j++) {
++			len += sprintf(buf + len, "%08x%s",
++			cpu_to_be32(debug[j]),
++			(j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
++		}
++	}
++
++	if (!strncmp(statebuf, "DEAD", 4)) {
++		u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
++
++		len += sprintf(buf + len, "Exception details:\n");
++		for (i = 0; i < 20; i++) {
++			debug[i] = pe_dmem_read(id, dump, 4);
++			dump += 4;
++			if (i == 2)
++				len += sprintf(buf + len, "%4s = %08x (=%s) ",
++				register_name[i], cpu_to_be32(debug[i]),
++				exception_name[min((u32)
++				cpu_to_be32(debug[i]), (u32)13)]);
++			else
++				len += sprintf(buf + len, "%4s = %08x%s",
++				register_name[i], cpu_to_be32(debug[i]),
++				(i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
++		}
++	}
++
++	return len;
++}
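
Laid out as a structure, the per-PE DMEM status block that this function walks word by word looks roughly like this (hypothetical; byte order is handled by the caller via cpu_to_be32(), as above):

/* Illustrative only; the driver reads these fields with pe_dmem_read(). */
struct pe_status_dmem {
	char state[4];		/* e.g. "DEAD" */
	u32 ctr;
	u32 rx;
	u32 tx_or_qstatus;	/* tx count (class PEs), queue status (TMUs) */
	u32 drop;
	u32 debug_indicator;	/* "DBUG" when debug words follow */
	u32 debug[16];
};
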
++
++static ssize_t class_phy_stats(char *buf, int phy)
++{
++	ssize_t len = 0;
++	int off1 = phy * 0x28;
++	int off2 = phy * 0x10;
++
++	if (phy == 3)
++		off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
++
++	len += sprintf(buf + len, "phy: %d\n", phy);
++	len += sprintf(buf + len,
++			"  rx:   %10u, tx:   %10u, intf:  %10u, ipv4:    %10u, ipv6: %10u\n",
++			readl(CLASS_PHY1_RX_PKTS + off1),
++			readl(CLASS_PHY1_TX_PKTS + off1),
++			readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
++			readl(CLASS_PHY1_V4_PKTS + off1),
++			readl(CLASS_PHY1_V6_PKTS + off1));
++
++	len += sprintf(buf + len,
++			"  icmp: %10u, igmp: %10u, tcp:   %10u, udp:     %10u\n",
++			readl(CLASS_PHY1_ICMP_PKTS + off2),
++			readl(CLASS_PHY1_IGMP_PKTS + off2),
++			readl(CLASS_PHY1_TCP_PKTS + off2),
++			readl(CLASS_PHY1_UDP_PKTS + off2));
++
++	len += sprintf(buf + len, "  err\n");
++	len += sprintf(buf + len,
++			"  lp:   %10u, intf: %10u, l3:    %10u, chcksum: %10u, ttl:  %10u\n",
++			readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
++			readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
++			readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
++			readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
++			readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
++
++	return len;
++}
++
++/* qm_read_drop_stat
++ * This function is used to read the drop statistics from the TMU
++ * hw drop counter.  Since the hw counter is always cleared after
++ * reading, this function maintains the previous drop count, and
++ * adds the new value to it.  That value can be retrieved by
++ * passing a pointer to it with the total_drops arg.
++ *
++ * @param tmu		TMU number (0 - 3)
++ * @param queue		queue number (0 - 15)
++ * @param total_drops	pointer to location to store total drops (or NULL)
++ * @param do_reset	if TRUE, clear total drops after updating
++ */
++u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
++{
++	static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
++	u32 val;
++
++	writel((tmu << 8) | queue, TMU_TEQ_CTRL);
++	writel((tmu << 8) | queue, TMU_LLM_CTRL);
++	val = readl(TMU_TEQ_DROP_STAT);
++	qtotal[tmu][queue] += val;
++	if (total_drops)
++		*total_drops = qtotal[tmu][queue];
++	if (do_reset)
++		qtotal[tmu][queue] = 0;
++	return val;
++}
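
For context, a minimal polling caller might look like this (hypothetical, not part of the patch); since the hardware counter clears on every read, a caller that wants running totals passes total_drops and resets only once the value has been consumed:

/* Hypothetical: log accumulated drops for every TMU queue. */
static void pfe_poll_tmu_drops(void)
{
	u32 total;
	int tmu, queue;

	for (tmu = 0; tmu < 4; tmu++)
		for (queue = 0; queue < NUM_QUEUES; queue++) {
			qm_read_drop_stat(tmu, queue, &total, 0);
			if (total)
				pr_debug("TMU%d-Q%d drops: %u\n",
					 tmu, queue, total);
		}
}
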
++
++static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
++{
++	ssize_t len = 0;
++	u32 drops;
++
++	len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
++
++	drops = qm_read_drop_stat(tmu, queue, NULL, 0);
++
++	/* Select queue */
++	writel((tmu << 8) | queue, TMU_TEQ_CTRL);
++	writel((tmu << 8) | queue, TMU_LLM_CTRL);
++
++	len += sprintf(buf + len,
++			"(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
++		drops, readl(TMU_TEQ_TRANS_STAT),
++		readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
++		readl(TMU_LLM_QUE_DROPCNT));
++
++	return len;
++}
++
++static ssize_t tmu_queues(char *buf, int tmu)
++{
++	ssize_t len = 0;
++	int queue;
++
++	for (queue = 0; queue < 16; queue++)
++		len += tmu_queue_stats(buf + len, tmu, queue);
++
++	return len;
++}
++
++static ssize_t block_version(char *buf, void *addr)
++{
++	ssize_t len = 0;
++	u32 val;
++
++	val = readl(addr);
++	len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
++		(val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
++
++	return len;
++}
++
++static ssize_t bmu(char *buf, int id, void *base)
++{
++	ssize_t len = 0;
++
++	len += sprintf(buf + len, "%s: %d\n  ", __func__, id);
++
++	len += block_version(buf + len, base + BMU_VERSION);
++
++	len += sprintf(buf + len, "  buf size:  %x\n", (1 << readl(base +
++			BMU_BUF_SIZE)));
++	len += sprintf(buf + len, "  buf count: %x\n", readl(base +
++			BMU_BUF_CNT));
++	len += sprintf(buf + len, "  buf rem:   %x\n", readl(base +
++			BMU_REM_BUF_CNT));
++	len += sprintf(buf + len, "  buf curr:  %x\n", readl(base +
++			BMU_CURR_BUF_CNT));
++	len += sprintf(buf + len, "  free err:  %x\n", readl(base +
++			BMU_FREE_ERR_ADDR));
++
++	return len;
++}
++
++static ssize_t gpi(char *buf, int id, void *base)
++{
++	ssize_t len = 0;
++	u32 val;
++
++	len += sprintf(buf + len, "%s%d:\n  ", __func__, id);
++	len += block_version(buf + len, base + GPI_VERSION);
++
++	len += sprintf(buf + len, "  tx under stick: %x\n", readl(base +
++			GPI_FIFO_STATUS));
++	val = readl(base + GPI_FIFO_DEBUG);
++	len += sprintf(buf + len, "  tx pkts:        %x\n", (val >> 23) &
++			0x3f);
++	len += sprintf(buf + len, "  rx pkts:        %x\n", (val >> 18) &
++			0x3f);
++	len += sprintf(buf + len, "  tx bytes:       %x\n", (val >> 9) &
++			0x1ff);
++	len += sprintf(buf + len, "  rx bytes:       %x\n", (val >> 0) &
++			0x1ff);
++	len += sprintf(buf + len, "  overrun:        %x\n", readl(base +
++			GPI_OVERRUN_DROPCNT));
++
++	return len;
++}
++
++static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
++			     const char *buf, size_t count)
++{
++	class_do_clear = simple_strtoul(buf, NULL, 0);
++	return count;
++}
++
++static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
++			      char *buf)
++{
++	ssize_t len = 0;
++	int id;
++	u32 val;
++	struct pfe_cpumon *cpumon = &pfe->cpumon;
++
++	len += block_version(buf + len, CLASS_VERSION);
++
++	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
++		len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
++
++		val = readl(CLASS_PE0_DEBUG + id * 4);
++		len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
++
++		len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
++						class_do_clear);
++	}
++	len += sprintf(buf + len, "aggregate load=%d%%\n\n",
++			cpumon->class_usage_pct);
++
++	len += sprintf(buf + len, "pe status:   0x%x\n",
++			readl(CLASS_PE_STATUS));
++	len += sprintf(buf + len, "max buf cnt: 0x%x   afull thres: 0x%x\n",
++			readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
++	len += sprintf(buf + len, "tsq max cnt: 0x%x   tsq fifo thres: 0x%x\n",
++			readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
++	len += sprintf(buf + len, "state:       0x%x\n", readl(CLASS_STATE));
++
++	len += class_phy_stats(buf + len, 0);
++	len += class_phy_stats(buf + len, 1);
++	len += class_phy_stats(buf + len, 2);
++	len += class_phy_stats(buf + len, 3);
++
++	return len;
++}
++
++static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
++			   const char *buf, size_t count)
++{
++	tmu_do_clear = simple_strtoul(buf, NULL, 0);
++	return count;
++}
++
++static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
++			    char *buf)
++{
++	ssize_t len = 0;
++	int id;
++	u32 val;
++
++	len += block_version(buf + len, TMU_VERSION);
++
++	for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
++		if (id == TMU2_ID)
++			continue;
++		len += sprintf(buf + len, "%d: ", id - TMU0_ID);
++
++		len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
++						tmu_do_clear);
++	}
++
++	len += sprintf(buf + len, "pe status:    %x\n", readl(TMU_PE_STATUS));
++	len += sprintf(buf + len, "inq fifo cnt: %x\n",
++			readl(TMU_PHY_INQ_FIFO_CNT));
++	val = readl(TMU_INQ_STAT);
++	len += sprintf(buf + len, "inq wr ptr:     %x\n", val & 0x3ff);
++	len += sprintf(buf + len, "inq rd ptr:     %x\n", val >> 10);
++
++	return len;
++}
++
++static unsigned long drops_do_clear;
++static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
++#endif
++
++char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
++	"ICC",
++	"Host Pkt Error",
++	"Rx Error",
++	"IPsec Outbound",
++	"IPsec Inbound",
++	"EXPT IPsec Error",
++	"Reassembly",
++	"Fragmenter",
++	"NAT-T",
++	"Socket",
++	"Multicast",
++	"NAT-PT",
++	"Tx Disabled",
++};
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
++	"IPsec Outbound",
++	"IPsec Inbound",
++	"IPsec Rate Limiter",
++	"Fragmenter",
++	"Socket",
++	"Tx Disabled",
++	"Rx Error",
++};
++#endif
++
++static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
++			     const char *buf, size_t count)
++{
++	drops_do_clear = simple_strtoul(buf, NULL, 0);
++	return count;
++}
++
++static u32 tmu_drops[4][16];
++static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
++			      char *buf)
++{
++	ssize_t len = 0;
++	int id, dropnum;
++	int tmu, queue;
++	u32 val;
++	u32 dmem_addr;
++	int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
++	struct pfe_ctrl *ctrl = &pfe->ctrl;
++
++	memset(class_drop_counter, 0, sizeof(class_drop_counter));
++	for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
++		if (drops_do_clear)
++			pe_sync_stop(ctrl, (1 << id));
++		for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
++			dropnum++) {
++			dmem_addr = CLASS_DM_DROP_CNTR + dropnum * 4;
++			val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
++			class_drop_counter[dropnum] += val;
++			num_class_drops += val;
++			if (drops_do_clear)
++				pe_dmem_write(id, 0, dmem_addr, 4);
++		}
++		if (drops_do_clear)
++			pe_start(ctrl, (1 << id));
++	}
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	if (drops_do_clear)
++		pe_sync_stop(ctrl, (1 << UTIL_ID));
++	for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
++		dmem_addr = UTIL_DM_DROP_CNTR + dropnum * 4;
++		val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
++		util_drop_counter[dropnum] = val;
++		num_util_drops += val;
++		if (drops_do_clear)
++			pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
++	}
++	if (drops_do_clear)
++		pe_start(ctrl, (1 << UTIL_ID));
++#endif
++	for (tmu = 0; tmu < 4; tmu++) {
++		for (queue = 0; queue < 16; queue++) {
++			qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
++					  drops_do_clear);
++			num_tmu_drops += tmu_drops[tmu][queue];
++		}
++	}
++
++	if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
++		len += sprintf(buf + len, "No PE drops\n\n");
++
++	if (num_class_drops > 0) {
++		len += sprintf(buf + len, "Class PE drops --\n");
++		for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
++			dropnum++) {
++			if (class_drop_counter[dropnum] > 0)
++				len += sprintf(buf + len, "  %s: %d\n",
++					class_drop_description[dropnum],
++					class_drop_counter[dropnum]);
++		}
++		len += sprintf(buf + len, "\n");
++	}
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	if (num_util_drops > 0) {
++		len += sprintf(buf + len, "Util PE drops --\n");
++		for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
++			if (util_drop_counter[dropnum] > 0)
++				len += sprintf(buf + len, "  %s: %d\n",
++					util_drop_description[dropnum],
++					util_drop_counter[dropnum]);
++		}
++		len += sprintf(buf + len, "\n");
++	}
++#endif
++	if (num_tmu_drops > 0) {
++		len += sprintf(buf + len, "TMU drops --\n");
++		for (tmu = 0; tmu < 4; tmu++) {
++			for (queue = 0; queue < 16; queue++) {
++				if (tmu_drops[tmu][queue] > 0)
++					len += sprintf(buf + len,
++						"  TMU%d-Q%d: %d\n", tmu,
++						queue, tmu_drops[tmu][queue]);
++			}
++		}
++		len += sprintf(buf + len, "\n");
++	}
++
++	return len;
++}
++
++static ssize_t pfe_show_tmu0_queues(struct device *dev,
++				    struct device_attribute *attr, char *buf)
++{
++	return tmu_queues(buf, 0);
++}
++
++static ssize_t pfe_show_tmu1_queues(struct device *dev,
++				    struct device_attribute *attr, char *buf)
++{
++	return tmu_queues(buf, 1);
++}
++
++static ssize_t pfe_show_tmu2_queues(struct device *dev,
++				    struct device_attribute *attr, char *buf)
++{
++	return tmu_queues(buf, 2);
++}
++
++static ssize_t pfe_show_tmu3_queues(struct device *dev,
++				    struct device_attribute *attr, char *buf)
++{
++	return tmu_queues(buf, 3);
++}
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
++			    const char *buf, size_t count)
++{
++	util_do_clear = simple_strtoul(buf, NULL, 0);
++	return count;
++}
++
++static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
++			     char *buf)
++{
++	ssize_t len = 0;
++	struct pfe_ctrl *ctrl = &pfe->ctrl;
++
++	len += block_version(buf + len, UTIL_VERSION);
++
++	pe_sync_stop(ctrl, (1 << UTIL_ID));
++	len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
++					util_do_clear);
++	pe_start(ctrl, (1 << UTIL_ID));
++
++	len += sprintf(buf + len, "pe status:   %x\n", readl(UTIL_PE_STATUS));
++	len += sprintf(buf + len, "max buf cnt: %x\n",
++			readl(UTIL_MAX_BUF_CNT));
++	len += sprintf(buf + len, "tsq max cnt: %x\n",
++			readl(UTIL_TSQ_MAX_CNT));
++
++	return len;
++}
++#endif
++
++static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
++			    char *buf)
++{
++	ssize_t len = 0;
++
++	len += bmu(buf + len, 1, BMU1_BASE_ADDR);
++	len += bmu(buf + len, 2, BMU2_BASE_ADDR);
++
++	return len;
++}
++
++static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
++			    char *buf)
++{
++	ssize_t len = 0;
++
++	len += sprintf(buf + len, "hif:\n  ");
++	len += block_version(buf + len, HIF_VERSION);
++
++	len += sprintf(buf + len, "  tx curr bd:    %x\n",
++			readl(HIF_TX_CURR_BD_ADDR));
++	len += sprintf(buf + len, "  tx status:     %x\n",
++			readl(HIF_TX_STATUS));
++	len += sprintf(buf + len, "  tx dma status: %x\n",
++			readl(HIF_TX_DMA_STATUS));
++
++	len += sprintf(buf + len, "  rx curr bd:    %x\n",
++			readl(HIF_RX_CURR_BD_ADDR));
++	len += sprintf(buf + len, "  rx status:     %x\n",
++			readl(HIF_RX_STATUS));
++	len += sprintf(buf + len, "  rx dma status: %x\n",
++			readl(HIF_RX_DMA_STATUS));
++
++	len += sprintf(buf + len, "hif nocopy:\n  ");
++	len += block_version(buf + len, HIF_NOCPY_VERSION);
++
++	len += sprintf(buf + len, "  tx curr bd:    %x\n",
++			readl(HIF_NOCPY_TX_CURR_BD_ADDR));
++	len += sprintf(buf + len, "  tx status:     %x\n",
++			readl(HIF_NOCPY_TX_STATUS));
++	len += sprintf(buf + len, "  tx dma status: %x\n",
++			readl(HIF_NOCPY_TX_DMA_STATUS));
++
++	len += sprintf(buf + len, "  rx curr bd:    %x\n",
++			readl(HIF_NOCPY_RX_CURR_BD_ADDR));
++	len += sprintf(buf + len, "  rx status:     %x\n",
++			readl(HIF_NOCPY_RX_STATUS));
++	len += sprintf(buf + len, "  rx dma status: %x\n",
++			readl(HIF_NOCPY_RX_DMA_STATUS));
++
++	return len;
++}
++
++static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
++			    char *buf)
++{
++	ssize_t len = 0;
++
++	len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
++	len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
++	len += gpi(buf + len, 3, HGPI_BASE_ADDR);
++
++	return len;
++}
++
++static ssize_t pfe_show_pfemem(struct device *dev,
++			       struct device_attribute *attr, char *buf)
++{
++	ssize_t len = 0;
++	struct pfe_memmon *memmon = &pfe->memmon;
++
++	len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
++		memmon->kernel_memory_allocated,
++		(memmon->kernel_memory_allocated + 1023) / 1024);
++
++	return len;
++}
++
++#ifdef HIF_NAPI_STATS
++static ssize_t pfe_show_hif_napi_stats(struct device *dev,
++				       struct device_attribute *attr,
++				       char *buf)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct pfe *pfe = platform_get_drvdata(pdev);
++	ssize_t len = 0;
++
++	len += sprintf(buf + len, "sched:  %u\n",
++			pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
++	len += sprintf(buf + len, "poll:   %u\n",
++			pfe->hif.napi_counters[NAPI_POLL_COUNT]);
++	len += sprintf(buf + len, "packet: %u\n",
++			pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
++	len += sprintf(buf + len, "budget: %u\n",
++			pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
++	len += sprintf(buf + len, "desc:   %u\n",
++			pfe->hif.napi_counters[NAPI_DESC_COUNT]);
++	len += sprintf(buf + len, "full:   %u\n",
++			pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
++
++	return len;
++}
++
++static ssize_t pfe_set_hif_napi_stats(struct device *dev,
++				      struct device_attribute *attr,
++					const char *buf, size_t count)
++{
++	struct platform_device *pdev = to_platform_device(dev);
++	struct pfe *pfe = platform_get_drvdata(pdev);
++
++	memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
++
++	return count;
++}
++
++static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
++			pfe_set_hif_napi_stats);
++#endif
++
++static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
++static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
++#endif
++static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
++static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
++static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
++static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
++static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
++static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
++static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
++static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
++static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
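
Each DEVICE_ATTR(name, mode, show, store) line above expands to a struct device_attribute named dev_attr_<name>; for instance, the first one is equivalent to this illustrative expansion:

/* What DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class) expands to: */
static struct device_attribute dev_attr_class = {
	.attr  = { .name = "class", .mode = 0644 },
	.show  = pfe_show_class,
	.store = pfe_set_class,
};
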
++
++int pfe_sysfs_init(struct pfe *pfe)
++{
++	if (device_create_file(pfe->dev, &dev_attr_class))
++		goto err_class;
++
++	if (device_create_file(pfe->dev, &dev_attr_tmu))
++		goto err_tmu;
++
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	if (device_create_file(pfe->dev, &dev_attr_util))
++		goto err_util;
++#endif
++
++	if (device_create_file(pfe->dev, &dev_attr_bmu))
++		goto err_bmu;
++
++	if (device_create_file(pfe->dev, &dev_attr_hif))
++		goto err_hif;
++
++	if (device_create_file(pfe->dev, &dev_attr_gpi))
++		goto err_gpi;
++
++	if (device_create_file(pfe->dev, &dev_attr_drops))
++		goto err_drops;
++
++	if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
++		goto err_tmu0_queues;
++
++	if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
++		goto err_tmu1_queues;
++
++	if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
++		goto err_tmu2_queues;
++
++	if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
++		goto err_tmu3_queues;
++
++	if (device_create_file(pfe->dev, &dev_attr_pfemem))
++		goto err_pfemem;
++
++#ifdef HIF_NAPI_STATS
++	if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
++		goto err_hif_napi_stats;
++#endif
++
++	return 0;
++
++#ifdef HIF_NAPI_STATS
++err_hif_napi_stats:
++	device_remove_file(pfe->dev, &dev_attr_pfemem);
++#endif
++
++err_pfemem:
++	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
++
++err_tmu3_queues:
++	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
++
++err_tmu2_queues:
++	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
++
++err_tmu1_queues:
++	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
++
++err_tmu0_queues:
++	device_remove_file(pfe->dev, &dev_attr_drops);
++
++err_drops:
++	device_remove_file(pfe->dev, &dev_attr_gpi);
++
++err_gpi:
++	device_remove_file(pfe->dev, &dev_attr_hif);
++
++err_hif:
++	device_remove_file(pfe->dev, &dev_attr_bmu);
++
++err_bmu:
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	device_remove_file(pfe->dev, &dev_attr_util);
++
++err_util:
++#endif
++	device_remove_file(pfe->dev, &dev_attr_tmu);
++
++err_tmu:
++	device_remove_file(pfe->dev, &dev_attr_class);
++
++err_class:
++	return -1;
++}
++
++void pfe_sysfs_exit(struct pfe *pfe)
++{
++#ifdef HIF_NAPI_STATS
++	device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
++#endif
++	device_remove_file(pfe->dev, &dev_attr_pfemem);
++	device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
++	device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
++	device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
++	device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
++	device_remove_file(pfe->dev, &dev_attr_drops);
++	device_remove_file(pfe->dev, &dev_attr_gpi);
++	device_remove_file(pfe->dev, &dev_attr_hif);
++	device_remove_file(pfe->dev, &dev_attr_bmu);
++#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
++	device_remove_file(pfe->dev, &dev_attr_util);
++#endif
++	device_remove_file(pfe->dev, &dev_attr_tmu);
++	device_remove_file(pfe->dev, &dev_attr_class);
++}
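
The create/remove ladder above is the classic device_create_file() idiom; for comparison, a sketch of the same files expressed as a sysfs attribute group, which the core creates and removes atomically (utility, queue, and NAPI attributes elided):

/* Sketch only; uses the dev_attr_* objects defined above. */
static struct attribute *pfe_attrs[] = {
	&dev_attr_class.attr,
	&dev_attr_tmu.attr,
	&dev_attr_bmu.attr,
	&dev_attr_hif.attr,
	&dev_attr_gpi.attr,
	&dev_attr_drops.attr,
	&dev_attr_pfemem.attr,
	NULL,
};

static const struct attribute_group pfe_attr_group = {
	.attrs = pfe_attrs,
};

/* pfe_sysfs_init() would reduce to:
 *	return sysfs_create_group(&pfe->dev->kobj, &pfe_attr_group);
 * and pfe_sysfs_exit() to:
 *	sysfs_remove_group(&pfe->dev->kobj, &pfe_attr_group);
 */
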
+diff --git a/drivers/staging/fsl_ppfe/pfe_sysfs.h b/drivers/staging/fsl_ppfe/pfe_sysfs.h
+new file mode 100644
+index 00000000..4fb39c93
+--- /dev/null
++++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
+@@ -0,0 +1,29 @@
++/*
++ * Copyright 2015-2016 Freescale Semiconductor, Inc.
++ * Copyright 2017 NXP
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef _PFE_SYSFS_H_
++#define _PFE_SYSFS_H_
++
++#include <linux/proc_fs.h>
++
++u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
++
++int pfe_sysfs_init(struct pfe *pfe);
++void pfe_sysfs_exit(struct pfe *pfe);
++
++#endif /* _PFE_SYSFS_H_ */
+-- 
+2.14.1
+

File diff is too large to display
+ 131 - 110
target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch


+ 638 - 4
target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch

@@ -1,4 +1,4 @@
-From 659603c5f6cbc3d39922d4374df25ae4627d0e88 Mon Sep 17 00:00:00 2001
+From 854c1f0e9574e9b25a55b439608c71e013b34a56 Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <[email protected]>
 Date: Mon, 25 Sep 2017 12:12:20 +0800
 Subject: [PATCH] dma: support layerscape
@@ -8,8 +8,9 @@ This is an integrated patch for layerscape dma support.
 Signed-off-by: jiaheng.fan <[email protected]>
 Signed-off-by: Yangbo Lu <[email protected]>
 ---
- drivers/dma/Kconfig                     |   14 +
- drivers/dma/Makefile                    |    2 +
+ drivers/dma/Kconfig                     |   31 +
+ drivers/dma/Makefile                    |    3 +
+ drivers/dma/caam_dma.c                  |  563 +++++++++++++++
  drivers/dma/dpaa2-qdma/Kconfig          |    8 +
  drivers/dma/dpaa2-qdma/Makefile         |    8 +
  drivers/dma/dpaa2-qdma/dpaa2-qdma.c     |  986 +++++++++++++++++++++++++
@@ -18,7 +19,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  drivers/dma/dpaa2-qdma/fsl_dpdmai.h     |  521 ++++++++++++++
  drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h |  222 ++++++
  drivers/dma/fsl-qdma.c                  | 1201 +++++++++++++++++++++++++++++++
- 10 files changed, 3678 insertions(+)
+ 11 files changed, 4259 insertions(+)
+ create mode 100644 drivers/dma/caam_dma.c
  create mode 100644 drivers/dma/dpaa2-qdma/Kconfig
  create mode 100644 drivers/dma/dpaa2-qdma/Makefile
  create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c
@@ -28,6 +30,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
  create mode 100644 drivers/dma/fsl-qdma.c
 
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 141aefbe..8caaf091 100644
 --- a/drivers/dma/Kconfig
 +++ b/drivers/dma/Kconfig
 @@ -192,6 +192,20 @@ config FSL_EDMA
@@ -51,6 +55,32 @@ Signed-off-by: Yangbo Lu <[email protected]>
  config FSL_RAID
          tristate "Freescale RAID engine Support"
          depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+@@ -564,6 +578,23 @@ config ZX_DMA
+ 	help
+ 	  Support the DMA engine for ZTE ZX296702 platform devices.
+ 
++config CRYPTO_DEV_FSL_CAAM_DMA
++	tristate "CAAM DMA engine support"
++	depends on CRYPTO_DEV_FSL_CAAM_JR
++	default y
++	select DMA_ENGINE
++	select ASYNC_CORE
++	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
++	help
++	  Selecting this will offload the DMA operations for users of
++	  the scatter-gather memcpy API to the CAAM via job rings. The
++	  CAAM is a hardware module that provides hardware acceleration to
++	  cryptographic operations. It has a built-in DMA controller that can
++	  be programmed to read/write cryptographic data. This module defines
++	  a DMA driver that uses the DMA capabilities of the CAAM.
++
++	  To compile this as a module, choose M here: the module
++	  will be called caam_dma.
+ 
+ # driver files
+ source "drivers/dma/bestcomm/Kconfig"
+diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
+index e4dc9cac..a694da0e 100644
 --- a/drivers/dma/Makefile
 +++ b/drivers/dma/Makefile
 @@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
@@ -62,6 +92,586 @@ Signed-off-by: Yangbo Lu <[email protected]>
  obj-$(CONFIG_FSL_RAID) += fsl_raid.o
  obj-$(CONFIG_HSU_DMA) += hsu/
  obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+@@ -67,6 +69,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
+ obj-$(CONFIG_TI_EDMA) += edma.o
+ obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
+ obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o
+ 
+ obj-y += qcom/
+ obj-y += xilinx/
+diff --git a/drivers/dma/caam_dma.c b/drivers/dma/caam_dma.c
+new file mode 100644
+index 00000000..e430b320
+--- /dev/null
++++ b/drivers/dma/caam_dma.c
+@@ -0,0 +1,563 @@
++/*
++ * caam support for SG DMA
++ *
++ * Copyright 2016 Freescale Semiconductor, Inc
++ * Copyright 2017 NXP
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/debugfs.h>
++
++#include <linux/dmaengine.h>
++#include "dmaengine.h"
++
++#include "../crypto/caam/regs.h"
++#include "../crypto/caam/jr.h"
++#include "../crypto/caam/error.h"
++#include "../crypto/caam/intern.h"
++#include "../crypto/caam/desc_constr.h"
++#include "../crypto/caam/sg_sw_sec4.h"
++
++#define DESC_DMA_MEMCPY_LEN	((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \
++				 CAAM_CMD_SZ)
++
++/* This is the max chunk size of a DMA transfer. If a buffer is larger than
++ * this value it is internally broken into chunks of at most
++ * CAAM_DMA_CHUNK_SIZE bytes, and a DMA transfer request is issued per chunk.
++ * This value is the largest 16-bit number that is a multiple of 256 bytes
++ * (the largest configurable CAAM DMA burst size).
++ */
++#define CAAM_DMA_CHUNK_SIZE	65280
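
Chunking itself happens inside the shared descriptor loop built by set_caam_dma_desc() further down; a transfer of len bytes therefore runs that loop the following number of times (hypothetical helper, not used by the driver):

/* Hypothetical: shared-descriptor loop iterations for a len-byte copy. */
static inline unsigned int caam_dma_nr_chunks(size_t len)
{
	return DIV_ROUND_UP(len, CAAM_DMA_CHUNK_SIZE);
}
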
++
++struct caam_dma_sh_desc {
++	u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned;
++	dma_addr_t desc_dma;
++};
++
++/* caam dma extended descriptor */
++struct caam_dma_edesc {
++	struct dma_async_tx_descriptor async_tx;
++	struct list_head node;
++	struct caam_dma_ctx *ctx;
++	dma_addr_t src_dma;
++	dma_addr_t dst_dma;
++	unsigned int src_len;
++	unsigned int dst_len;
++	struct sec4_sg_entry *sec4_sg;
++	u32 jd[] ____cacheline_aligned;
++};
++
++/*
++ * caam_dma_ctx - per jr/channel context
++ * @chan: dma channel used by async_tx API
++ * @node: list_head used to attach to the global dma_ctx_list
++ * @jrdev: Job Ring device
++ * @submit_q: queue of pending (submitted, but not enqueued) jobs
++ * @done_not_acked: jobs that have been completed by jr, but maybe not acked
++ * @edesc_lock: protects extended descriptor
++ */
++struct caam_dma_ctx {
++	struct dma_chan chan;
++	struct list_head node;
++	struct device *jrdev;
++	struct list_head submit_q;
++	struct list_head done_not_acked;
++	spinlock_t edesc_lock;
++};
++
++static struct dma_device *dma_dev;
++static struct caam_dma_sh_desc *dma_sh_desc;
++static LIST_HEAD(dma_ctx_list);
++
++static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx)
++{
++	struct caam_dma_edesc *edesc = NULL;
++	struct caam_dma_ctx *ctx = NULL;
++	dma_cookie_t cookie;
++
++	edesc = container_of(tx, struct caam_dma_edesc, async_tx);
++	ctx = container_of(tx->chan, struct caam_dma_ctx, chan);
++
++	spin_lock_bh(&ctx->edesc_lock);
++
++	cookie = dma_cookie_assign(tx);
++	list_add_tail(&edesc->node, &ctx->submit_q);
++
++	spin_unlock_bh(&ctx->edesc_lock);
++
++	return cookie;
++}
++
++static unsigned int caam_dma_sg_dma_len(struct scatterlist *sg,
++					unsigned int nents)
++{
++	unsigned int len;
++
++	for (len = 0; sg && nents; sg = sg_next(sg), nents--)
++		len += sg_dma_len(sg);
++
++	return len;
++}
++
++static struct caam_dma_edesc *
++caam_dma_sg_edesc_alloc(struct dma_chan *chan,
++			struct scatterlist *dst_sg, unsigned int dst_nents,
++			struct scatterlist *src_sg, unsigned int src_nents,
++			unsigned long flags)
++{
++	struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
++						chan);
++	struct device *jrdev = ctx->jrdev;
++	struct caam_dma_edesc *edesc;
++	struct sec4_sg_entry *sec4_sg;
++	dma_addr_t sec4_sg_dma_src;
++	unsigned int sec4_sg_bytes;
++
++	if (!dst_sg || !src_sg || !dst_nents || !src_nents)
++		return NULL;
++
++	sec4_sg_bytes = (src_nents + dst_nents) * sizeof(*sec4_sg);
++
++	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
++			GFP_DMA | GFP_NOWAIT);
++	if (!edesc)
++		return ERR_PTR(-ENOMEM);
++
++	edesc->src_len = caam_dma_sg_dma_len(src_sg, src_nents);
++	edesc->dst_len = caam_dma_sg_dma_len(dst_sg, dst_nents);
++	if (edesc->src_len != edesc->dst_len) {
++		dev_err(jrdev, "%s: src(%u) and dst(%u) len mismatch.\n",
++			__func__, edesc->src_len, edesc->dst_len);
++		kfree(edesc);
++		return ERR_PTR(-EINVAL);
++	}
++
++	dma_async_tx_descriptor_init(&edesc->async_tx, chan);
++	edesc->async_tx.tx_submit = caam_dma_tx_submit;
++	edesc->async_tx.flags = flags;
++	edesc->async_tx.cookie = -EBUSY;
++
++	/* Prepare SEC SGs */
++	edesc->sec4_sg = (void *)edesc + offsetof(struct caam_dma_edesc, jd) +
++			 DESC_JOB_IO_LEN;
++
++	sec4_sg = edesc->sec4_sg;
++	sg_to_sec4_sg_last(src_sg, src_nents, sec4_sg, 0);
++
++	sec4_sg += src_nents;
++	sg_to_sec4_sg_last(dst_sg, dst_nents, sec4_sg, 0);
++
++	sec4_sg_dma_src = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes,
++					 DMA_TO_DEVICE);
++	if (dma_mapping_error(jrdev, sec4_sg_dma_src)) {
++		dev_err(jrdev, "error mapping segments to device\n");
++		kfree(edesc);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	edesc->src_dma = sec4_sg_dma_src;
++	edesc->dst_dma = sec4_sg_dma_src + src_nents * sizeof(*sec4_sg);
++	edesc->ctx = ctx;
++
++	return edesc;
++}
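
The single kzalloc() above packs the extended descriptor, the job descriptor tail, and both scatter-gather tables into one buffer; roughly (illustrative):

/*
 * Layout of the allocation (illustrative):
 *
 *   +------------------------+  edesc
 *   | struct caam_dma_edesc  |
 *   |   jd[] tail            |  DESC_JOB_IO_LEN bytes of job descriptor
 *   +------------------------+  edesc->sec4_sg (maps to edesc->src_dma)
 *   | src sec4_sg entries    |  src_nents * sizeof(struct sec4_sg_entry)
 *   +------------------------+  edesc->dst_dma
 *   | dst sec4_sg entries    |  dst_nents * sizeof(struct sec4_sg_entry)
 *   +------------------------+
 */
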
++
++static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc)
++{
++	struct caam_dma_ctx *ctx = edesc->ctx;
++	struct caam_dma_edesc *_edesc = NULL;
++
++	spin_lock_bh(&ctx->edesc_lock);
++
++	list_add_tail(&edesc->node, &ctx->done_not_acked);
++	list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
++		if (async_tx_test_ack(&edesc->async_tx)) {
++			list_del(&edesc->node);
++			kfree(edesc);
++		}
++	}
++
++	spin_unlock_bh(&ctx->edesc_lock);
++}
++
++static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err,
++			  void *context)
++{
++	struct caam_dma_edesc *edesc = context;
++	struct caam_dma_ctx *ctx = edesc->ctx;
++	dma_async_tx_callback callback;
++	void *callback_param;
++
++	if (err)
++		caam_jr_strstatus(ctx->jrdev, err);
++
++	dma_run_dependencies(&edesc->async_tx);
++
++	spin_lock_bh(&ctx->edesc_lock);
++	dma_cookie_complete(&edesc->async_tx);
++	spin_unlock_bh(&ctx->edesc_lock);
++
++	callback = edesc->async_tx.callback;
++	callback_param = edesc->async_tx.callback_param;
++
++	dma_descriptor_unmap(&edesc->async_tx);
++
++	caam_jr_chan_free_edesc(edesc);
++
++	if (callback)
++		callback(callback_param);
++}
++
++static void caam_dma_sg_init_job_desc(struct caam_dma_edesc *edesc)
++{
++	u32 *jd = edesc->jd;
++	u32 *sh_desc = dma_sh_desc->desc;
++	dma_addr_t desc_dma = dma_sh_desc->desc_dma;
++
++	/* init the job descriptor */
++	init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
++
++	/* set SEQIN PTR */
++	append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, LDST_SGF);
++
++	/* set SEQOUT PTR */
++	append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, LDST_SGF);
++
++#ifdef DEBUG
++	print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
++		       DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
++#endif
++}
++
++/* This function can be called from an interrupt context */
++static struct dma_async_tx_descriptor *
++caam_dma_prep_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
++		 unsigned int dst_nents, struct scatterlist *src_sg,
++		 unsigned int src_nents, unsigned long flags)
++{
++	struct caam_dma_edesc *edesc;
++
++	/* allocate extended descriptor */
++	edesc = caam_dma_sg_edesc_alloc(chan, dst_sg, dst_nents, src_sg,
++					src_nents, flags);
++	if (IS_ERR_OR_NULL(edesc))
++		return ERR_CAST(edesc);
++
++	/* Initialize job descriptor */
++	caam_dma_sg_init_job_desc(edesc);
++
++	return &edesc->async_tx;
++}
++
++static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc)
++{
++	u32 *jd = edesc->jd;
++	u32 *sh_desc = dma_sh_desc->desc;
++	dma_addr_t desc_dma = dma_sh_desc->desc_dma;
++
++	/* init the job descriptor */
++	init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE);
++
++	/* set SEQIN PTR */
++	append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0);
++
++	/* set SEQOUT PTR */
++	append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0);
++
++#ifdef DEBUG
++	print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ",
++		       DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1);
++#endif
++}
++
++static struct dma_async_tx_descriptor *
++caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
++		     size_t len, unsigned long flags)
++{
++	struct caam_dma_edesc *edesc;
++	struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
++						chan);
++
++	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT);
++	if (!edesc)
++		return ERR_PTR(-ENOMEM);
++
++	dma_async_tx_descriptor_init(&edesc->async_tx, chan);
++	edesc->async_tx.tx_submit = caam_dma_tx_submit;
++	edesc->async_tx.flags = flags;
++	edesc->async_tx.cookie = -EBUSY;
++
++	edesc->src_dma = src;
++	edesc->src_len = len;
++	edesc->dst_dma = dst;
++	edesc->dst_len = len;
++	edesc->ctx = ctx;
++
++	caam_dma_memcpy_init_job_desc(edesc);
++
++	return &edesc->async_tx;
++}
++
++/* This function can be called in an interrupt context */
++static void caam_dma_issue_pending(struct dma_chan *chan)
++{
++	struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
++						chan);
++	struct caam_dma_edesc *edesc, *_edesc;
++
++	spin_lock_bh(&ctx->edesc_lock);
++	list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
++		if (caam_jr_enqueue(ctx->jrdev, edesc->jd,
++				    caam_dma_done, edesc) < 0)
++			break;
++		list_del(&edesc->node);
++	}
++	spin_unlock_bh(&ctx->edesc_lock);
++}
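
For orientation, a sketch of how a dmaengine client would drive this prep/submit/issue path (the function name and error handling are illustrative; the channel is assumed to come from dma_request_channel() with DMA_MEMCPY capability):

static int caam_dma_copy_example(struct dma_chan *chan, dma_addr_t dst,
				 dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (IS_ERR_OR_NULL(tx))
		return -ENOMEM;

	cookie = dmaengine_submit(tx);		/* caam_dma_tx_submit() */
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);		/* enqueue to the job ring */
	return 0;
}
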
++
++static void caam_dma_free_chan_resources(struct dma_chan *chan)
++{
++	struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
++						chan);
++	struct caam_dma_edesc *edesc, *_edesc;
++
++	spin_lock_bh(&ctx->edesc_lock);
++	list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) {
++		list_del(&edesc->node);
++		kfree(edesc);
++	}
++	list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) {
++		list_del(&edesc->node);
++		kfree(edesc);
++	}
++	spin_unlock_bh(&ctx->edesc_lock);
++}
++
++static int caam_dma_jr_chan_bind(void)
++{
++	struct device *jrdev;
++	struct caam_dma_ctx *ctx;
++	int bonds = 0;
++	int i;
++
++	for (i = 0; i < caam_jr_driver_probed(); i++) {
++		jrdev = caam_jridx_alloc(i);
++		if (IS_ERR(jrdev)) {
++			pr_err("job ring device %d allocation failed\n", i);
++			continue;
++		}
++
++		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++		if (!ctx) {
++			caam_jr_free(jrdev);
++			continue;
++		}
++
++		ctx->chan.device = dma_dev;
++		ctx->chan.private = ctx;
++
++		ctx->jrdev = jrdev;
++
++		INIT_LIST_HEAD(&ctx->submit_q);
++		INIT_LIST_HEAD(&ctx->done_not_acked);
++		INIT_LIST_HEAD(&ctx->node);
++		spin_lock_init(&ctx->edesc_lock);
++
++		dma_cookie_init(&ctx->chan);
++
++		/* add the context of this channel to the context list */
++		list_add_tail(&ctx->node, &dma_ctx_list);
++
++		/* add this channel to the device chan list */
++		list_add_tail(&ctx->chan.device_node, &dma_dev->channels);
++
++		bonds++;
++	}
++
++	return bonds;
++}
++
++static inline void caam_jr_dma_free(struct dma_chan *chan)
++{
++	struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx,
++						chan);
++
++	list_del(&ctx->node);
++	list_del(&chan->device_node);
++	caam_jr_free(ctx->jrdev);
++	kfree(ctx);
++}
++
++static void set_caam_dma_desc(u32 *desc)
++{
++	u32 *jmp_cmd;
++
++	/* dma shared descriptor */
++	init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT));
++
++	/* REG1 = CAAM_DMA_CHUNK_SIZE */
++	append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE);
++
++	/* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */
++	append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE);
++
++	/* if (REG0 > 0)
++	 *	jmp to LABEL1
++	 */
++	jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
++			      JUMP_COND_MATH_Z);
++
++	/* REG1 = SEQINLEN */
++	append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ);
++
++	/* LABEL1 */
++	set_jump_tgt_here(desc, jmp_cmd);
++
++	/* VARSEQINLEN = REG1 */
++	append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ);
++
++	/* VARSEQOUTLEN = REG1 */
++	append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ);
++
++	/* do FIFO STORE */
++	append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF);
++
++	/* do FIFO LOAD */
++	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
++			     FIFOLD_TYPE_IFIFO | LDST_VLF);
++
++	/* if (REG0 > 0)
++	 *	jmp 0xF8 (after shared desc header)
++	 */
++	append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N |
++		    JUMP_COND_MATH_Z | 0xF8);
++
++#ifdef DEBUG
++	print_hex_dump(KERN_ERR, "caam dma shdesc@" __stringify(__LINE__) ": ",
++		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++}
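
Read as plain C, the command sequence above implements roughly this loop (illustrative only):

/* C rendering of the shared descriptor's control flow (illustrative). */
static void caam_dma_desc_flow(size_t seqinlen)
{
	size_t remaining = seqinlen;

	do {
		size_t chunk = min_t(size_t, remaining, CAAM_DMA_CHUNK_SIZE);

		/* FIFO STORE + FIFO LOAD move 'chunk' bytes */
		remaining -= chunk;
	} while (remaining);
}
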
++
++static int caam_dma_probe(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct device *ctrldev = dev->parent;
++	struct dma_chan *chan, *_chan;
++	u32 *sh_desc;
++	int err = -ENOMEM;
++	int bonds;
++
++	if (!caam_jr_driver_probed()) {
++		dev_info(dev, "deferring probe until the JR driver has probed\n");
++		return -EPROBE_DEFER;
++	}
++
++	dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL);
++	if (!dma_dev)
++		return -ENOMEM;
++
++	dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA);
++	if (!dma_sh_desc)
++		goto desc_err;
++
++	sh_desc = dma_sh_desc->desc;
++	set_caam_dma_desc(sh_desc);
++	dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc,
++					       desc_bytes(sh_desc),
++					       DMA_TO_DEVICE);
++	if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) {
++		dev_err(dev, "unable to map dma descriptor\n");
++		goto map_err;
++	}
++
++	INIT_LIST_HEAD(&dma_dev->channels);
++
++	bonds = caam_dma_jr_chan_bind();
++	if (!bonds) {
++		err = -ENODEV;
++		goto jr_bind_err;
++	}
++
++	dma_dev->dev = dev;
++	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
++	dma_cap_set(DMA_SG, dma_dev->cap_mask);
++	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
++	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
++	dma_dev->device_tx_status = dma_cookie_status;
++	dma_dev->device_issue_pending = caam_dma_issue_pending;
++	dma_dev->device_prep_dma_sg = caam_dma_prep_sg;
++	dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy;
++	dma_dev->device_free_chan_resources = caam_dma_free_chan_resources;
++
++	err = dma_async_device_register(dma_dev);
++	if (err) {
++		dev_err(dev, "Failed to register CAAM DMA engine\n");
++		goto jr_bind_err;
++	}
++
++	dev_info(dev, "caam dma support with %d job rings\n", bonds);
++
++	return err;
++
++jr_bind_err:
++	list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node)
++		caam_jr_dma_free(chan);
++
++	dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc),
++			 DMA_TO_DEVICE);
++map_err:
++	kfree(dma_sh_desc);
++desc_err:
++	kfree(dma_dev);
++	return err;
++}
++
++static int caam_dma_remove(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct device *ctrldev = dev->parent;
++	struct caam_dma_ctx *ctx, *_ctx;
++
++	dma_async_device_unregister(dma_dev);
++
++	list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) {
++		list_del(&ctx->node);
++		caam_jr_free(ctx->jrdev);
++		kfree(ctx);
++	}
++
++	dma_unmap_single(ctrldev, dma_sh_desc->desc_dma,
++			 desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE);
++
++	kfree(dma_sh_desc);
++	kfree(dma_dev);
++
++	dev_info(dev, "caam dma support disabled\n");
++	return 0;
++}
++
++static const struct of_device_id caam_dma_match[] = {
++	{ .compatible = "fsl,sec-v5.4-dma", },
++	{ .compatible = "fsl,sec-v5.0-dma", },
++	{ .compatible = "fsl,sec-v4.0-dma", },
++	{},
++};
++MODULE_DEVICE_TABLE(of, caam_dma_match);
++
++static struct platform_driver caam_dma_driver = {
++	.driver = {
++		.name = "caam-dma",
++		.of_match_table = caam_dma_match,
++	},
++	.probe  = caam_dma_probe,
++	.remove = caam_dma_remove,
++};
++module_platform_driver(caam_dma_driver);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("NXP CAAM support for SG DMA");
++MODULE_AUTHOR("NXP Semiconductors");
+diff --git a/drivers/dma/dpaa2-qdma/Kconfig b/drivers/dma/dpaa2-qdma/Kconfig
+new file mode 100644
+index 00000000..084e34bf
 --- /dev/null
 +++ b/drivers/dma/dpaa2-qdma/Kconfig
 @@ -0,0 +1,8 @@
@@ -73,6 +683,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	---help---
 +	  NXP Data Path Acceleration Architecture 2 QDMA driver,
 +	  using the NXP MC bus driver.
+diff --git a/drivers/dma/dpaa2-qdma/Makefile b/drivers/dma/dpaa2-qdma/Makefile
+new file mode 100644
+index 00000000..ba599ac6
 --- /dev/null
 +++ b/drivers/dma/dpaa2-qdma/Makefile
 @@ -0,0 +1,8 @@
@@ -84,6 +697,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o
 +
 +fsl-dpaa2-qdma-objs    := dpaa2-qdma.o dpdmai.o
+diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
+new file mode 100644
+index 00000000..ad6b03f7
 --- /dev/null
 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c
 @@ -0,0 +1,986 @@
@@ -1073,6 +1689,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +
 +MODULE_DESCRIPTION("NXP DPAA2 qDMA driver");
 +MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/dma/dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
+new file mode 100644
+index 00000000..71a00db8
 --- /dev/null
 +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h
 @@ -0,0 +1,262 @@
@@ -1338,6 +1957,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\
 +		sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK)
 +#endif /* __DPAA2_QDMA_H */
+diff --git a/drivers/dma/dpaa2-qdma/dpdmai.c b/drivers/dma/dpaa2-qdma/dpdmai.c
+new file mode 100644
+index 00000000..ad13fc1e
 --- /dev/null
 +++ b/drivers/dma/dpaa2-qdma/dpdmai.c
 @@ -0,0 +1,454 @@
@@ -1795,6 +2417,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +
 +	return 0;
 +}
+diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
+new file mode 100644
+index 00000000..e931ce16
 --- /dev/null
 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h
 @@ -0,0 +1,521 @@
@@ -2319,6 +2944,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +			struct dpdmai_tx_queue_attr	*attr);
 +
 +#endif /* __FSL_DPDMAI_H */
+diff --git a/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
+new file mode 100644
+index 00000000..7d403c01
 --- /dev/null
 +++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h
 @@ -0,0 +1,222 @@
@@ -2544,6 +3172,9 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	MC_RSP_OP(cmd, 1, 0,  32, uint32_t,  attr->fqid)
 +
 +#endif /* _FSL_DPDMAI_CMD_H */
+diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
+new file mode 100644
+index 00000000..6c4c2813
 --- /dev/null
 +++ b/drivers/dma/fsl-qdma.c
 @@ -0,0 +1,1201 @@
@@ -3748,3 +4379,6 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +MODULE_ALIAS("platform:fsl-qdma");
 +MODULE_DESCRIPTION("Freescale qDMA engine driver");
 +MODULE_LICENSE("GPL v2");
+-- 
+2.14.1
+

+ 99 - 10
target/linux/layerscape/patches-4.9/806-flextimer-support-layerscape.patch

@@ -1,4 +1,4 @@
-From a5b3155b532289af793c26251cb087b4a24d5c15 Mon Sep 17 00:00:00 2001
+From 76cd2ef6b69b67c09480a3248f7b910897f0bb2f Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <[email protected]>
 Date: Mon, 25 Sep 2017 12:13:12 +0800
 Subject: [PATCH] flextimer: support layerscape
@@ -10,13 +10,15 @@ Signed-off-by: Meng Yi <[email protected]>
 Signed-off-by: Yangbo Lu <[email protected]>
 ---
  drivers/clocksource/fsl_ftm_timer.c    |   8 +-
- drivers/soc/fsl/layerscape/ftm_alarm.c | 286 +++++++++++++++++++++++++++++++++
- 2 files changed, 290 insertions(+), 4 deletions(-)
+ drivers/soc/fsl/layerscape/ftm_alarm.c | 367 +++++++++++++++++++++++++++++++++
+ 2 files changed, 371 insertions(+), 4 deletions(-)
  create mode 100644 drivers/soc/fsl/layerscape/ftm_alarm.c
 
+diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
+index 738515b8..770bbbca 100644
 --- a/drivers/clocksource/fsl_ftm_timer.c
 +++ b/drivers/clocksource/fsl_ftm_timer.c
-@@ -83,11 +83,11 @@ static inline void ftm_counter_disable(v
+@@ -83,11 +83,11 @@ static inline void ftm_counter_disable(void __iomem *base)
  
  static inline void ftm_irq_acknowledge(void __iomem *base)
  {
@@ -32,9 +34,12 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  
  static inline void ftm_irq_enable(void __iomem *base)
+diff --git a/drivers/soc/fsl/layerscape/ftm_alarm.c b/drivers/soc/fsl/layerscape/ftm_alarm.c
+new file mode 100644
+index 00000000..49865b0b
 --- /dev/null
 +++ b/drivers/soc/fsl/layerscape/ftm_alarm.c
-@@ -0,0 +1,286 @@
+@@ -0,0 +1,367 @@
 +/*
 + * Freescale FlexTimer Module (FTM) Alarm driver.
 + *
@@ -53,6 +58,10 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +#include <linux/of_address.h>
 +#include <linux/of_irq.h>
 +#include <linux/platform_device.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/libata.h>
++#include <linux/module.h>
 +
 +#define FTM_SC			0x00
 +#define FTM_SC_CLK_SHIFT	3
@@ -77,6 +86,57 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +static u32 alarm_freq;
 +static bool big_endian;
 +
++enum pmu_endian_type {
++	BIG_ENDIAN,
++	LITTLE_ENDIAN,
++};
++
++struct rcpm_cfg {
++	enum pmu_endian_type big_endian; /* endianness of the PMU (RCPM) module */
++	u32 flextimer_set_bit;	/* IPPDEXPCR bit keeping FlexTimer1 powered in LPM20 */
++};
++
++static struct rcpm_cfg ls1012a_rcpm_cfg = {
++	.big_endian = BIG_ENDIAN,
++	.flextimer_set_bit = 0x20000,
++};
++
++static struct rcpm_cfg ls1021a_rcpm_cfg = {
++	.big_endian = BIG_ENDIAN,
++	.flextimer_set_bit = 0x20000,
++};
++
++static struct rcpm_cfg ls1043a_rcpm_cfg = {
++	.big_endian = BIG_ENDIAN,
++	.flextimer_set_bit = 0x20000,
++};
++
++static struct rcpm_cfg ls1046a_rcpm_cfg = {
++	.big_endian = BIG_ENDIAN,
++	.flextimer_set_bit = 0x20000,
++};
++
++static struct rcpm_cfg ls1088a_rcpm_cfg = {
++	.big_endian = LITTLE_ENDIAN,
++	.flextimer_set_bit = 0x4000,
++};
++
++static struct rcpm_cfg ls208xa_rcpm_cfg = {
++	.big_endian = LITTLE_ENDIAN,
++	.flextimer_set_bit = 0x4000,
++};
++
++static const struct of_device_id ippdexpcr_of_match[] = {
++	{ .compatible = "fsl,ls1012a-ftm", .data = &ls1012a_rcpm_cfg},
++	{ .compatible = "fsl,ls1021a-ftm", .data = &ls1021a_rcpm_cfg},
++	{ .compatible = "fsl,ls1043a-ftm", .data = &ls1043a_rcpm_cfg},
++	{ .compatible = "fsl,ls1046a-ftm", .data = &ls1046a_rcpm_cfg},
++	{ .compatible = "fsl,ls1088a-ftm", .data = &ls1088a_rcpm_cfg},
++	{ .compatible = "fsl,ls208xa-ftm", .data = &ls208xa_rcpm_cfg},
++	{},
++};
++MODULE_DEVICE_TABLE(of, ippdexpcr_of_match);
++
 +static inline u32 ftm_readl(void __iomem *addr)
 +{
 +	if (big_endian)
@@ -251,7 +311,10 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	struct resource *r;
 +	int irq;
 +	int ret;
-+	u32 ippdexpcr;
++	struct rcpm_cfg *rcpm_cfg;
++	u32 ippdexpcr, flextimer;
++	const struct of_device_id *of_id;
++	enum pmu_endian_type endian;
 +
 +	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 +	if (!r)
@@ -261,14 +324,32 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	if (IS_ERR(ftm1_base))
 +		return PTR_ERR(ftm1_base);
 +
++	of_id = of_match_node(ippdexpcr_of_match, np);
++	if (!of_id)
++		return -ENODEV;
++
++	rcpm_cfg = devm_kzalloc(&pdev->dev, sizeof(*rcpm_cfg), GFP_KERNEL);
++	if (!rcpm_cfg)
++		return -ENOMEM;
++
++	rcpm_cfg = (struct rcpm_cfg *)of_id->data;
++	endian = rcpm_cfg->big_endian;
++	flextimer = rcpm_cfg->flextimer_set_bit;
++
 +	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "FlexTimer1");
 +	if (r) {
 +		rcpm_ftm_addr = devm_ioremap_resource(&pdev->dev, r);
 +		if (IS_ERR(rcpm_ftm_addr))
 +			return PTR_ERR(rcpm_ftm_addr);
-+		ippdexpcr = ioread32be(rcpm_ftm_addr);
-+		ippdexpcr |= 0x20000;
-+		iowrite32be(ippdexpcr, rcpm_ftm_addr);
++		if (endian == BIG_ENDIAN)
++			ippdexpcr = ioread32be(rcpm_ftm_addr);
++		else
++			ippdexpcr = ioread32(rcpm_ftm_addr);
++		ippdexpcr |= flextimer;
++		if (endian == BIG_ENDIAN)
++			iowrite32be(ippdexpcr, rcpm_ftm_addr);
++		else
++			iowrite32(ippdexpcr, rcpm_ftm_addr);
 +	}
 +
 +	irq = irq_of_parse_and_map(np, 0);
@@ -302,7 +383,12 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +}
 +
 +static const struct of_device_id ftm_alarm_match[] = {
-+	{ .compatible = "fsl,ftm-alarm", },
++	{ .compatible = "fsl,ls1012a-ftm", },
++	{ .compatible = "fsl,ls1021a-ftm", },
++	{ .compatible = "fsl,ls1043a-ftm", },
++	{ .compatible = "fsl,ls1046a-ftm", },
++	{ .compatible = "fsl,ls1088a-ftm", },
++	{ .compatible = "fsl,ls208xa-ftm", },
 +	{ .compatible = "fsl,ftm-timer", },
 +	{ },
 +};
@@ -321,3 +407,6 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	return platform_driver_register(&ftm_alarm_driver);
 +}
 +device_initcall(ftm_alarm_init);
+-- 
+2.14.1
+
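
A note on the pattern above: ftm_alarm.c selects big- or little-endian register accessors from per-SoC match data (rcpm_cfg) instead of hard-coding ioread32be(). A minimal sketch of that idiom, with invented "demo" names and an assumed register layout:

/*
 * Sketch of the endian-selected MMIO idiom used by ftm_alarm.c above;
 * the "demo" names are illustrative assumptions, not the patch's symbols.
 */
#include <linux/io.h>
#include <linux/types.h>

struct demo_cfg {
	bool big_endian;	/* endianness of the PMU register block */
	u32 set_bit;		/* bit that keeps the timer powered in LPM20 */
};

static u32 demo_read(void __iomem *addr, bool be)
{
	return be ? ioread32be(addr) : ioread32(addr);
}

static void demo_write(u32 val, void __iomem *addr, bool be)
{
	if (be)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}

/* Read-modify-write the wakeup register the way the probe path above does */
static void demo_set_wakeup_bit(void __iomem *rcpm, const struct demo_cfg *cfg)
{
	u32 v = demo_read(rcpm, cfg->big_endian);

	v |= cfg->set_bit;
	demo_write(v, rcpm, cfg->big_endian);
}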

+ 408 - 53
target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch

@@ -1,4 +1,4 @@
-From f1874c71c855bd8ca8478a622053276f2c61eeca Mon Sep 17 00:00:00 2001
+From 152f316e7829f6aeb3a36009e7e5ec0f1d97d770 Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <[email protected]>
 Date: Wed, 27 Sep 2017 10:33:26 +0800
 Subject: [PATCH] iommu: support layerscape
@@ -12,20 +12,22 @@ Signed-off-by: Sunil Goutham <[email protected]>
 Signed-off-by: Yangbo Lu <[email protected]>
 ---
  drivers/iommu/amd_iommu.c    |  56 ++++++----
- drivers/iommu/arm-smmu-v3.c  |  35 ++++++-
- drivers/iommu/arm-smmu.c     |  74 ++++++++++---
+ drivers/iommu/arm-smmu-v3.c  | 117 ++++++++++++++-------
+ drivers/iommu/arm-smmu.c     | 100 +++++++++++++++---
  drivers/iommu/dma-iommu.c    | 242 ++++++++++++++++++++++++++++++++++++-------
  drivers/iommu/intel-iommu.c  |  92 ++++++++++++----
- drivers/iommu/iommu.c        | 191 ++++++++++++++++++++++++++++++++--
+ drivers/iommu/iommu.c        | 219 ++++++++++++++++++++++++++++++++++++---
  drivers/iommu/mtk_iommu.c    |   2 +
  drivers/iommu/mtk_iommu_v1.c |   2 +
  include/linux/dma-iommu.h    |  11 ++
  include/linux/iommu.h        |  55 +++++++---
- 10 files changed, 645 insertions(+), 115 deletions(-)
+ 10 files changed, 739 insertions(+), 157 deletions(-)
 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index c380b7e8..93199931 100644
 --- a/drivers/iommu/amd_iommu.c
 +++ b/drivers/iommu/amd_iommu.c
-@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_devic
+@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
  
  	if (!entry->group)
  		entry->group = generic_device_group(dev);
@@ -34,7 +36,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	return entry->group;
  }
-@@ -3160,9 +3162,10 @@ static bool amd_iommu_capable(enum iommu
+@@ -3159,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
  	return false;
  }
  
@@ -47,7 +49,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	struct unity_map_entry *entry;
  	int devid;
  
-@@ -3171,41 +3174,56 @@ static void amd_iommu_get_dm_regions(str
+@@ -3170,41 +3173,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
  		return;
  
  	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
@@ -118,7 +120,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  {
  	struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
  	unsigned long start, end;
-@@ -3229,9 +3247,9 @@ static const struct iommu_ops amd_iommu_
+@@ -3228,9 +3246,9 @@ static const struct iommu_ops amd_iommu_ops = {
  	.add_device = amd_iommu_add_device,
  	.remove_device = amd_iommu_remove_device,
  	.device_group = amd_iommu_device_group,
@@ -131,6 +133,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
  };
  
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index e6f9b2d7..48e2a7c4 100644
 --- a/drivers/iommu/arm-smmu-v3.c
 +++ b/drivers/iommu/arm-smmu-v3.c
 @@ -410,6 +410,9 @@
@@ -143,7 +147,92 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static bool disable_bypass;
  module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
  MODULE_PARM_DESC(disable_bypass,
-@@ -1370,8 +1373,6 @@ static bool arm_smmu_capable(enum iommu_
+@@ -552,9 +555,14 @@ struct arm_smmu_s2_cfg {
+ };
+ 
+ struct arm_smmu_strtab_ent {
+-	bool				valid;
+-
+-	bool				bypass;	/* Overrides s1/s2 config */
++	/*
++	 * An STE is "assigned" if the master emitting the corresponding SID
++	 * is attached to a domain. The behaviour of an unassigned STE is
++	 * determined by the disable_bypass parameter, whereas an assigned
++	 * STE behaves according to s1_cfg/s2_cfg, which themselves are
++	 * configured according to the domain type.
++	 */
++	bool				assigned;
+ 	struct arm_smmu_s1_cfg		*s1_cfg;
+ 	struct arm_smmu_s2_cfg		*s2_cfg;
+ };
+@@ -627,6 +635,7 @@ enum arm_smmu_domain_stage {
+ 	ARM_SMMU_DOMAIN_S1 = 0,
+ 	ARM_SMMU_DOMAIN_S2,
+ 	ARM_SMMU_DOMAIN_NESTED,
++	ARM_SMMU_DOMAIN_BYPASS,
+ };
+ 
+ struct arm_smmu_domain {
+@@ -1000,9 +1009,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
+ 	 * This is hideously complicated, but we only really care about
+ 	 * three cases at the moment:
+ 	 *
+-	 * 1. Invalid (all zero) -> bypass  (init)
+-	 * 2. Bypass -> translation (attach)
+-	 * 3. Translation -> bypass (detach)
++	 * 1. Invalid (all zero) -> bypass/fault (init)
++	 * 2. Bypass/fault -> translation/bypass (attach)
++	 * 3. Translation/bypass -> bypass/fault (detach)
+ 	 *
+ 	 * Given that we can't update the STE atomically and the SMMU
+ 	 * doesn't read the thing in a defined order, that leaves us
+@@ -1040,17 +1049,16 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
+ 		}
+ 	}
+ 
+-	/* Nuke the existing Config, as we're going to rewrite it */
+-	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
++	/* Nuke the existing STE_0 value, as we're going to rewrite it */
++	val = STRTAB_STE_0_V;
+ 
+-	if (ste->valid)
+-		val |= STRTAB_STE_0_V;
+-	else
+-		val &= ~STRTAB_STE_0_V;
++	/* Bypass/fault */
++	if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
++		if (!ste->assigned && disable_bypass)
++			val |= STRTAB_STE_0_CFG_ABORT;
++		else
++			val |= STRTAB_STE_0_CFG_BYPASS;
+ 
+-	if (ste->bypass) {
+-		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
+-				      : STRTAB_STE_0_CFG_BYPASS;
+ 		dst[0] = cpu_to_le64(val);
+ 		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
+ 			 << STRTAB_STE_1_SHCFG_SHIFT);
+@@ -1081,7 +1089,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
+ 		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
+ 		        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
+ 			STRTAB_STE_0_CFG_S1_TRANS;
+-
+ 	}
+ 
+ 	if (ste->s2_cfg) {
+@@ -1114,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
+ static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
+ {
+ 	unsigned int i;
+-	struct arm_smmu_strtab_ent ste = {
+-		.valid	= true,
+-		.bypass	= true,
+-	};
++	struct arm_smmu_strtab_ent ste = { .assigned = false };
+ 
+ 	for (i = 0; i < nent; ++i) {
+ 		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
+@@ -1370,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
  	switch (cap) {
  	case IOMMU_CAP_CACHE_COHERENCY:
  		return true;
@@ -152,7 +241,96 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	case IOMMU_CAP_NOEXEC:
  		return true;
  	default:
-@@ -1709,6 +1710,9 @@ arm_smmu_iova_to_phys(struct iommu_domai
+@@ -1383,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+ {
+ 	struct arm_smmu_domain *smmu_domain;
+ 
+-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
++	if (type != IOMMU_DOMAIN_UNMANAGED &&
++	    type != IOMMU_DOMAIN_DMA &&
++	    type != IOMMU_DOMAIN_IDENTITY)
+ 		return NULL;
+ 
+ 	/*
+@@ -1514,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+ 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ 	struct arm_smmu_device *smmu = smmu_domain->smmu;
+ 
++	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
++		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
++		return 0;
++	}
++
+ 	/* Restrict the stage to what we can actually support */
+ 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+ 		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+@@ -1584,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+ 	return step;
+ }
+ 
+-static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
++static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+ {
+ 	int i;
+ 	struct arm_smmu_master_data *master = fwspec->iommu_priv;
+@@ -1596,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+ 
+ 		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
+ 	}
+-
+-	return 0;
+ }
+ 
+ static void arm_smmu_detach_dev(struct device *dev)
+ {
+ 	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
+ 
+-	master->ste.bypass = true;
+-	if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
+-		dev_warn(dev, "failed to install bypass STE\n");
++	master->ste.assigned = false;
++	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
+ }
+ 
+ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+@@ -1625,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ 	ste = &master->ste;
+ 
+ 	/* Already attached to a different domain? */
+-	if (!ste->bypass)
++	if (ste->assigned)
+ 		arm_smmu_detach_dev(dev);
+ 
+ 	mutex_lock(&smmu_domain->init_mutex);
+@@ -1646,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ 		goto out_unlock;
+ 	}
+ 
+-	ste->bypass = false;
+-	ste->valid = true;
++	ste->assigned = true;
+ 
+-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
++	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
++		ste->s1_cfg = NULL;
++		ste->s2_cfg = NULL;
++	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ 		ste->s1_cfg = &smmu_domain->s1_cfg;
+ 		ste->s2_cfg = NULL;
+ 		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
+@@ -1658,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+ 		ste->s2_cfg = &smmu_domain->s2_cfg;
+ 	}
+ 
+-	ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
+-	if (ret < 0)
+-		ste->valid = false;
+-
++	arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
+ out_unlock:
+ 	mutex_unlock(&smmu_domain->init_mutex);
+ 	return ret;
+@@ -1709,6 +1714,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
  	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
  
@@ -162,7 +340,36 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	if (!ops)
  		return 0;
  
-@@ -1880,6 +1884,31 @@ static int arm_smmu_of_xlate(struct devi
+@@ -1807,7 +1815,7 @@ static void arm_smmu_remove_device(struct device *dev)
+ 		return;
+ 
+ 	master = fwspec->iommu_priv;
+-	if (master && master->ste.valid)
++	if (master && master->ste.assigned)
+ 		arm_smmu_detach_dev(dev);
+ 	iommu_group_remove_device(dev);
+ 	kfree(master);
+@@ -1836,6 +1844,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+ {
+ 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ 
++	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++		return -EINVAL;
++
+ 	switch (attr) {
+ 	case DOMAIN_ATTR_NESTING:
+ 		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+@@ -1851,6 +1862,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+ 	int ret = 0;
+ 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ 
++	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++		return -EINVAL;
++
+ 	mutex_lock(&smmu_domain->init_mutex);
+ 
+ 	switch (attr) {
+@@ -1880,6 +1894,31 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
  	return iommu_fwspec_add_ids(dev, args->args, 1);
  }
  
@@ -194,7 +401,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static struct iommu_ops arm_smmu_ops = {
  	.capable		= arm_smmu_capable,
  	.domain_alloc		= arm_smmu_domain_alloc,
-@@ -1895,6 +1924,8 @@ static struct iommu_ops arm_smmu_ops = {
+@@ -1895,6 +1934,8 @@ static struct iommu_ops arm_smmu_ops = {
  	.domain_get_attr	= arm_smmu_domain_get_attr,
  	.domain_set_attr	= arm_smmu_domain_set_attr,
  	.of_xlate		= arm_smmu_of_xlate,
@@ -203,6 +410,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
  };
  
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 8f728144..3243a96d 100644
 --- a/drivers/iommu/arm-smmu.c
 +++ b/drivers/iommu/arm-smmu.c
 @@ -49,6 +49,7 @@
@@ -231,7 +440,65 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static int force_stage;
  module_param(force_stage, int, S_IRUGO);
  MODULE_PARM_DESC(force_stage,
-@@ -1343,6 +1348,9 @@ static phys_addr_t arm_smmu_iova_to_phys
+@@ -401,6 +406,7 @@ enum arm_smmu_domain_stage {
+ 	ARM_SMMU_DOMAIN_S1 = 0,
+ 	ARM_SMMU_DOMAIN_S2,
+ 	ARM_SMMU_DOMAIN_NESTED,
++	ARM_SMMU_DOMAIN_BYPASS,
+ };
+ 
+ struct arm_smmu_domain {
+@@ -821,6 +827,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ 	if (smmu_domain->smmu)
+ 		goto out_unlock;
+ 
++	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
++		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
++		smmu_domain->smmu = smmu;
++		goto out_unlock;
++	}
++
+ 	/*
+ 	 * Mapping the requested stage onto what we support is surprisingly
+ 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
+@@ -981,7 +993,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+ 	void __iomem *cb_base;
+ 	int irq;
+ 
+-	if (!smmu)
++	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
+ 		return;
+ 
+ 	/*
+@@ -1004,7 +1016,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+ {
+ 	struct arm_smmu_domain *smmu_domain;
+ 
+-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
++	if (type != IOMMU_DOMAIN_UNMANAGED &&
++	    type != IOMMU_DOMAIN_DMA &&
++	    type != IOMMU_DOMAIN_IDENTITY)
+ 		return NULL;
+ 	/*
+ 	 * Allocate the domain and initialise some of its data structures.
+@@ -1202,10 +1216,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+ {
+ 	struct arm_smmu_device *smmu = smmu_domain->smmu;
+ 	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
+-	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
+ 	u8 cbndx = smmu_domain->cfg.cbndx;
++	enum arm_smmu_s2cr_type type;
+ 	int i, idx;
+ 
++	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
++		type = S2CR_TYPE_BYPASS;
++	else
++		type = S2CR_TYPE_TRANS;
++
+ 	for_each_cfg_sme(fwspec, i, idx) {
+ 		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
+ 			continue;
+@@ -1343,6 +1362,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
  	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
  
@@ -241,7 +508,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	if (!ops)
  		return 0;
  
-@@ -1368,8 +1376,6 @@ static bool arm_smmu_capable(enum iommu_
+@@ -1368,8 +1390,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
  		 * requests.
  		 */
  		return true;
@@ -250,7 +517,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	case IOMMU_CAP_NOEXEC:
  		return true;
  	default:
-@@ -1478,10 +1484,12 @@ static struct iommu_group *arm_smmu_devi
+@@ -1478,10 +1498,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
  	}
  
  	if (group)
@@ -264,7 +531,27 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	else
  		group = generic_device_group(dev);
  
-@@ -1534,17 +1542,44 @@ out_unlock:
+@@ -1493,6 +1515,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+ {
+ 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ 
++	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++		return -EINVAL;
++
+ 	switch (attr) {
+ 	case DOMAIN_ATTR_NESTING:
+ 		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+@@ -1508,6 +1533,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+ 	int ret = 0;
+ 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ 
++	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
++		return -EINVAL;
++
+ 	mutex_lock(&smmu_domain->init_mutex);
+ 
+ 	switch (attr) {
+@@ -1534,17 +1562,44 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
  
  static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
  {
@@ -310,7 +597,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static struct iommu_ops arm_smmu_ops = {
  	.capable		= arm_smmu_capable,
  	.domain_alloc		= arm_smmu_domain_alloc,
-@@ -1560,6 +1595,8 @@ static struct iommu_ops arm_smmu_ops = {
+@@ -1560,6 +1615,8 @@ static struct iommu_ops arm_smmu_ops = {
  	.domain_get_attr	= arm_smmu_domain_get_attr,
  	.domain_set_attr	= arm_smmu_domain_set_attr,
  	.of_xlate		= arm_smmu_of_xlate,
@@ -319,7 +606,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
  };
  
-@@ -1581,16 +1618,22 @@ static void arm_smmu_device_reset(struct
+@@ -1581,16 +1638,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
  	for (i = 0; i < smmu->num_mapping_groups; ++i)
  		arm_smmu_write_sme(smmu, i);
  
@@ -351,7 +638,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
  	}
  
-@@ -2024,6 +2067,11 @@ static int arm_smmu_device_dt_probe(stru
+@@ -2024,6 +2087,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
  		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
  	}
  #endif
@@ -363,6 +650,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	return 0;
  }
  
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 1520e7f0..3ade4153 100644
 --- a/drivers/iommu/dma-iommu.c
 +++ b/drivers/iommu/dma-iommu.c
 @@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
@@ -420,10 +709,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  
  int iommu_dma_init(void)
-@@ -62,25 +97,53 @@ int iommu_dma_init(void)
+@@ -61,26 +96,54 @@ int iommu_dma_init(void)
+  * callback when domain->type == IOMMU_DOMAIN_DMA.
   */
  int iommu_get_dma_cookie(struct iommu_domain *domain)
- {
++{
 +	if (domain->iova_cookie)
 +		return -EEXIST;
 +
@@ -448,7 +738,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
 + * used by the devices attached to @domain.
 + */
 +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
-+{
+ {
  	struct iommu_dma_cookie *cookie;
  
 +	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
@@ -479,7 +769,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
   *
   * IOMMU drivers should normally call this from their domain_free callback.
   */
-@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_d
+@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
  	if (!cookie)
  		return;
  
@@ -488,7 +778,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		put_iova_domain(&cookie->iovad);
  
  	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
-@@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_d
+@@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
  }
  EXPORT_SYMBOL(iommu_put_dma_cookie);
  
@@ -594,7 +884,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  
  /**
-@@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(str
+@@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
  int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
  		u64 size, struct device *dev)
  {
@@ -610,7 +900,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/* Use the smallest supported page size for IOVA granularity */
  	order = __ffs(domain->pgsize_bitmap);
-@@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_d
+@@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
  		end_pfn = min_t(unsigned long, end_pfn,
  				domain->geometry.aperture_end >> order);
  	}
@@ -657,7 +947,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  EXPORT_SYMBOL(iommu_dma_init_domain);
  
-@@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_
+@@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
  {
  	struct iommu_dma_cookie *cookie = domain->iova_cookie;
  	struct iommu_dma_msi_page *msi_page;
@@ -672,7 +962,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
  		if (msi_page->phys == msi_addr)
  			return msi_page;
-@@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_
+@@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
  	if (!msi_page)
  		return NULL;
  
@@ -697,7 +987,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		goto out_free_iova;
  
  	INIT_LIST_HEAD(&msi_page->list);
-@@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_
+@@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
  	return msi_page;
  
  out_free_iova:
@@ -709,7 +999,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  out_free_page:
  	kfree(msi_page);
  	return NULL;
-@@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, stru
+@@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
  		msg->data = ~0U;
  	} else {
  		msg->address_hi = upper_32_bits(msi_page->iova);
@@ -718,6 +1008,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		msg->address_lo += lower_32_bits(msi_page->iova);
  	}
  }
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 002f8a42..befbfd30 100644
 --- a/drivers/iommu/intel-iommu.c
 +++ b/drivers/iommu/intel-iommu.c
 @@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
@@ -728,7 +1020,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  };
  
  struct dmar_atsr_unit {
-@@ -4250,27 +4251,40 @@ static inline void init_iommu_pm_ops(voi
+@@ -4250,27 +4251,40 @@ static inline void init_iommu_pm_ops(void) {}
  int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
  {
  	struct acpi_dmar_reserved_memory *rmrr;
@@ -782,7 +1074,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		kfree(rmrru);
  	}
  
-@@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(st
+@@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(struct device *dev)
  	iommu_device_unlink(iommu->iommu_dev, dev);
  }
  
@@ -828,7 +1120,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #ifdef CONFIG_INTEL_IOMMU_SVM
  #define MAX_NR_PASID_BITS (20)
  static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
-@@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_
+@@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
  #endif /* CONFIG_INTEL_IOMMU_SVM */
  
  static const struct iommu_ops intel_iommu_ops = {
@@ -863,9 +1155,19 @@ Signed-off-by: Yangbo Lu <[email protected]>
  };
  
  static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 87d3060f..e6a8c225 100644
 --- a/drivers/iommu/iommu.c
 +++ b/drivers/iommu/iommu.c
-@@ -68,6 +68,13 @@ struct iommu_group_attribute {
+@@ -36,6 +36,7 @@
+ 
+ static struct kset *iommu_group_kset;
+ static DEFINE_IDA(iommu_group_ida);
++static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
+ 
+ struct iommu_callback_data {
+ 	const struct iommu_ops *ops;
+@@ -68,6 +69,13 @@ struct iommu_group_attribute {
  			 const char *buf, size_t count);
  };
  
@@ -879,7 +1181,26 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
  struct iommu_group_attribute iommu_group_attr_##_name =		\
  	__ATTR(_name, _mode, _show, _store)
-@@ -133,8 +140,131 @@ static ssize_t iommu_group_show_name(str
+@@ -86,6 +94,18 @@ static int __iommu_attach_group(struct iommu_domain *domain,
+ static void __iommu_detach_group(struct iommu_domain *domain,
+ 				 struct iommu_group *group);
+ 
++static int __init iommu_set_def_domain_type(char *str)
++{
++	bool pt;
++
++	if (!str || strtobool(str, &pt))
++		return -EINVAL;
++
++	iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
++	return 0;
++}
++early_param("iommu.passthrough", iommu_set_def_domain_type);
++
+ static ssize_t iommu_group_attr_show(struct kobject *kobj,
+ 				     struct attribute *__attr, char *buf)
+ {
+@@ -133,8 +153,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
  	return sprintf(buf, "%s\n", group->name);
  }
  
@@ -1011,7 +1332,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static void iommu_group_release(struct kobject *kobj)
  {
  	struct iommu_group *group = to_iommu_group(kobj);
-@@ -212,6 +342,11 @@ struct iommu_group *iommu_group_alloc(vo
+@@ -212,6 +355,11 @@ struct iommu_group *iommu_group_alloc(void)
  	 */
  	kobject_put(&group->kobj);
  
@@ -1023,7 +1344,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	pr_debug("Allocated group %d\n", group->id);
  
  	return group;
-@@ -318,7 +453,7 @@ static int iommu_group_create_direct_map
+@@ -318,7 +466,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
  					      struct device *dev)
  {
  	struct iommu_domain *domain = group->default_domain;
@@ -1032,7 +1353,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	struct list_head mappings;
  	unsigned long pg_size;
  	int ret = 0;
-@@ -331,18 +466,21 @@ static int iommu_group_create_direct_map
+@@ -331,18 +479,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
  	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
  	INIT_LIST_HEAD(&mappings);
  
@@ -1057,7 +1378,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		for (addr = start; addr < end; addr += pg_size) {
  			phys_addr_t phys_addr;
  
-@@ -358,7 +496,7 @@ static int iommu_group_create_direct_map
+@@ -358,7 +509,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
  	}
  
  out:
@@ -1066,10 +1387,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	return ret;
  }
-@@ -563,6 +701,19 @@ struct iommu_group *iommu_group_get(stru
+@@ -562,6 +713,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
+ }
  EXPORT_SYMBOL_GPL(iommu_group_get);
  
- /**
++/**
 + * iommu_group_ref_get - Increment reference on a group
 + * @group: the group to use, must not be NULL
 + *
@@ -1082,11 +1404,33 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +	return group;
 +}
 +
-+/**
+ /**
   * iommu_group_put - Decrement group reference
   * @group: the group to use
-  *
-@@ -1557,20 +1708,38 @@ int iommu_domain_set_attr(struct iommu_d
+@@ -845,10 +1009,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
+ 	 * IOMMU driver.
+ 	 */
+ 	if (!group->default_domain) {
+-		group->default_domain = __iommu_domain_alloc(dev->bus,
+-							     IOMMU_DOMAIN_DMA);
++		struct iommu_domain *dom;
++
++		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
++		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
++			dev_warn(dev,
++				 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
++				 iommu_def_domain_type);
++			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
++		}
++
++		group->default_domain = dom;
+ 		if (!group->domain)
+-			group->domain = group->default_domain;
++			group->domain = dom;
+ 	}
+ 
+ 	ret = iommu_group_add_device(group, dev);
+@@ -1557,20 +1730,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
  }
  EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
  
@@ -1131,9 +1475,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  }
  
  /* Request that a device is direct mapped by the IOMMU */
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index b12c12d7..9799daea 100644
 --- a/drivers/iommu/mtk_iommu.c
 +++ b/drivers/iommu/mtk_iommu.c
-@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_dev
+@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
  		data->m4u_group = iommu_group_alloc();
  		if (IS_ERR(data->m4u_group))
  			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
@@ -1142,9 +1488,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	}
  	return data->m4u_group;
  }
+diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
+index b8aeb076..c7063e9d 100644
 --- a/drivers/iommu/mtk_iommu_v1.c
 +++ b/drivers/iommu/mtk_iommu_v1.c
-@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_dev
+@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
  		data->m4u_group = iommu_group_alloc();
  		if (IS_ERR(data->m4u_group))
  			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
@@ -1153,6 +1501,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	}
  	return data->m4u_group;
  }
+diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
+index 32c58906..36d3206d 100644
 --- a/include/linux/dma-iommu.h
 +++ b/include/linux/dma-iommu.h
 @@ -27,6 +27,7 @@ int iommu_dma_init(void);
@@ -1163,7 +1513,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  void iommu_put_dma_cookie(struct iommu_domain *domain);
  
  /* Setup call for arch DMA mapping code */
-@@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct devic
+@@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
  
  /* The DMA API isn't _quite_ the whole story, though... */
  void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
@@ -1171,7 +1521,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  #else
  
-@@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(s
+@@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
  	return -ENODEV;
  }
  
@@ -1183,7 +1533,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
  {
  }
-@@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg
+@@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
  {
  }
  
@@ -1194,6 +1544,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #endif	/* CONFIG_IOMMU_DMA */
  #endif	/* __KERNEL__ */
  #endif	/* __DMA_IOMMU_H */
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 436dc213..188599f5 100644
 --- a/include/linux/iommu.h
 +++ b/include/linux/iommu.h
 @@ -117,18 +117,32 @@ enum iommu_attr {
@@ -1262,7 +1614,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/* Window handling functions */
  	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
-@@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(st
+@@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
  extern void iommu_set_fault_handler(struct iommu_domain *domain,
  			iommu_fault_handler_t handler, void *token);
  
@@ -1279,7 +1631,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  extern int iommu_attach_group(struct iommu_domain *domain,
  			      struct iommu_group *group);
-@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(st
+@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(struct device *dev);
  extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
  				    int (*fn)(struct device *, void *));
  extern struct iommu_group *iommu_group_get(struct device *dev);
@@ -1287,7 +1639,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  extern void iommu_group_put(struct iommu_group *group);
  extern int iommu_group_register_notifier(struct iommu_group *group,
  					 struct notifier_block *nb);
-@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handl
+@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
  {
  }
  
@@ -1312,3 +1664,6 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static inline int iommu_request_dm_for_dev(struct device *dev)
  {
  	return -ENODEV;
+-- 
+2.14.1
+
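
One idiom worth noting in the iommu patch: the new iommu.passthrough command-line parameter is parsed with the early_param()/strtobool() pair. A self-contained sketch of that idiom, with a made-up parameter name:

/*
 * Sketch of the early_param()/strtobool() idiom the iommu patch uses for
 * iommu.passthrough; the parameter and variable names here are invented.
 */
#include <linux/init.h>
#include <linux/string.h>

static bool demo_passthrough;

static int __init demo_set_passthrough(char *str)
{
	bool pt;

	/* accepts "1"/"0", "y"/"n", "on"/"off"; rejects anything else */
	if (!str || strtobool(str, &pt))
		return -EINVAL;

	demo_passthrough = pt;
	return 0;
}
early_param("demo.passthrough", demo_set_passthrough);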

+ 100 - 47
target/linux/layerscape/patches-4.9/817-usb-support-layerscape.patch

@@ -1,4 +1,4 @@
-From f8daa8e984213554008e73cd155530dceec5a109 Mon Sep 17 00:00:00 2001
+From b14460ee524a34d3b94b44032b52155c4cd708e5 Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <[email protected]>
 Date: Wed, 27 Sep 2017 10:34:07 +0800
 Subject: [PATCH] usb: support layerscape
@@ -15,6 +15,7 @@ Signed-off-by: Suresh Gupta <[email protected]>
 Signed-off-by: Zhao Chenhui <[email protected]>
 Signed-off-by: Yangbo Lu <[email protected]>
 ---
+ drivers/net/usb/r8152.c               |   4 +
  drivers/usb/common/common.c           |  50 ++++++
  drivers/usb/core/hub.c                |   8 +
  drivers/usb/dwc3/core.c               | 235 ++++++++++++++++++++++++++-
@@ -32,11 +33,28 @@ Signed-off-by: Yangbo Lu <[email protected]>
  drivers/usb/phy/phy-fsl-usb.h         |   8 +
  include/linux/usb.h                   |   1 +
  include/linux/usb/of.h                |   2 +
- 17 files changed, 726 insertions(+), 73 deletions(-)
+ 18 files changed, 730 insertions(+), 73 deletions(-)
 
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index afb953a2..c9c86495 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -1812,6 +1812,10 @@ static int rx_bottom(struct r8152 *tp, int budget)
+ 			unsigned int pkt_len;
+ 			struct sk_buff *skb;
+ 
++			/* limit the number of skbs queued on rx_queue */
++			if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
++				break;
++
+ 			pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ 			if (pkt_len < ETH_ZLEN)
+ 				break;
+diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
+index 5ef8da6e..176dee01 100644
 --- a/drivers/usb/common/common.c
 +++ b/drivers/usb/common/common.c
-@@ -105,6 +105,56 @@ static const char *const usb_dr_modes[]
+@@ -105,6 +105,56 @@ static const char *const usb_dr_modes[] = {
  	[USB_DR_MODE_OTG]		= "otg",
  };
  
@@ -93,9 +111,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str)
  {
  	int ret;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 80d4ef31..e23acf03 100644
 --- a/drivers/usb/core/hub.c
 +++ b/drivers/usb/core/hub.c
-@@ -4412,6 +4412,14 @@ hub_port_init(struct usb_hub *hub, struc
+@@ -4412,6 +4412,14 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
  	else
  		speed = usb_speed_string(udev->speed);
  
@@ -110,9 +130,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	if (udev->speed < USB_SPEED_SUPER)
  		dev_info(&udev->dev,
  				"%s %s USB device number %d using %s\n",
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index fea44690..e34ef90a 100644
 --- a/drivers/usb/dwc3/core.c
 +++ b/drivers/usb/dwc3/core.c
-@@ -58,6 +58,7 @@ static int dwc3_get_dr_mode(struct dwc3
+@@ -58,6 +58,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
  	enum usb_dr_mode mode;
  	struct device *dev = dwc->dev;
  	unsigned int hw_mode;
@@ -120,7 +142,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
  		dwc->dr_mode = USB_DR_MODE_OTG;
-@@ -83,6 +84,24 @@ static int dwc3_get_dr_mode(struct dwc3
+@@ -83,6 +84,24 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc)
  		mode = USB_DR_MODE_HOST;
  		break;
  	default:
@@ -145,7 +167,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
  			mode = USB_DR_MODE_HOST;
  		else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
-@@ -213,8 +232,9 @@ static void dwc3_frame_length_adjustment
+@@ -213,8 +232,9 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
  
  	reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
  	dft = reg & DWC3_GFLADJ_30MHZ_MASK;
@@ -157,7 +179,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		reg &= ~DWC3_GFLADJ_30MHZ_MASK;
  		reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
  		dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
-@@ -579,6 +599,99 @@ static int dwc3_phy_setup(struct dwc3 *d
+@@ -579,6 +599,99 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
  	return 0;
  }
  
@@ -257,7 +279,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static void dwc3_core_exit(struct dwc3 *dwc)
  {
  	dwc3_event_buffers_cleanup(dwc);
-@@ -721,6 +834,8 @@ static int dwc3_core_init(struct dwc3 *d
+@@ -721,6 +834,8 @@ static int dwc3_core_init(struct dwc3 *dwc)
  	if (ret)
  		goto err1;
  
@@ -266,7 +288,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	/* Adjust Frame Length */
  	dwc3_frame_length_adjustment(dwc);
  
-@@ -919,11 +1034,109 @@ static void dwc3_core_exit_mode(struct d
+@@ -919,11 +1034,109 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
  	}
  }
  
@@ -376,7 +398,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	struct resource		*res;
  	struct dwc3		*dwc;
  	u8			lpm_nyet_threshold;
-@@ -955,6 +1168,11 @@ static int dwc3_probe(struct platform_de
+@@ -955,6 +1168,11 @@ static int dwc3_probe(struct platform_device *pdev)
  	dwc->xhci_resources[0].flags = res->flags;
  	dwc->xhci_resources[0].name = res->name;
  
@@ -388,7 +410,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	res->start += DWC3_GLOBALS_REGS_START;
  
  	/*
-@@ -997,6 +1215,12 @@ static int dwc3_probe(struct platform_de
+@@ -997,6 +1215,12 @@ static int dwc3_probe(struct platform_device *pdev)
  	dwc->usb3_lpm_capable = device_property_read_bool(dev,
  				"snps,usb3_lpm_capable");
  
@@ -401,7 +423,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	dwc->disable_scramble_quirk = device_property_read_bool(dev,
  				"snps,disable_scramble_quirk");
  	dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
-@@ -1041,6 +1265,8 @@ static int dwc3_probe(struct platform_de
+@@ -1041,6 +1265,8 @@ static int dwc3_probe(struct platform_device *pdev)
  	dwc->hird_threshold = hird_threshold
  		| (dwc->is_utmi_l1_suspend << 4);
  
@@ -410,7 +432,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	platform_set_drvdata(pdev, dwc);
  	dwc3_cache_hwparams(dwc);
  
-@@ -1064,6 +1290,11 @@ static int dwc3_probe(struct platform_de
+@@ -1064,6 +1290,11 @@ static int dwc3_probe(struct platform_device *pdev)
  	if (ret < 0)
  		goto err1;
  
@@ -422,6 +444,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	pm_runtime_forbid(dev);
  
  	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 884c4371..9151eef4 100644
 --- a/drivers/usb/dwc3/core.h
 +++ b/drivers/usb/dwc3/core.h
 @@ -26,6 +26,7 @@
@@ -539,6 +563,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  };
  
  /* -------------------------------------------------------------------------- */
+diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
+index 626d87d5..f1b98273 100644
 --- a/drivers/usb/dwc3/host.c
 +++ b/drivers/usb/dwc3/host.c
 @@ -17,6 +17,8 @@
@@ -574,6 +600,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	dwc->xhci = xhci;
  
  	ret = platform_device_add_resources(xhci, dwc->xhci_resources,
+diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
+index aac0ce8a..fe49e758 100644
 --- a/drivers/usb/gadget/udc/fsl_udc_core.c
 +++ b/drivers/usb/gadget/udc/fsl_udc_core.c
 @@ -198,7 +198,11 @@ __acquires(ep->udc->lock)
@@ -589,7 +617,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	spin_lock(&ep->udc->lock);
  	ep->stopped = stopped;
-@@ -245,10 +249,10 @@ static int dr_controller_setup(struct fs
+@@ -245,10 +249,10 @@ static int dr_controller_setup(struct fsl_udc *udc)
  		if (udc->pdata->have_sysif_regs) {
  			if (udc->pdata->controller_ver) {
  				/* controller version 1.6 or above */
@@ -602,7 +630,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			}
  		}
  		portctrl |= PORTSCX_PTS_ULPI;
-@@ -257,13 +261,14 @@ static int dr_controller_setup(struct fs
+@@ -257,13 +261,14 @@ static int dr_controller_setup(struct fsl_udc *udc)
  		portctrl |= PORTSCX_PTW_16BIT;
  		/* fall through */
  	case FSL_USB2_PHY_UTMI:
@@ -619,7 +647,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  				mdelay(FSL_UTMI_PHY_DLY); /* Delay for UTMI
  					PHY CLK to become stable - 10ms*/
  			}
-@@ -329,22 +334,22 @@ static int dr_controller_setup(struct fs
+@@ -329,22 +334,22 @@ static int dr_controller_setup(struct fsl_udc *udc)
  	/* Config control enable i/o output, cpu endian register */
  #ifndef CONFIG_ARCH_MXC
  	if (udc->pdata->have_sysif_regs) {
@@ -647,7 +675,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	}
  #endif
  
-@@ -1057,7 +1062,7 @@ static int fsl_ep_fifo_status(struct usb
+@@ -1057,7 +1062,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
  	struct ep_queue_head *qh;
  
  	ep = container_of(_ep, struct fsl_ep, ep);
@@ -656,7 +684,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		return -ENODEV;
  
  	udc = (struct fsl_udc *)ep->udc;
-@@ -1599,14 +1604,13 @@ static int process_ep_req(struct fsl_udc
+@@ -1599,14 +1604,13 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
  		struct fsl_req *curr_req)
  {
  	struct ep_td_struct *curr_td;
@@ -672,7 +700,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	actual = curr_req->req.length;
  
  	for (j = 0; j < curr_req->dtd_count; j++) {
-@@ -1651,11 +1655,9 @@ static int process_ep_req(struct fsl_udc
+@@ -1651,11 +1655,9 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
  				status = -EPROTO;
  				break;
  			} else {
@@ -684,7 +712,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			VDBG("dTD transmitted successful");
  		}
  
-@@ -1698,7 +1700,7 @@ static void dtd_complete_irq(struct fsl_
+@@ -1698,7 +1700,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
  		curr_ep = get_ep_by_pipe(udc, i);
  
  		/* If the ep is configured */
@@ -693,7 +721,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			WARNING("Invalid EP?");
  			continue;
  		}
-@@ -2420,10 +2422,12 @@ static int fsl_udc_probe(struct platform
+@@ -2420,10 +2422,12 @@ static int fsl_udc_probe(struct platform_device *pdev)
  		usb_sys_regs = (void *)dr_regs + USB_DR_SYS_OFFSET;
  #endif
  
@@ -706,7 +734,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/* Read Device Controller Capability Parameters register */
  	dccparams = fsl_readl(&dr_regs->dccparams);
-@@ -2463,9 +2467,11 @@ static int fsl_udc_probe(struct platform
+@@ -2463,9 +2467,11 @@ static int fsl_udc_probe(struct platform_device *pdev)
  		dr_controller_setup(udc_controller);
  	}
  
@@ -718,7 +746,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/* Setup gadget structure */
  	udc_controller->gadget.ops = &fsl_gadget_ops;
-@@ -2478,6 +2484,7 @@ static int fsl_udc_probe(struct platform
+@@ -2478,6 +2484,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
  	/* Setup gadget.dev and register with kernel */
  	dev_set_name(&udc_controller->gadget.dev, "gadget");
  	udc_controller->gadget.dev.of_node = pdev->dev.of_node;
@@ -726,7 +754,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	if (!IS_ERR_OR_NULL(udc_controller->transceiver))
  		udc_controller->gadget.is_otg = 1;
-@@ -2529,7 +2536,9 @@ err_free_irq:
+@@ -2529,7 +2536,9 @@ static int fsl_udc_probe(struct platform_device *pdev)
  err_iounmap:
  	if (pdata->exit)
  		pdata->exit(pdev);
@@ -736,7 +764,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  err_iounmap_noclk:
  	iounmap(dr_regs);
  err_release_mem_region:
-@@ -2557,8 +2566,9 @@ static int fsl_udc_remove(struct platfor
+@@ -2557,8 +2566,9 @@ static int fsl_udc_remove(struct platform_device *pdev)
  	udc_controller->done = &done;
  	usb_del_gadget_udc(&udc_controller->gadget);
  
@@ -747,7 +775,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	/* DR has been stopped in usb_gadget_unregister_driver() */
  	remove_proc_file();
  
-@@ -2570,7 +2580,7 @@ static int fsl_udc_remove(struct platfor
+@@ -2570,7 +2580,7 @@ static int fsl_udc_remove(struct platform_device *pdev)
  	dma_pool_destroy(udc_controller->td_pool);
  	free_irq(udc_controller->irq, udc_controller);
  	iounmap(dr_regs);
@@ -756,6 +784,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		release_mem_region(res->start, resource_size(res));
  
  	/* free udc --wait for the release() finished */
+diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
+index 84715625..f76c4ddd 100644
 --- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
 +++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
 @@ -20,6 +20,10 @@
@@ -788,6 +818,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #endif
  
  #endif
+diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
+index 0b80cee3..a57d95c3 100644
 --- a/drivers/usb/host/Kconfig
 +++ b/drivers/usb/host/Kconfig
 @@ -165,7 +165,7 @@ config XPS_USB_HCD_XILINX
@@ -799,6 +831,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	select USB_EHCI_ROOT_HUB_TT
  	---help---
  	  Variation of ARC USB block used in some Freescale chips.
+diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
+index 9f5ffb62..cd16860c 100644
 --- a/drivers/usb/host/ehci-fsl.c
 +++ b/drivers/usb/host/ehci-fsl.c
 @@ -37,13 +37,141 @@
@@ -943,7 +977,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  /* configure so an HC device and id are always provided */
  /* always called with process context; sleeping is OK */
-@@ -131,6 +259,12 @@ static int fsl_ehci_drv_probe(struct pla
+@@ -131,6 +259,12 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
  		clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
  				CONTROL_REGISTER_W1C_MASK, 0x4);
  
@@ -956,7 +990,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	/*
  	 * Enable UTMI phy and program PTS field in UTMI mode before asserting
  	 * controller reset for USB Controller version 2.5
-@@ -143,16 +277,20 @@ static int fsl_ehci_drv_probe(struct pla
+@@ -143,16 +277,20 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
  
  	/* Don't need to set host mode here. It will be done by tdi_reset() */
  
@@ -979,7 +1013,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		dev_dbg(&pdev->dev, "hcd=0x%p  ehci=0x%p, phy=0x%p\n",
  			hcd, ehci, hcd->usb_phy);
  
-@@ -168,6 +306,11 @@ static int fsl_ehci_drv_probe(struct pla
+@@ -168,6 +306,11 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
  			retval = -ENODEV;
  			goto err2;
  		}
@@ -991,7 +1025,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	}
  #endif
  	return retval;
-@@ -181,6 +324,18 @@ static int fsl_ehci_drv_probe(struct pla
+@@ -181,6 +324,18 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
  	return retval;
  }
  
@@ -1010,7 +1044,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
  			       enum fsl_usb2_phy_modes phy_mode,
  			       unsigned int port_offset)
-@@ -219,6 +374,21 @@ static int ehci_fsl_setup_phy(struct usb
+@@ -219,6 +374,21 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
  		/* fall through */
  	case FSL_USB2_PHY_UTMI:
  	case FSL_USB2_PHY_UTMI_DUAL:
@@ -1032,7 +1066,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		if (pdata->have_sysif_regs && pdata->controller_ver) {
  			/* controller version 1.6 or above */
  			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
-@@ -292,14 +462,9 @@ static int ehci_fsl_usb_setup(struct ehc
+@@ -292,14 +462,9 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
  			return -EINVAL;
  
  	if (pdata->operating_mode == FSL_USB2_MPH_HOST) {
@@ -1048,7 +1082,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			ehci->has_fsl_port_bug = 1;
  
  		if (pdata->port_enables & FSL_USB2_PORT0_ENABLED)
-@@ -379,16 +544,57 @@ static int ehci_fsl_setup(struct usb_hcd
+@@ -379,16 +544,57 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
  	return retval;
  }
  
@@ -1113,7 +1147,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  #ifdef CONFIG_PPC_MPC512x
  static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
-@@ -535,26 +741,43 @@ static inline int ehci_fsl_mpc512x_drv_r
+@@ -535,26 +741,43 @@ static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev)
  }
  #endif /* CONFIG_PPC_MPC512x */
  
@@ -1164,7 +1198,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	if (!fsl_deep_sleep())
  		return 0;
  
-@@ -568,12 +791,34 @@ static int ehci_fsl_drv_resume(struct de
+@@ -568,12 +791,34 @@ static int ehci_fsl_drv_resume(struct device *dev)
  	struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
  	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
  	void __iomem *non_ehci = hcd->regs;
@@ -1199,6 +1233,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	ehci_prepare_ports_for_controller_resume(ehci);
  	if (!fsl_deep_sleep())
  		return 0;
+diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
+index 1a8a60a5..42ea2976 100644
 --- a/drivers/usb/host/ehci-fsl.h
 +++ b/drivers/usb/host/ehci-fsl.h
 @@ -63,4 +63,7 @@
@@ -1209,9 +1245,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
 +/* Retry count for checking UTMI PHY CLK validity */
 +#define UTMI_PHY_CLK_VALID_CHK_RETRY 5
  #endif				/* _EHCI_FSL_H */
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index 255acca8..c8838c33 100644
 --- a/drivers/usb/host/ehci-hub.c
 +++ b/drivers/usb/host/ehci-hub.c
-@@ -305,6 +305,8 @@ static int ehci_bus_suspend (struct usb_
+@@ -305,6 +305,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
  						USB_PORT_STAT_HIGH_SPEED)
  				fs_idle_delay = true;
  			ehci_writel(ehci, t2, reg);
@@ -1220,9 +1258,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			changed = 1;
  		}
  	}
+diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
+index 3b06bb77..f296d1fb 100644
 --- a/drivers/usb/host/ehci.h
 +++ b/drivers/usb/host/ehci.h
-@@ -180,6 +180,9 @@ struct ehci_hcd {			/* one per controlle
+@@ -180,6 +180,9 @@ struct ehci_hcd {			/* one per controller */
  	unsigned		periodic_count;	/* periodic activity count */
  	unsigned		uframe_periodic_max; /* max periodic time per uframe */
  
@@ -1232,7 +1272,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/* list of itds & sitds completed while now_frame was still active */
  	struct list_head	cached_itd_list;
-@@ -706,8 +709,10 @@ ehci_port_speed(struct ehci_hcd *ehci, u
+@@ -706,8 +709,10 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
   * incoming packets get corrupted in HS mode
   */
  #define ehci_has_fsl_hs_errata(e)	((e)->has_fsl_hs_errata)
@@ -1243,9 +1283,11 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #endif
  
  /*
+diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
+index f07ccb25..1e59ea9f 100644
 --- a/drivers/usb/host/fsl-mph-dr-of.c
 +++ b/drivers/usb/host/fsl-mph-dr-of.c
-@@ -226,6 +226,18 @@ static int fsl_usb2_mph_dr_of_probe(stru
+@@ -226,6 +226,18 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
  		of_property_read_bool(np, "fsl,usb-erratum-a007792");
  	pdata->has_fsl_erratum_a005275 =
  		of_property_read_bool(np, "fsl,usb-erratum-a005275");
@@ -1264,6 +1306,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  	/*
  	 * Determine whether phy_clk_valid needs to be checked
+diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
+index 94eb2923..836355fa 100644
 --- a/drivers/usb/phy/phy-fsl-usb.c
 +++ b/drivers/usb/phy/phy-fsl-usb.c
 @@ -1,5 +1,5 @@
@@ -1281,7 +1325,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	struct device *dev;
  	struct fsl_otg *otg_dev =
  		container_of(otg->usb_phy, struct fsl_otg, phy);
-@@ -486,6 +487,7 @@ int fsl_otg_start_host(struct otg_fsm *f
+@@ -486,6 +487,7 @@ int fsl_otg_start_host(struct otg_fsm *fsm, int on)
  			otg_reset_controller();
  			VDBG("host on......\n");
  			if (dev->driver->pm && dev->driver->pm->resume) {
@@ -1289,7 +1333,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  				retval = dev->driver->pm->resume(dev);
  				if (fsm->id) {
  					/* default-b */
-@@ -510,8 +512,11 @@ int fsl_otg_start_host(struct otg_fsm *f
+@@ -510,8 +512,11 @@ int fsl_otg_start_host(struct otg_fsm *fsm, int on)
  		else {
  			VDBG("host off......\n");
  			if (dev && dev->driver) {
@@ -1302,7 +1346,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  				if (fsm->id)
  					/* default-b */
  					fsl_otg_drv_vbus(fsm, 0);
-@@ -539,8 +544,17 @@ int fsl_otg_start_gadget(struct otg_fsm
+@@ -539,8 +544,17 @@ int fsl_otg_start_gadget(struct otg_fsm *fsm, int on)
  	dev = otg->gadget->dev.parent;
  
  	if (on) {
@@ -1321,7 +1365,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	} else {
  		if (dev->driver->suspend)
  			dev->driver->suspend(dev, otg_suspend_state);
-@@ -672,6 +686,10 @@ static void fsl_otg_event(struct work_st
+@@ -672,6 +686,10 @@ static void fsl_otg_event(struct work_struct *work)
  		fsl_otg_start_host(fsm, 0);
  		otg_drv_vbus(fsm, 0);
  		fsl_otg_start_gadget(fsm, 1);
@@ -1332,7 +1376,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	}
  }
  
-@@ -724,6 +742,7 @@ irqreturn_t fsl_otg_isr(int irq, void *d
+@@ -724,6 +742,7 @@ irqreturn_t fsl_otg_isr(int irq, void *dev_id)
  {
  	struct otg_fsm *fsm = &((struct fsl_otg *)dev_id)->fsm;
  	struct usb_otg *otg = ((struct fsl_otg *)dev_id)->phy.otg;
@@ -1340,7 +1384,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	u32 otg_int_src, otg_sc;
  
  	otg_sc = fsl_readl(&usb_dr_regs->otgsc);
-@@ -753,18 +772,8 @@ irqreturn_t fsl_otg_isr(int irq, void *d
+@@ -753,18 +772,8 @@ irqreturn_t fsl_otg_isr(int irq, void *dev_id)
  				otg->gadget->is_a_peripheral = !fsm->id;
  			VDBG("ID int (ID is %d)\n", fsm->id);
  
@@ -1361,7 +1405,7 @@ Signed-off-by: Yangbo Lu <[email protected]>
  			return IRQ_HANDLED;
  		}
  	}
-@@ -923,12 +932,32 @@ int usb_otg_start(struct platform_device
+@@ -923,12 +932,32 @@ int usb_otg_start(struct platform_device *pdev)
  	temp &= ~(PORTSC_PHY_TYPE_SEL | PORTSC_PTW);
  	switch (pdata->phy_mode) {
  	case FSL_USB2_PHY_ULPI:
@@ -1394,6 +1438,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  		temp |= PORTSC_PTS_UTMI;
  		/* fall through */
  	default:
+diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
+index 23149954..c4c08730 100644
 --- a/drivers/usb/phy/phy-fsl-usb.h
 +++ b/drivers/usb/phy/phy-fsl-usb.h
 @@ -199,6 +199,14 @@
@@ -1411,6 +1457,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  
  /* BCSR5 */
  #define BCSR5_INT_USB			(0x02)
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index eba1f10e..c334e281 100644
 --- a/include/linux/usb.h
 +++ b/include/linux/usb.h
 @@ -362,6 +362,7 @@ struct usb_bus {
@@ -1421,6 +1469,8 @@ Signed-off-by: Yangbo Lu <[email protected]>
  	unsigned is_b_host:1;		/* true during some HNP roleswitches */
  	unsigned b_hnp_enable:1;	/* OTG: did A-Host enable HNP? */
  	unsigned no_stop_on_short:1;    /*
+diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
+index 5ff9032e..2a57e0d2 100644
 --- a/include/linux/usb/of.h
 +++ b/include/linux/usb/of.h
 @@ -11,6 +11,8 @@
@@ -1432,3 +1482,6 @@ Signed-off-by: Yangbo Lu <[email protected]>
  #if IS_ENABLED(CONFIG_OF)
  enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
  bool of_usb_host_tpl_support(struct device_node *np);
+-- 
+2.14.1
+
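
The usb patch repeatedly plumbs per-SoC erratum quirks from the device tree into platform data with of_property_read_bool(). A hedged sketch of that pattern, with invented property and structure names:

/*
 * Sketch of the boolean-quirk plumbing used throughout the usb patch;
 * the structure and property names below are illustrative, not the real ones.
 */
#include <linux/of.h>
#include <linux/platform_device.h>

struct demo_usb_pdata {
	bool has_erratum_a;
	bool check_phy_clk_valid;
};

static int demo_usb_parse_dt(struct platform_device *pdev,
			     struct demo_usb_pdata *pdata)
{
	struct device_node *np = pdev->dev.of_node;

	if (!np)
		return -ENODEV;

	/* absent property reads as false, so quirks default off */
	pdata->has_erratum_a =
		of_property_read_bool(np, "vendor,usb-erratum-a");
	pdata->check_phy_clk_valid =
		of_property_read_bool(np, "vendor,check-phy-clk-valid");

	return 0;
}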

Some files were not shown because too many files changed in this diff