Просмотр исходного кода

treewide: drop Linux 6.6 patches, hacks, configs and conditional

Drop all kernel 6.6 patches, hacks, backports and configuration support,
and remove the CONFIG_LINUX_6_6 guarded conditional in filogic.mk.

Signed-off-by: Mieczyslaw Nalewaj <[email protected]>
Link: https://github.com/openwrt/openwrt/pull/20943
Signed-off-by: Christian Marangi <[email protected]>
Mieczyslaw Nalewaj 4 месяца назад
Родитель
Commit
8a1ee7577e
100 измененных файлов с 0 добавлено и 18357 удалено
  1. 0 56
      target/linux/generic/backport-6.6/0080-v6.9-smp-Avoid-setup_max_cpus_namespace_collision_shadowing.patch
  2. 0 58
      target/linux/generic/backport-6.6/065-v6.10-compiler_types.h-Define-__retain.patch
  3. 0 65
      target/linux/generic/backport-6.6/066-v6.10-bpf-Harden-__bpf_kfunc-against-linker-removal.patch
  4. 0 59
      target/linux/generic/backport-6.6/192-v6.12-fix-libbpf-Wmaybe-uninitialized.patch
  5. 0 172
      target/linux/generic/backport-6.6/200-v6.11-ARM-9404-1-arm32-enable-HAVE_LD_DEAD_CODE_DATA_ELIMI.patch
  6. 0 51
      target/linux/generic/backport-6.6/300-v6.7-arm64-swiotlb-Reduce-the-default-size-if-no-ZONE_DMA.patch
  7. 0 161
      target/linux/generic/backport-6.6/301-v6.9-kernel.h-removed-REPEAT_BYTE-from-kernel.h.patch
  8. 0 107
      target/linux/generic/backport-6.6/302-v6.9-kernel.h-Move-upper_-_bits-and-lower_-_bits-to-wordp.patch
  9. 0 206
      target/linux/generic/backport-6.6/310-v6.7-mips-kexec-fix-the-incorrect-ifdeffery-and-dependenc.patch
  10. 0 171
      target/linux/generic/backport-6.6/320-v6.11-mips-bmips-rework-and-cache-CBR-addr-handling.patch
  11. 0 111
      target/linux/generic/backport-6.6/321-v6.11-mips-bmips-setup-make-CBR-address-configurable.patch
  12. 0 57
      target/linux/generic/backport-6.6/322-v6.11-mips-bmips-enable-RAC-on-BMIPS4350.patch
  13. 0 60
      target/linux/generic/backport-6.6/330-v6.13-jiffies-Define-secs_to_jiffies.patch
  14. 0 35
      target/linux/generic/backport-6.6/331-v6.14-jiffies-Cast-to-unsigned-long-in-secs_to_jiffies-con.patch
  15. 0 36
      target/linux/generic/backport-6.6/400-v6.9-mtd-rawnand-brcmnand-Support-write-protection-settin.patch
  16. 0 123
      target/linux/generic/backport-6.6/401-v6.9-dt-bindings-mtd-add-basic-bindings-for-UBI.patch
  17. 0 50
      target/linux/generic/backport-6.6/402-v6.9-dt-bindings-mtd-ubi-volume-allow-UBI-volumes-to-prov.patch
  18. 0 285
      target/linux/generic/backport-6.6/403-v6.9-mtd-ubi-block-use-notifier-to-create-ubiblock-from-p.patch
  19. 0 205
      target/linux/generic/backport-6.6/404-v6.9-mtd-ubi-attach-from-device-tree.patch
  20. 0 180
      target/linux/generic/backport-6.6/405-v6.9-mtd-ubi-introduce-pre-removal-notification-for-UBI-v.patch
  21. 0 66
      target/linux/generic/backport-6.6/406-v6.9-mtd-ubi-populate-ubi-volume-fwnode.patch
  22. 0 244
      target/linux/generic/backport-6.6/407-v6.9-mtd-ubi-provide-NVMEM-layer-over-UBI-volumes.patch
  23. 0 34
      target/linux/generic/backport-6.6/408-v6.9-mtd-ubi-fix-NVMEM-over-UBI-volumes-on-32-bit-systems.patch
  24. 0 53
      target/linux/generic/backport-6.6/410-v6.13-01-block-add-support-for-defining-read-only-partitions.patch
  25. 0 94
      target/linux/generic/backport-6.6/410-v6.13-03-block-introduce-add_disk_fwnode.patch
  26. 0 104
      target/linux/generic/backport-6.6/410-v6.13-04-mmc-block-attach-partitions-fwnode-if-found-in-mmc-c.patch
  27. 0 200
      target/linux/generic/backport-6.6/410-v6.13-05-block-add-support-for-partition-table-defined-in-OF.patch
  28. 0 146
      target/linux/generic/backport-6.6/411-v6.7-mtd-spinand-add-support-for-FORESEE-F35SQA002G.patch
  29. 0 38
      target/linux/generic/backport-6.6/412-v6.14-mtd-spinand-add-support-for-FORESEE-F35SQA001G.patch
  30. 0 1026
      target/linux/generic/backport-6.6/413-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch
  31. 0 880
      target/linux/generic/backport-6.6/413-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch
  32. 0 2436
      target/linux/generic/backport-6.6/413-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch
  33. 0 198
      target/linux/generic/backport-6.6/413-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch
  34. 0 64
      target/linux/generic/backport-6.6/414-v6.14-mtd-rawnand-qcom-fix-broken-config-in-qcom_param_pag.patch
  35. 0 77
      target/linux/generic/backport-6.6/415-v6.14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch
  36. 0 1737
      target/linux/generic/backport-6.6/416-v6.15-01-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Inte.patch
  37. 0 28
      target/linux/generic/backport-6.6/416-v6.15-02-spi-spi-qpic-snand-Fix-ECC_CFG_ECC_DISABLE-shift-in-.patch
  38. 0 35
      target/linux/generic/backport-6.6/416-v6.15-03-spi-spi-qpic-snand-avoid-memleak-in-qcom_spi_ecc_ini.patch
  39. 0 49
      target/linux/generic/backport-6.6/416-v6.15-04-spi-SPI_QPIC_SNAND-should-be-tristate-and-depend-on-.patch
  40. 0 36
      target/linux/generic/backport-6.6/416-v6.15-05-spi-spi-qpic-snand-propagate-errors-from-qcom_spi_block_erase.patch
  41. 0 35
      target/linux/generic/backport-6.6/416-v6.15-06-spi-spi-qpic-snand-fix-NAND_READ_LOCATION_2-register-handling.patch
  42. 0 29
      target/linux/generic/backport-6.6/416-v6.15-07-spi-spi-qpic-snand-use-kmalloc-for-OOB-buffer-alloca.patch
  43. 0 30
      target/linux/generic/backport-6.6/416-v6.16-08-spi-spi-qpic-snand-remove-unused-wlen-member-of-struct-qpic_spi_nand.patch
  44. 0 31
      target/linux/generic/backport-6.6/417-v6.12-mtd-spinand-set-bitflip_threshold-to-75-of-ECC-stren.patch
  45. 0 65
      target/linux/generic/backport-6.6/418-v6.12-mtd-spinand-winbond-add-support-for-W25N01KV.patch
  46. 0 124
      target/linux/generic/backport-6.6/419-v6.17-mtd-spinand-add-support-for-FudanMicro-FM25S01A.patch
  47. 0 75
      target/linux/generic/backport-6.6/600-v6.10-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch
  48. 0 330
      target/linux/generic/backport-6.6/601-v6.10-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch
  49. 0 121
      target/linux/generic/backport-6.6/602-v6.10-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch
  50. 0 164
      target/linux/generic/backport-6.6/603-v6.10-net-Rename-rps_lock-to-backlog_lock.patch
  51. 0 41
      target/linux/generic/backport-6.6/604-01-v6.13-net-phylink-move-manual-flow-control-setting.patch
  52. 0 42
      target/linux/generic/backport-6.6/604-02-v6.13-net-phylink-move-MLO_AN_FIXED-resolve-handling-to-if.patch
  53. 0 37
      target/linux/generic/backport-6.6/604-03-v6.13-net-phylink-move-MLO_AN_PHY-resolve-handling-to-if-s.patch
  54. 0 127
      target/linux/generic/backport-6.6/604-04-v6.13-net-phylink-remove-switch-statement-in-resolve-handl.patch
  55. 0 85
      target/linux/generic/backport-6.6/604-05-v6.13-net-phylink-clean-up-phylink_resolve.patch
  56. 0 166
      target/linux/generic/backport-6.6/605-v6.8-net-phylink-move-phylink_pcs_neg_mode-into-phylink.c.patch
  57. 0 95
      target/linux/generic/backport-6.6/606-01-v6.14-net-phylink-pass-phylink-and-pcs-into-phylink_pcs_ne.patch
  58. 0 281
      target/linux/generic/backport-6.6/606-02-v6.14-net-phylink-split-cur_link_an_mode-into-requested-an.patch
  59. 0 66
      target/linux/generic/backport-6.6/606-03-v6.14-net-phylink-add-debug-for-phylink_major_config.patch
  60. 0 118
      target/linux/generic/backport-6.6/606-04-v6.14-net-phy-add-phy_inband_caps.patch
  61. 0 41
      target/linux/generic/backport-6.6/606-05-v6.14-net-phy-bcm84881-implement-phy_inband_caps-method.patch
  62. 0 63
      target/linux/generic/backport-6.6/606-06-v6.14-net-phy-marvell-implement-phy_inband_caps-method.patch
  63. 0 79
      target/linux/generic/backport-6.6/606-07-v6.14-net-phy-add-phy_config_inband.patch
  64. 0 77
      target/linux/generic/backport-6.6/606-08-v6.14-net-phy-marvell-implement-config_inband-method.patch
  65. 0 159
      target/linux/generic/backport-6.6/606-09-v6.14-net-phylink-add-pcs_inband_caps-method.patch
  66. 0 64
      target/linux/generic/backport-6.6/606-10-v6.14-net-mvneta-implement-pcs_inband_caps-method.patch
  67. 0 62
      target/linux/generic/backport-6.6/606-11-v6.14-net-mvpp2-implement-pcs_inband_caps-method.patch
  68. 0 228
      target/linux/generic/backport-6.6/606-12-v6.14-net-phylink-add-negotiation-of-in-band-capabilities.patch
  69. 0 110
      target/linux/generic/backport-6.6/606-13-v6.14-net-phylink-remove-phylink_phy_no_inband.patch
  70. 0 53
      target/linux/generic/backport-6.6/607-01-v6.14-net-pcs-pcs-lynx-implement-pcs_inband_caps-method.patch
  71. 0 47
      target/linux/generic/backport-6.6/607-02-v6.14-net-pcs-pcs-mtk-lynxi-implement-pcs_inband_caps-meth.patch
  72. 0 58
      target/linux/generic/backport-6.6/607-03-v6.14-net-pcs-xpcs-implement-pcs_inband_caps-method.patch
  73. 0 30
      target/linux/generic/backport-6.6/610-v6.9-net-mdio-add-2.5g-and-5g-related-PMA-speed-constants.patch
  74. 0 94
      target/linux/generic/backport-6.6/611-01-v6.11-udp-Allow-GSO-transmit-from-devices-with-no-checksum.patch
  75. 0 69
      target/linux/generic/backport-6.6/611-02-v6.11-net-Make-USO-depend-on-CSUM-offload.patch
  76. 0 86
      target/linux/generic/backport-6.6/611-03-v6.11-udp-Fall-back-to-software-USO-if-IPv6-extension-head.patch
  77. 0 29
      target/linux/generic/backport-6.6/612-v6.9-net-get-stats64-if-device-if-driver-is-configured.patch
  78. 0 139
      target/linux/generic/backport-6.6/620-01-v6.7-page_pool-fragment-API-support-for-32-bit-arch-with-.patch
  79. 0 183
      target/linux/generic/backport-6.6/620-02-v6.7-page_pool-unify-frag_count-handling-in-page_pool_is_.patch
  80. 0 101
      target/linux/generic/backport-6.6/621-v6.10-01-net-move-skb_gro_receive_list-from-udp-to-core.patch
  81. 0 177
      target/linux/generic/backport-6.6/621-v6.10-02-net-add-support-for-segmenting-TCP-fraglist-GSO-pack.patch
  82. 0 75
      target/linux/generic/backport-6.6/621-v6.10-03-net-add-code-for-TCP-fraglist-GRO.patch
  83. 0 88
      target/linux/generic/backport-6.6/621-v6.10-04-net-create-tcp_gro_lookup-helper-function.patch
  84. 0 166
      target/linux/generic/backport-6.6/621-v6.10-05-net-create-tcp_gro_header_pull-helper-function.patch
  85. 0 140
      target/linux/generic/backport-6.6/621-v6.10-06-net-add-heuristic-for-enabling-TCP-fraglist-GRO.patch
  86. 0 80
      target/linux/generic/backport-6.6/622-v6.12-net-gso-fix-tcp-fraglist-segmentation-after-pull-fro.patch
  87. 0 59
      target/linux/generic/backport-6.6/623-v6.14-net-ipv6-fix-TCP-GSO-segmentation-with-NAT.patch
  88. 0 79
      target/linux/generic/backport-6.6/624-v6.15-ppp-use-IFF_NO_QUEUE-in-virtual-interfaces.patch
  89. 0 41
      target/linux/generic/backport-6.6/625-v6.16-net-bridge-locally-receive-all-multicast-packets-if-.patch
  90. 0 135
      target/linux/generic/backport-6.6/700-v6.10-net-create-a-dummy-net_device-allocator.patch
  91. 0 29
      target/linux/generic/backport-6.6/701-v6.8-netfilter-nf_tables-fix-bidirectional-offload-regres.patch
  92. 0 2386
      target/linux/generic/backport-6.6/702-01-v6.7-net-phy-aquantia-move-to-separate-directory.patch
  93. 0 183
      target/linux/generic/backport-6.6/702-02-v6.7-net-phy-aquantia-move-MMD_VEND-define-to-header.patch
  94. 0 504
      target/linux/generic/backport-6.6/702-03-v6.7-net-phy-aquantia-add-firmware-load-support.patch
  95. 0 101
      target/linux/generic/backport-6.6/703-v6.10-flow_offload-add-control-flag-checking-helpers.patch
  96. 0 40
      target/linux/generic/backport-6.6/704-v6.12-ipv6-Add-ipv6_addr_-cpu_to_be32-be32_to_cpu-helpers.patch
  97. 0 69
      target/linux/generic/backport-6.6/707-v6.8-02-net-phy-at803x-move-disable-WOL-to-specific-at8031-p.patch
  98. 0 129
      target/linux/generic/backport-6.6/707-v6.8-03-net-phy-at803x-raname-hw_stats-functions-to-qca83xx-.patch
  99. 0 155
      target/linux/generic/backport-6.6/707-v6.8-04-net-phy-at803x-move-qca83xx-specific-check-in-dedica.patch
  100. 0 94
      target/linux/generic/backport-6.6/707-v6.8-05-net-phy-at803x-move-specific-DT-option-for-at8031-to.patch

+ 0 - 56
target/linux/generic/backport-6.6/0080-v6.9-smp-Avoid-setup_max_cpus_namespace_collision_shadowing.patch

@@ -1,56 +0,0 @@
-From 4c8a49854130da0117a0fdb858551824919a2389 Mon Sep 17 00:00:00 2001
-From: Ingo Molnar <[email protected]>
-Date: Tue, 27 Feb 2024 09:58:15 +0100
-Subject: [PATCH] smp: Avoid 'setup_max_cpus' namespace collision/shadowing
-
-bringup_nonboot_cpus() gets passed the 'setup_max_cpus'
-variable in init/main.c - which is also the name of the parameter,
-shadowing the name.
-
-To reduce confusion and to allow the 'setup_max_cpus' value
-to be #defined in the <linux/smp.h> header, use the 'max_cpus'
-name for the function parameter name.
-
-Signed-off-by: Ingo Molnar <[email protected]>
-Cc: Thomas Gleixner <[email protected]>
-Cc: [email protected]
----
- include/linux/cpu.h | 2 +-
- kernel/cpu.c        | 6 +++---
- 2 files changed, 4 insertions(+), 4 deletions(-)
-
---- a/include/linux/cpu.h
-+++ b/include/linux/cpu.h
-@@ -113,7 +113,7 @@ void notify_cpu_starting(unsigned int cp
- extern void cpu_maps_update_begin(void);
- extern void cpu_maps_update_done(void);
- int bringup_hibernate_cpu(unsigned int sleep_cpu);
--void bringup_nonboot_cpus(unsigned int setup_max_cpus);
-+void bringup_nonboot_cpus(unsigned int max_cpus);
- 
- #else	/* CONFIG_SMP */
- #define cpuhp_tasks_frozen	0
---- a/kernel/cpu.c
-+++ b/kernel/cpu.c
-@@ -1905,17 +1905,17 @@ static bool __init cpuhp_bringup_cpus_pa
- static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
- #endif /* CONFIG_HOTPLUG_PARALLEL */
- 
--void __init bringup_nonboot_cpus(unsigned int setup_max_cpus)
-+void __init bringup_nonboot_cpus(unsigned int max_cpus)
- {
--	if (!setup_max_cpus)
-+	if (!max_cpus)
- 		return;
- 
- 	/* Try parallel bringup optimization if enabled */
--	if (cpuhp_bringup_cpus_parallel(setup_max_cpus))
-+	if (cpuhp_bringup_cpus_parallel(max_cpus))
- 		return;
- 
- 	/* Full per CPU serialized bringup */
--	cpuhp_bringup_mask(cpu_present_mask, setup_max_cpus, CPUHP_ONLINE);
-+	cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
- }
- 
- #ifdef CONFIG_PM_SLEEP_SMP

+ 0 - 58
target/linux/generic/backport-6.6/065-v6.10-compiler_types.h-Define-__retain.patch

@@ -1,58 +0,0 @@
-From 443df175be581618d6ff781dc3af3aa1a9ba789d Mon Sep 17 00:00:00 2001
-From: Tony Ambardar <[email protected]>
-Date: Fri, 31 May 2024 23:55:55 -0700
-Subject: [PATCH 1/2] compiler_types.h: Define __retain for
- __attribute__((__retain__))
-
-Some code includes the __used macro to prevent functions and data from
-being optimized out. This macro implements __attribute__((__used__)), which
-operates at the compiler and IR-level, and so still allows a linker to
-remove objects intended to be kept.
-
-Compilers supporting __attribute__((__retain__)) can address this gap by
-setting the flag SHF_GNU_RETAIN on the section of a function/variable,
-indicating to the linker the object should be retained. This attribute is
-available since gcc 11, clang 13, and binutils 2.36.
-
-Provide a __retain macro implementing __attribute__((__retain__)), whose
-first user will be the '__bpf_kfunc' tag.
-
-Link: https://lore.kernel.org/bpf/ZlmGoT9KiYLZd91S@krava/T/
-Cc: [email protected] # v6.6+
-Signed-off-by: Tony Ambardar <[email protected]>
----
- include/linux/compiler_types.h | 23 +++++++++++++++++++++++
- 1 file changed, 23 insertions(+)
-
---- a/include/linux/compiler_types.h
-+++ b/include/linux/compiler_types.h
-@@ -145,6 +145,29 @@ static inline void __chk_io_ptr(const vo
- #define __has_builtin(x) (0)
- #endif
- 
-+/*
-+ * Annotating a function/variable with __retain tells the compiler to place
-+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
-+ * instructs the linker to retain the object during garbage-cleanup or LTO
-+ * phases.
-+ *
-+ * Note that the __used macro is also used to prevent functions or data
-+ * being optimized out, but operates at the compiler/IR-level and may still
-+ * allow unintended removal of objects during linking.
-+ *
-+ * Optional: only supported since gcc >= 11, clang >= 13
-+ *
-+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
-+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
-+ */
-+#if __has_attribute(__retain__) && \
-+	(defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
-+	 defined(CONFIG_LTO_CLANG))
-+# define __retain			__attribute__((__retain__))
-+#else
-+# define __retain
-+#endif
-+
- /* Compiler specific macros. */
- #ifdef __clang__
- #include <linux/compiler-clang.h>

+ 0 - 65
target/linux/generic/backport-6.6/066-v6.10-bpf-Harden-__bpf_kfunc-against-linker-removal.patch

@@ -1,65 +0,0 @@
-From ac507ed9882fd91a94657d68fe9ceac04b957103 Mon Sep 17 00:00:00 2001
-From: Tony Ambardar <[email protected]>
-Date: Sat, 1 Jun 2024 00:00:21 -0700
-Subject: [PATCH 2/2] bpf: Harden __bpf_kfunc tag against linker kfunc removal
-
-BPF kfuncs are often not directly referenced and may be inadvertently
-removed by optimization steps during kernel builds, thus the __bpf_kfunc
-tag mitigates against this removal by including the __used macro. However,
-this macro alone does not prevent removal during linking, and may still
-yield build warnings (e.g. on mips64el):
-
-    LD      vmlinux
-    BTFIDS  vmlinux
-  WARN: resolve_btfids: unresolved symbol bpf_verify_pkcs7_signature
-  WARN: resolve_btfids: unresolved symbol bpf_lookup_user_key
-  WARN: resolve_btfids: unresolved symbol bpf_lookup_system_key
-  WARN: resolve_btfids: unresolved symbol bpf_key_put
-  WARN: resolve_btfids: unresolved symbol bpf_iter_task_next
-  WARN: resolve_btfids: unresolved symbol bpf_iter_css_task_new
-  WARN: resolve_btfids: unresolved symbol bpf_get_file_xattr
-  WARN: resolve_btfids: unresolved symbol bpf_ct_insert_entry
-  WARN: resolve_btfids: unresolved symbol bpf_cgroup_release
-  WARN: resolve_btfids: unresolved symbol bpf_cgroup_from_id
-  WARN: resolve_btfids: unresolved symbol bpf_cgroup_acquire
-  WARN: resolve_btfids: unresolved symbol bpf_arena_free_pages
-    NM      System.map
-    SORTTAB vmlinux
-    OBJCOPY vmlinux.32
-
-Update the __bpf_kfunc tag to better guard against linker optimization by
-including the new __retain compiler macro, which fixes the warnings above.
-
-Verify the __retain macro with readelf by checking object flags for 'R':
-
-  $ readelf -Wa kernel/trace/bpf_trace.o
-  Section Headers:
-    [Nr]  Name              Type     Address  Off  Size ES Flg Lk Inf Al
-  ...
-    [178] .text.bpf_key_put PROGBITS 00000000 6420 0050 00 AXR  0   0  8
-  ...
-  Key to Flags:
-  ...
-    R (retain), D (mbind), p (processor specific)
-
-Link: https://lore.kernel.org/bpf/ZlmGoT9KiYLZd91S@krava/T/
-Reported-by: kernel test robot <[email protected]>
-Closes: https://lore.kernel.org/r/[email protected]/
-Fixes: 57e7c169cd6a ("bpf: Add __bpf_kfunc tag for marking kernel functions as kfuncs")
-Cc: [email protected] # v6.6+
-Signed-off-by: Tony Ambardar <[email protected]>
----
- include/linux/btf.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/include/linux/btf.h
-+++ b/include/linux/btf.h
-@@ -81,7 +81,7 @@
-  * as to avoid issues such as the compiler inlining or eliding either a static
-  * kfunc, or a global kfunc in an LTO build.
-  */
--#define __bpf_kfunc __used noinline
-+#define __bpf_kfunc __used __retain noinline
- 
- /*
-  * Return the name of the passed struct, if exists, or halt the build if for

+ 0 - 59
target/linux/generic/backport-6.6/192-v6.12-fix-libbpf-Wmaybe-uninitialized.patch

@@ -1,59 +0,0 @@
-From fab45b962749184e1a1a57c7c583782b78fad539 Mon Sep 17 00:00:00 2001
-From: Sam James <[email protected]>
-Date: Tue, 13 Aug 2024 20:49:06 +0100
-Subject: [PATCH] libbpf: Workaround -Wmaybe-uninitialized false positive
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-In `elf_close`, we get this with GCC 15 -O3 (at least):
-```
-In function ‘elf_close’,
-    inlined from ‘elf_close’ at elf.c:53:6,
-    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
-elf.c:57:9: warning: ‘elf_fd.elf’ may be used uninitialized [-Wmaybe-uninitialized]
-   57 |         elf_end(elf_fd->elf);
-      |         ^~~~~~~~~~~~~~~~~~~~
-elf.c: In function ‘elf_find_func_offset_from_file’:
-elf.c:377:23: note: ‘elf_fd.elf’ was declared here
-  377 |         struct elf_fd elf_fd;
-      |                       ^~~~~~
-In function ‘elf_close’,
-    inlined from ‘elf_close’ at elf.c:53:6,
-    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
-elf.c:58:9: warning: ‘elf_fd.fd’ may be used uninitialized [-Wmaybe-uninitialized]
-   58 |         close(elf_fd->fd);
-      |         ^~~~~~~~~~~~~~~~~
-elf.c: In function ‘elf_find_func_offset_from_file’:
-elf.c:377:23: note: ‘elf_fd.fd’ was declared here
-  377 |         struct elf_fd elf_fd;
-      |                       ^~~~~~
-```
-
-In reality, our use is fine, it's just that GCC doesn't model errno
-here (see linked GCC bug). Suppress -Wmaybe-uninitialized accordingly
-by initializing elf_fd.fd to -1 and elf_fd.elf to NULL.
-
-I've done this in two other functions as well given it could easily
-occur there too (same access/use pattern).
-
-Signed-off-by: Sam James <[email protected]>
-Signed-off-by: Andrii Nakryiko <[email protected]>
-Link: https://gcc.gnu.org/PR114952
-Link: https://lore.kernel.org/bpf/14ec488a1cac02794c2fa2b83ae0cef1bce2cb36.1723578546.git.sam@gentoo.org
----
- tools/lib/bpf/elf.c | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/tools/lib/bpf/elf.c
-+++ b/tools/lib/bpf/elf.c
-@@ -16,6 +16,9 @@ int elf_open(const char *binary_path, st
- 	int fd, ret;
- 	Elf *elf;
- 
-+	elf_fd->elf = NULL;
-+	elf_fd->fd = -1;
-+
- 	if (elf_version(EV_CURRENT) == EV_NONE) {
- 		pr_warn("elf: failed to init libelf for %s\n", binary_path);
- 		return -LIBBPF_ERRNO__LIBELF;

+ 0 - 172
target/linux/generic/backport-6.6/200-v6.11-ARM-9404-1-arm32-enable-HAVE_LD_DEAD_CODE_DATA_ELIMI.patch

@@ -1,172 +0,0 @@
-From ed0f941022515ff40473ea5335769a5dc2524a3f Mon Sep 17 00:00:00 2001
-From: Yuntao Liu <[email protected]>
-Date: Mon, 3 Jun 2024 16:37:50 +0100
-Subject: [PATCH] ARM: 9404/1: arm32: enable HAVE_LD_DEAD_CODE_DATA_ELIMINATION
-
-The current arm32 architecture does not yet support the
-HAVE_LD_DEAD_CODE_DATA_ELIMINATION feature. arm32 is widely used in
-embedded scenarios, and enabling this feature would be beneficial for
-reducing the size of the kernel image.
-
-In order to make this work, we keep the necessary tables by annotating
-them with KEEP, also it requires further changes to linker script to KEEP
-some tables and wildcard compiler generated sections into the right place.
-When using ld.lld for linking, KEEP is not recognized within the OVERLAY
-command, and Ard proposed a concise method to solve this problem.
-
-It boots normally with defconfig, vexpress_defconfig and tinyconfig.
-
-The size comparison of zImage is as follows:
-defconfig       vexpress_defconfig      tinyconfig
-5137712         5138024                 424192          no dce
-5032560         4997824                 298384          dce
-2.0%            2.7%                    29.7%           shrink
-
-When using smaller config file, there is a significant reduction in the
-size of the zImage.
-
-We also tested this patch on a commercially available single-board
-computer, and the comparison is as follows:
-a15eb_config
-2161384         no dce
-2092240         dce
-3.2%            shrink
-
-The zImage size has been reduced by approximately 3.2%, which is 70KB on
-2.1M.
-
-Signed-off-by: Yuntao Liu <[email protected]>
-Tested-by: Arnd Bergmann <[email protected]>
-Reviewed-by: Arnd Bergmann <[email protected]>
-Reviewed-by: Linus Walleij <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
----
- arch/arm/Kconfig                       | 1 +
- arch/arm/boot/compressed/vmlinux.lds.S | 2 +-
- arch/arm/include/asm/vmlinux.lds.h     | 2 +-
- arch/arm/kernel/entry-armv.S           | 3 +++
- arch/arm/kernel/vmlinux-xip.lds.S      | 4 ++--
- arch/arm/kernel/vmlinux.lds.S          | 6 +++---
- drivers/firmware/efi/libstub/Makefile  | 4 ++++
- 7 files changed, 15 insertions(+), 7 deletions(-)
-
---- a/arch/arm/Kconfig
-+++ b/arch/arm/Kconfig
-@@ -111,6 +111,7 @@ config ARM
- 	select HAVE_KERNEL_XZ
- 	select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
- 	select HAVE_KRETPROBES if HAVE_KPROBES
-+	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
- 	select HAVE_MOD_ARCH_SPECIFIC
- 	select HAVE_NMI
- 	select HAVE_OPTPROBES if !THUMB2_KERNEL
---- a/arch/arm/boot/compressed/vmlinux.lds.S
-+++ b/arch/arm/boot/compressed/vmlinux.lds.S
-@@ -125,7 +125,7 @@ SECTIONS
- 
-   . = BSS_START;
-   __bss_start = .;
--  .bss			: { *(.bss) }
-+  .bss			: { *(.bss .bss.*) }
-   _end = .;
- 
-   . = ALIGN(8);		/* the stack must be 64-bit aligned */
---- a/arch/arm/include/asm/vmlinux.lds.h
-+++ b/arch/arm/include/asm/vmlinux.lds.h
-@@ -42,7 +42,7 @@
- #define PROC_INFO							\
- 		. = ALIGN(4);						\
- 		__proc_info_begin = .;					\
--		*(.proc.info.init)					\
-+		KEEP(*(.proc.info.init))				\
- 		__proc_info_end = .;
- 
- #define IDMAP_TEXT							\
---- a/arch/arm/kernel/entry-armv.S
-+++ b/arch/arm/kernel/entry-armv.S
-@@ -1073,6 +1073,7 @@ vector_addrexcptn:
- 	.globl	vector_fiq
- 
- 	.section .vectors, "ax", %progbits
-+	.reloc  .text, R_ARM_NONE, .
- 	W(b)	vector_rst
- 	W(b)	vector_und
- ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi		)
-@@ -1086,6 +1087,7 @@ THUMB(	.reloc	., R_ARM_THM_PC12, .L__vec
- 
- #ifdef CONFIG_HARDEN_BRANCH_HISTORY
- 	.section .vectors.bhb.loop8, "ax", %progbits
-+	.reloc  .text, R_ARM_NONE, .
- 	W(b)	vector_rst
- 	W(b)	vector_bhb_loop8_und
- ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
-@@ -1098,6 +1100,7 @@ THUMB(	.reloc	., R_ARM_THM_PC12, .L__vec
- 	W(b)	vector_bhb_loop8_fiq
- 
- 	.section .vectors.bhb.bpiall, "ax", %progbits
-+	.reloc  .text, R_ARM_NONE, .
- 	W(b)	vector_rst
- 	W(b)	vector_bhb_bpiall_und
- ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
---- a/arch/arm/kernel/vmlinux-xip.lds.S
-+++ b/arch/arm/kernel/vmlinux-xip.lds.S
-@@ -63,7 +63,7 @@ SECTIONS
- 	. = ALIGN(4);
- 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
- 		__start___ex_table = .;
--		ARM_MMU_KEEP(*(__ex_table))
-+		ARM_MMU_KEEP(KEEP(*(__ex_table)))
- 		__stop___ex_table = .;
- 	}
- 
-@@ -83,7 +83,7 @@ SECTIONS
- 	}
- 	.init.arch.info : {
- 		__arch_info_begin = .;
--		*(.arch.info.init)
-+		KEEP(*(.arch.info.init))
- 		__arch_info_end = .;
- 	}
- 	.init.tagtable : {
---- a/arch/arm/kernel/vmlinux.lds.S
-+++ b/arch/arm/kernel/vmlinux.lds.S
-@@ -74,7 +74,7 @@ SECTIONS
- 	. = ALIGN(4);
- 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
- 		__start___ex_table = .;
--		ARM_MMU_KEEP(*(__ex_table))
-+		ARM_MMU_KEEP(KEEP(*(__ex_table)))
- 		__stop___ex_table = .;
- 	}
- 
-@@ -99,7 +99,7 @@ SECTIONS
- 	}
- 	.init.arch.info : {
- 		__arch_info_begin = .;
--		*(.arch.info.init)
-+		KEEP(*(.arch.info.init))
- 		__arch_info_end = .;
- 	}
- 	.init.tagtable : {
-@@ -116,7 +116,7 @@ SECTIONS
- #endif
- 	.init.pv_table : {
- 		__pv_table_begin = .;
--		*(.pv_table)
-+		KEEP(*(.pv_table))
- 		__pv_table_end = .;
- 	}
- 
---- a/drivers/firmware/efi/libstub/Makefile
-+++ b/drivers/firmware/efi/libstub/Makefile
-@@ -67,6 +67,10 @@ OBJECT_FILES_NON_STANDARD	:= y
- # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
- KCOV_INSTRUMENT			:= n
- 
-+# The .data section would be renamed to .data.efistub, therefore, remove
-+# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
-+KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
-+
- lib-y				:= efi-stub-helper.o gop.o secureboot.o tpm.o \
- 				   file.o mem.o random.o randomalloc.o pci.o \
- 				   skip_spaces.o lib-cmdline.o lib-ctype.o \

+ 0 - 51
target/linux/generic/backport-6.6/300-v6.7-arm64-swiotlb-Reduce-the-default-size-if-no-ZONE_DMA.patch

@@ -1,51 +0,0 @@
-From 65033574ade97afccba074d837fd269903a83a9a Mon Sep 17 00:00:00 2001
-From: Catalin Marinas <[email protected]>
-Date: Thu, 5 Oct 2023 16:40:30 +0100
-Subject: [PATCH] arm64: swiotlb: Reduce the default size if no ZONE_DMA
- bouncing needed
-
-With CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC enabled, the arm64 kernel still
-allocates the default SWIOTLB buffer (64MB) even if ZONE_DMA is disabled
-or all the RAM fits into this zone. However, this potentially wastes a
-non-negligible amount of memory on platforms with little RAM.
-
-Reduce the SWIOTLB size to 1MB per 1GB of RAM if only needed for
-kmalloc() buffer bouncing.
-
-Signed-off-by: Catalin Marinas <[email protected]>
-Suggested-by: Ross Burton <[email protected]>
-Cc: Ross Burton <[email protected]>
-Cc: Will Deacon <[email protected]>
-Reviewed-by: Robin Murphy <[email protected]>
----
- arch/arm64/mm/init.c | 11 ++++++++++-
- 1 file changed, 10 insertions(+), 1 deletion(-)
-
---- a/arch/arm64/mm/init.c
-+++ b/arch/arm64/mm/init.c
-@@ -16,6 +16,7 @@
- #include <linux/nodemask.h>
- #include <linux/initrd.h>
- #include <linux/gfp.h>
-+#include <linux/math.h>
- #include <linux/memblock.h>
- #include <linux/sort.h>
- #include <linux/of.h>
-@@ -493,8 +494,16 @@ void __init mem_init(void)
- {
- 	bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
- 
--	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
-+	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
-+		/*
-+		 * If no bouncing needed for ZONE_DMA, reduce the swiotlb
-+		 * buffer for kmalloc() bouncing to 1MB per 1GB of RAM.
-+		 */
-+		unsigned long size =
-+			DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
-+		swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
- 		swiotlb = true;
-+	}
- 
- 	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
- 

+ 0 - 161
target/linux/generic/backport-6.6/301-v6.9-kernel.h-removed-REPEAT_BYTE-from-kernel.h.patch

@@ -1,161 +0,0 @@
-From 66a5c40f60f5d88ad8d47ba6a4ba05892853fa1f Mon Sep 17 00:00:00 2001
-From: Tanzir Hasan <[email protected]>
-Date: Tue, 26 Dec 2023 18:00:00 +0000
-Subject: [PATCH] kernel.h: removed REPEAT_BYTE from kernel.h
-
-This patch creates wordpart.h and includes it in asm/word-at-a-time.h
-for all architectures. WORD_AT_A_TIME_CONSTANTS depends on kernel.h
-because of REPEAT_BYTE. Moving this to another header and including it
-where necessary allows us to not include the bloated kernel.h. Making
-this implicit dependency on REPEAT_BYTE explicit allows for later
-improvements in the lib/string.c inclusion list.
-
-Suggested-by: Al Viro <[email protected]>
-Suggested-by: Andy Shevchenko <[email protected]>
-Signed-off-by: Tanzir Hasan <[email protected]>
-Reviewed-by: Andy Shevchenko <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Kees Cook <[email protected]>
----
- arch/arm/include/asm/word-at-a-time.h     |  3 ++-
- arch/arm64/include/asm/word-at-a-time.h   |  3 ++-
- arch/powerpc/include/asm/word-at-a-time.h |  4 ++--
- arch/riscv/include/asm/word-at-a-time.h   |  3 ++-
- arch/s390/include/asm/word-at-a-time.h    |  3 ++-
- arch/sh/include/asm/word-at-a-time.h      |  2 ++
- arch/x86/include/asm/word-at-a-time.h     |  3 ++-
- arch/x86/kvm/mmu/mmu.c                    |  1 +
- fs/namei.c                                |  2 +-
- include/asm-generic/word-at-a-time.h      |  3 ++-
- include/linux/kernel.h                    |  8 --------
- include/linux/wordpart.h                  | 13 +++++++++++++
- 12 files changed, 31 insertions(+), 17 deletions(-)
- create mode 100644 include/linux/wordpart.h
-
---- a/arch/arm/include/asm/word-at-a-time.h
-+++ b/arch/arm/include/asm/word-at-a-time.h
-@@ -8,7 +8,8 @@
-  * Little-endian word-at-a-time zero byte handling.
-  * Heavily based on the x86 algorithm.
-  */
--#include <linux/kernel.h>
-+#include <linux/bitops.h>
-+#include <linux/wordpart.h>
- 
- struct word_at_a_time {
- 	const unsigned long one_bits, high_bits;
---- a/arch/arm64/include/asm/word-at-a-time.h
-+++ b/arch/arm64/include/asm/word-at-a-time.h
-@@ -9,7 +9,8 @@
- 
- #ifndef __AARCH64EB__
- 
--#include <linux/kernel.h>
-+#include <linux/bitops.h>
-+#include <linux/wordpart.h>
- 
- struct word_at_a_time {
- 	const unsigned long one_bits, high_bits;
---- a/arch/powerpc/include/asm/word-at-a-time.h
-+++ b/arch/powerpc/include/asm/word-at-a-time.h
-@@ -4,8 +4,8 @@
- /*
-  * Word-at-a-time interfaces for PowerPC.
-  */
--
--#include <linux/kernel.h>
-+#include <linux/bitops.h>
-+#include <linux/wordpart.h>
- #include <asm/asm-compat.h>
- #include <asm/extable.h>
- 
---- a/arch/sh/include/asm/word-at-a-time.h
-+++ b/arch/sh/include/asm/word-at-a-time.h
-@@ -5,6 +5,8 @@
- #ifdef CONFIG_CPU_BIG_ENDIAN
- # include <asm-generic/word-at-a-time.h>
- #else
-+#include <linux/bitops.h>
-+#include <linux/wordpart.h>
- /*
-  * Little-endian version cribbed from x86.
-  */
---- a/arch/x86/include/asm/word-at-a-time.h
-+++ b/arch/x86/include/asm/word-at-a-time.h
-@@ -2,7 +2,8 @@
- #ifndef _ASM_WORD_AT_A_TIME_H
- #define _ASM_WORD_AT_A_TIME_H
- 
--#include <linux/kernel.h>
-+#include <linux/bitops.h>
-+#include <linux/wordpart.h>
- 
- /*
-  * This is largely generic for little-endian machines, but the
---- a/arch/x86/kvm/mmu/mmu.c
-+++ b/arch/x86/kvm/mmu/mmu.c
-@@ -47,6 +47,7 @@
- #include <linux/kern_levels.h>
- #include <linux/kstrtox.h>
- #include <linux/kthread.h>
-+#include <linux/wordpart.h>
- 
- #include <asm/page.h>
- #include <asm/memtype.h>
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -17,8 +17,8 @@
- 
- #include <linux/init.h>
- #include <linux/export.h>
--#include <linux/kernel.h>
- #include <linux/slab.h>
-+#include <linux/wordpart.h>
- #include <linux/fs.h>
- #include <linux/filelock.h>
- #include <linux/namei.h>
---- a/include/asm-generic/word-at-a-time.h
-+++ b/include/asm-generic/word-at-a-time.h
-@@ -2,7 +2,8 @@
- #ifndef _ASM_WORD_AT_A_TIME_H
- #define _ASM_WORD_AT_A_TIME_H
- 
--#include <linux/kernel.h>
-+#include <linux/bitops.h>
-+#include <linux/wordpart.h>
- #include <asm/byteorder.h>
- 
- #ifdef __BIG_ENDIAN
---- a/include/linux/kernel.h
-+++ b/include/linux/kernel.h
-@@ -38,14 +38,6 @@
- 
- #define STACK_MAGIC	0xdeadbeef
- 
--/**
-- * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
-- * @x: value to repeat
-- *
-- * NOTE: @x is not checked for > 0xff; larger values produce odd results.
-- */
--#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
--
- /* generic data direction definitions */
- #define READ			0
- #define WRITE			1
---- /dev/null
-+++ b/include/linux/wordpart.h
-@@ -0,0 +1,13 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+
-+#ifndef _LINUX_WORDPART_H
-+#define _LINUX_WORDPART_H
-+/**
-+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
-+ * @x: value to repeat
-+ *
-+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
-+ */
-+#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
-+
-+#endif // _LINUX_WORDPART_H

+ 0 - 107
target/linux/generic/backport-6.6/302-v6.9-kernel.h-Move-upper_-_bits-and-lower_-_bits-to-wordp.patch

@@ -1,107 +0,0 @@
-From adeb04362d74188c1e22ccb824b15a0a7b3de2f4 Mon Sep 17 00:00:00 2001
-From: Andy Shevchenko <[email protected]>
-Date: Wed, 14 Feb 2024 19:26:32 +0200
-Subject: [PATCH] kernel.h: Move upper_*_bits() and lower_*_bits() to
- wordpart.h
-
-The wordpart.h header is collecting APIs related to the handling
-parts of the word (usually in byte granularity). The upper_*_bits()
-and lower_*_bits() are good candidates to be moved to there.
-
-This helps to clean up header dependency hell with regard to kernel.h
-as the latter gathers completely unrelated stuff together and slows
-down compilation (especially when it's included into other header).
-
-Signed-off-by: Andy Shevchenko <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Reviewed-by: Randy Dunlap <[email protected]>
-Signed-off-by: Kees Cook <[email protected]>
----
- include/linux/kernel.h   | 30 ++----------------------------
- include/linux/wordpart.h | 29 +++++++++++++++++++++++++++++
- 2 files changed, 31 insertions(+), 28 deletions(-)
-
---- a/include/linux/kernel.h
-+++ b/include/linux/kernel.h
-@@ -32,6 +32,8 @@
- #include <linux/sprintf.h>
- #include <linux/static_call_types.h>
- #include <linux/instruction_pointer.h>
-+#include <linux/wordpart.h>
-+
- #include <asm/byteorder.h>
- 
- #include <uapi/linux/kernel.h>
-@@ -57,34 +59,6 @@
- }					\
- )
- 
--/**
-- * upper_32_bits - return bits 32-63 of a number
-- * @n: the number we're accessing
-- *
-- * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
-- * the "right shift count >= width of type" warning when that quantity is
-- * 32-bits.
-- */
--#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
--
--/**
-- * lower_32_bits - return bits 0-31 of a number
-- * @n: the number we're accessing
-- */
--#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
--
--/**
-- * upper_16_bits - return bits 16-31 of a number
-- * @n: the number we're accessing
-- */
--#define upper_16_bits(n) ((u16)((n) >> 16))
--
--/**
-- * lower_16_bits - return bits 0-15 of a number
-- * @n: the number we're accessing
-- */
--#define lower_16_bits(n) ((u16)((n) & 0xffff))
--
- struct completion;
- struct user;
- 
---- a/include/linux/wordpart.h
-+++ b/include/linux/wordpart.h
-@@ -2,6 +2,35 @@
- 
- #ifndef _LINUX_WORDPART_H
- #define _LINUX_WORDPART_H
-+
-+/**
-+ * upper_32_bits - return bits 32-63 of a number
-+ * @n: the number we're accessing
-+ *
-+ * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
-+ * the "right shift count >= width of type" warning when that quantity is
-+ * 32-bits.
-+ */
-+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
-+
-+/**
-+ * lower_32_bits - return bits 0-31 of a number
-+ * @n: the number we're accessing
-+ */
-+#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
-+
-+/**
-+ * upper_16_bits - return bits 16-31 of a number
-+ * @n: the number we're accessing
-+ */
-+#define upper_16_bits(n) ((u16)((n) >> 16))
-+
-+/**
-+ * lower_16_bits - return bits 0-15 of a number
-+ * @n: the number we're accessing
-+ */
-+#define lower_16_bits(n) ((u16)((n) & 0xffff))
-+
- /**
-  * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
-  * @x: value to repeat

+ 0 - 206
target/linux/generic/backport-6.6/310-v6.7-mips-kexec-fix-the-incorrect-ifdeffery-and-dependenc.patch

@@ -1,206 +0,0 @@
-From 8cd2accb71f5eb8e92d775fc1978d3779875c2e5 Mon Sep 17 00:00:00 2001
-From: Baoquan He <[email protected]>
-Date: Fri, 8 Dec 2023 15:30:34 +0800
-Subject: [PATCH] mips, kexec: fix the incorrect ifdeffery and dependency of
- CONFIG_KEXEC
-
-The select of KEXEC for CRASH_DUMP in kernel/Kconfig.kexec will be
-dropped, then compiling errors will be triggered if below config items are
-set:
-
-===
-CONFIG_CRASH_CORE=y
-CONFIG_KEXEC_CORE=y
-CONFIG_CRASH_DUMP=y
-===
-
---------------------------------------------------------------------
-mipsel-linux-ld: kernel/kexec_core.o: in function `kimage_free':
-kernel/kexec_core.c:(.text+0x2200): undefined reference to `machine_kexec_cleanup'
-mipsel-linux-ld: kernel/kexec_core.o: in function `__crash_kexec':
-kernel/kexec_core.c:(.text+0x2480): undefined reference to `machine_crash_shutdown'
-mipsel-linux-ld: kernel/kexec_core.c:(.text+0x2488): undefined reference to `machine_kexec'
-mipsel-linux-ld: kernel/kexec_core.o: in function `kernel_kexec':
-kernel/kexec_core.c:(.text+0x29b8): undefined reference to `machine_shutdown'
-mipsel-linux-ld: kernel/kexec_core.c:(.text+0x29c0): undefined reference to `machine_kexec'
---------------------------------------------------------------------
-
-Here, change the dependency of building kexec_core related object files,
-and the ifdeffery in mips from CONFIG_KEXEC to CONFIG_KEXEC_CORE.
-
-Link: https://lkml.kernel.org/r/[email protected]
-Signed-off-by: Baoquan He <[email protected]>
-Reported-by: kernel test robot <[email protected]>
-Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
-Cc: Eric DeVolder <[email protected]>
-Cc: Ignat Korchagin <[email protected]>
-Cc: Stephen Rothwell <[email protected]>
-Signed-off-by: Andrew Morton <[email protected]>
----
- arch/mips/cavium-octeon/smp.c   |  4 ++--
- arch/mips/include/asm/kexec.h   |  2 +-
- arch/mips/include/asm/smp-ops.h |  2 +-
- arch/mips/include/asm/smp.h     |  2 +-
- arch/mips/kernel/Makefile       |  2 +-
- arch/mips/kernel/smp-bmips.c    |  4 ++--
- arch/mips/kernel/smp-cps.c      | 10 +++++-----
- arch/mips/loongson64/reset.c    |  4 ++--
- arch/mips/loongson64/smp.c      |  2 +-
- 9 files changed, 16 insertions(+), 16 deletions(-)
-
---- a/arch/mips/cavium-octeon/smp.c
-+++ b/arch/mips/cavium-octeon/smp.c
-@@ -422,7 +422,7 @@ static const struct plat_smp_ops octeon_
- 	.cpu_disable		= octeon_cpu_disable,
- 	.cpu_die		= octeon_cpu_die,
- #endif
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
- #endif
- };
-@@ -502,7 +502,7 @@ static const struct plat_smp_ops octeon_
- 	.cpu_disable		= octeon_cpu_disable,
- 	.cpu_die		= octeon_cpu_die,
- #endif
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
- #endif
- };
---- a/arch/mips/include/asm/kexec.h
-+++ b/arch/mips/include/asm/kexec.h
-@@ -31,7 +31,7 @@ static inline void crash_setup_regs(stru
- 		prepare_frametrace(newregs);
- }
- 
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- struct kimage;
- extern unsigned long kexec_args[4];
- extern int (*_machine_kexec_prepare)(struct kimage *);
---- a/arch/mips/include/asm/smp-ops.h
-+++ b/arch/mips/include/asm/smp-ops.h
-@@ -35,7 +35,7 @@ struct plat_smp_ops {
- 	void (*cpu_die)(unsigned int cpu);
- 	void (*cleanup_dead_cpu)(unsigned cpu);
- #endif
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 	void (*kexec_nonboot_cpu)(void);
- #endif
- };
---- a/arch/mips/include/asm/smp.h
-+++ b/arch/mips/include/asm/smp.h
-@@ -93,7 +93,7 @@ static inline void __cpu_die(unsigned in
- extern void __noreturn play_dead(void);
- #endif
- 
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- static inline void kexec_nonboot_cpu(void)
- {
- 	extern const struct plat_smp_ops *mp_ops;	/* private */
---- a/arch/mips/kernel/Makefile
-+++ b/arch/mips/kernel/Makefile
-@@ -90,7 +90,7 @@ obj-$(CONFIG_GPIO_TXX9)		+= gpio_txx9.o
- 
- obj-$(CONFIG_RELOCATABLE)	+= relocate.o
- 
--obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
-+obj-$(CONFIG_KEXEC_CORE)	+= machine_kexec.o relocate_kernel.o crash.o
- obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
- obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
- obj-$(CONFIG_EARLY_PRINTK_8250)	+= early_printk_8250.o
---- a/arch/mips/kernel/smp-bmips.c
-+++ b/arch/mips/kernel/smp-bmips.c
-@@ -434,7 +434,7 @@ const struct plat_smp_ops bmips43xx_smp_
- 	.cpu_disable		= bmips_cpu_disable,
- 	.cpu_die		= bmips_cpu_die,
- #endif
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
- #endif
- };
-@@ -451,7 +451,7 @@ const struct plat_smp_ops bmips5000_smp_
- 	.cpu_disable		= bmips_cpu_disable,
- 	.cpu_die		= bmips_cpu_die,
- #endif
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
- #endif
- };
---- a/arch/mips/kernel/smp-cps.c
-+++ b/arch/mips/kernel/smp-cps.c
-@@ -395,7 +395,7 @@ static void cps_smp_finish(void)
- 	local_irq_enable();
- }
- 
--#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
-+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
- 
- enum cpu_death {
- 	CPU_DEATH_HALT,
-@@ -432,7 +432,7 @@ static void cps_shutdown_this_cpu(enum c
- 	}
- }
- 
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 
- static void cps_kexec_nonboot_cpu(void)
- {
-@@ -442,9 +442,9 @@ static void cps_kexec_nonboot_cpu(void)
- 		cps_shutdown_this_cpu(CPU_DEATH_POWER);
- }
- 
--#endif /* CONFIG_KEXEC */
-+#endif /* CONFIG_KEXEC_CORE */
- 
--#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
-+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */
- 
- #ifdef CONFIG_HOTPLUG_CPU
- 
-@@ -613,7 +613,7 @@ static const struct plat_smp_ops cps_smp
- 	.cpu_die		= cps_cpu_die,
- 	.cleanup_dead_cpu	= cps_cleanup_dead_cpu,
- #endif
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
- #endif
- };
---- a/arch/mips/loongson64/reset.c
-+++ b/arch/mips/loongson64/reset.c
-@@ -39,7 +39,7 @@ static int firmware_poweroff(struct sys_
- 	return NOTIFY_DONE;
- }
- 
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 
- /* 0X80000000~0X80200000 is safe */
- #define MAX_ARGS	64
-@@ -152,7 +152,7 @@ static int __init mips_reboot_setup(void
- 				 firmware_poweroff, NULL);
- 	}
- 
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 	kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
- 	if (WARN_ON(!kexec_argv))
- 		return -ENOMEM;
---- a/arch/mips/loongson64/smp.c
-+++ b/arch/mips/loongson64/smp.c
-@@ -883,7 +883,7 @@ const struct plat_smp_ops loongson3_smp_
- 	.cpu_disable = loongson3_cpu_disable,
- 	.cpu_die = loongson3_cpu_die,
- #endif
--#ifdef CONFIG_KEXEC
-+#ifdef CONFIG_KEXEC_CORE
- 	.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
- #endif
- };

+ 0 - 171
target/linux/generic/backport-6.6/320-v6.11-mips-bmips-rework-and-cache-CBR-addr-handling.patch

@@ -1,171 +0,0 @@
-From a5c05453a13ab324ad8719e8a23dfb6af01f3652 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Thu, 20 Jun 2024 17:26:42 +0200
-Subject: [PATCH 1/4] mips: bmips: rework and cache CBR addr handling
-
-Rework the handling of the CBR address and cache it. This address
-doesn't change and can be cached instead of reading the register every
-time.
-
-This is in preparation of permitting to tweak the CBR address in DT with
-broken SoC or bootloader.
-
-bmips_cbr_addr is defined in setup.c for each arch to keep compatibility
-with legacy brcm47xx/brcm63xx and generic BMIPS target.
-
-Acked-by: Florian Fainelli <[email protected]>
-Signed-off-by: Christian Marangi <[email protected]>
-Signed-off-by: Thomas Bogendoerfer <[email protected]>
----
- arch/mips/bcm47xx/prom.c      | 3 +++
- arch/mips/bcm47xx/setup.c     | 4 ++++
- arch/mips/bcm63xx/prom.c      | 3 +++
- arch/mips/bcm63xx/setup.c     | 4 ++++
- arch/mips/bmips/dma.c         | 2 +-
- arch/mips/bmips/setup.c       | 7 ++++++-
- arch/mips/include/asm/bmips.h | 1 +
- arch/mips/kernel/smp-bmips.c  | 4 ++--
- 8 files changed, 24 insertions(+), 4 deletions(-)
-
---- a/arch/mips/bcm47xx/prom.c
-+++ b/arch/mips/bcm47xx/prom.c
-@@ -32,6 +32,7 @@
- #include <linux/ssb/ssb_driver_chipcommon.h>
- #include <linux/ssb/ssb_regs.h>
- #include <linux/smp.h>
-+#include <asm/bmips.h>
- #include <asm/bootinfo.h>
- #include <bcm47xx.h>
- #include <bcm47xx_board.h>
-@@ -109,6 +110,8 @@ static __init void prom_init_mem(void)
- 
- void __init prom_init(void)
- {
-+	/* Cache CBR addr before CPU/DMA setup */
-+	bmips_cbr_addr = BMIPS_GET_CBR();
- 	prom_init_mem();
- 	setup_8250_early_printk_port(CKSEG1ADDR(BCM47XX_SERIAL_ADDR), 0, 0);
- }
---- a/arch/mips/bcm47xx/setup.c
-+++ b/arch/mips/bcm47xx/setup.c
-@@ -37,6 +37,7 @@
- #include <linux/ssb/ssb.h>
- #include <linux/ssb/ssb_embedded.h>
- #include <linux/bcma/bcma_soc.h>
-+#include <asm/bmips.h>
- #include <asm/bootinfo.h>
- #include <asm/idle.h>
- #include <asm/prom.h>
-@@ -45,6 +46,9 @@
- #include <bcm47xx.h>
- #include <bcm47xx_board.h>
- 
-+/* CBR addr doesn't change and we can cache it */
-+void __iomem *bmips_cbr_addr __read_mostly;
-+
- union bcm47xx_bus bcm47xx_bus;
- EXPORT_SYMBOL(bcm47xx_bus);
- 
---- a/arch/mips/bcm63xx/prom.c
-+++ b/arch/mips/bcm63xx/prom.c
-@@ -22,6 +22,9 @@ void __init prom_init(void)
- {
- 	u32 reg, mask;
- 
-+	/* Cache CBR addr before CPU/DMA setup */
-+	bmips_cbr_addr = BMIPS_GET_CBR();
-+
- 	bcm63xx_cpu_init();
- 
- 	/* stop any running watchdog */
---- a/arch/mips/bcm63xx/setup.c
-+++ b/arch/mips/bcm63xx/setup.c
-@@ -12,6 +12,7 @@
- #include <linux/memblock.h>
- #include <linux/ioport.h>
- #include <linux/pm.h>
-+#include <asm/bmips.h>
- #include <asm/bootinfo.h>
- #include <asm/time.h>
- #include <asm/reboot.h>
-@@ -22,6 +23,9 @@
- #include <bcm63xx_io.h>
- #include <bcm63xx_gpio.h>
- 
-+/* CBR addr doesn't change and we can cache it */
-+void __iomem *bmips_cbr_addr __read_mostly;
-+
- void bcm63xx_machine_halt(void)
- {
- 	pr_info("System halted\n");
---- a/arch/mips/bmips/dma.c
-+++ b/arch/mips/bmips/dma.c
-@@ -9,7 +9,7 @@ bool bmips_rac_flush_disable;
- 
- void arch_sync_dma_for_cpu_all(void)
- {
--	void __iomem *cbr = BMIPS_GET_CBR();
-+	void __iomem *cbr = bmips_cbr_addr;
- 	u32 cfg;
- 
- 	if (boot_cpu_type() != CPU_BMIPS3300 &&
---- a/arch/mips/bmips/setup.c
-+++ b/arch/mips/bmips/setup.c
-@@ -34,6 +34,9 @@
- #define REG_BCM6328_OTP		((void __iomem *)CKSEG1ADDR(0x1000062c))
- #define BCM6328_TP1_DISABLED	BIT(9)
- 
-+/* CBR addr doesn't change and we can cache it */
-+void __iomem *bmips_cbr_addr __read_mostly;
-+
- extern bool bmips_rac_flush_disable;
- 
- static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
-@@ -111,7 +114,7 @@ static void bcm6358_quirks(void)
- 	 * because the bootloader is not initializing it properly.
- 	 */
- 	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
--				  !!BMIPS_GET_CBR();
-+				  !!bmips_cbr_addr;
- }
- 
- static void bcm6368_quirks(void)
-@@ -144,6 +147,8 @@ static void __init bmips_init_cfe(void)
- 
- void __init prom_init(void)
- {
-+	/* Cache CBR addr before CPU/DMA setup */
-+	bmips_cbr_addr = BMIPS_GET_CBR();
- 	bmips_init_cfe();
- 	bmips_cpu_setup();
- 	register_bmips_smp_ops();
---- a/arch/mips/include/asm/bmips.h
-+++ b/arch/mips/include/asm/bmips.h
-@@ -81,6 +81,7 @@ extern char bmips_smp_movevec[];
- extern char bmips_smp_int_vec[];
- extern char bmips_smp_int_vec_end[];
- 
-+extern void __iomem *bmips_cbr_addr;
- extern int bmips_smp_enabled;
- extern int bmips_cpu_offset;
- extern cpumask_t bmips_booted_mask;
---- a/arch/mips/kernel/smp-bmips.c
-+++ b/arch/mips/kernel/smp-bmips.c
-@@ -518,7 +518,7 @@ static void bmips_set_reset_vec(int cpu,
- 		info.val = val;
- 		bmips_set_reset_vec_remote(&info);
- 	} else {
--		void __iomem *cbr = BMIPS_GET_CBR();
-+		void __iomem *cbr = bmips_cbr_addr;
- 
- 		if (cpu == 0)
- 			__raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
-@@ -591,7 +591,7 @@ asmlinkage void __weak plat_wired_tlb_se
- 
- void bmips_cpu_setup(void)
- {
--	void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
-+	void __iomem __maybe_unused *cbr = bmips_cbr_addr;
- 	u32 __maybe_unused cfg;
- 
- 	switch (current_cpu_type()) {

+ 0 - 111
target/linux/generic/backport-6.6/321-v6.11-mips-bmips-setup-make-CBR-address-configurable.patch

@@ -1,111 +0,0 @@
-From b95b30e50aed225d26e20737873ae2404941901c Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Thu, 20 Jun 2024 17:26:44 +0200
-Subject: [PATCH 3/4] mips: bmips: setup: make CBR address configurable
-
-Add support to provide CBR address from DT to handle broken
-SoC/Bootloader that doesn't correctly init it. This permits to use the
-RAC flush even in these condition.
-
-To provide a CBR address from DT, the property "brcm,bmips-cbr-reg"
-needs to be set in the "cpus" node. On DT init, this property presence
-will be checked and will set the bmips_cbr_addr value accordingly. Also
-bmips_rac_flush_disable will be set to false as RAC flush can be
-correctly supported.
-
-The CBR address from DT will overwrite the cached one and the
-one set in the CBR register will be ignored.
-
-Also the DT CBR address is validated on being outside DRAM window.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Acked-by: Florian Fainelli <[email protected]>
-Signed-off-by: Thomas Bogendoerfer <[email protected]>
----
- arch/mips/bcm47xx/setup.c |  6 +++++-
- arch/mips/bcm63xx/setup.c |  6 +++++-
- arch/mips/bmips/setup.c   | 30 ++++++++++++++++++++++++++++--
- 3 files changed, 38 insertions(+), 4 deletions(-)
-
---- a/arch/mips/bcm47xx/setup.c
-+++ b/arch/mips/bcm47xx/setup.c
-@@ -46,7 +46,11 @@
- #include <bcm47xx.h>
- #include <bcm47xx_board.h>
- 
--/* CBR addr doesn't change and we can cache it */
-+/*
-+ * CBR addr doesn't change and we can cache it.
-+ * For broken SoC/Bootloader CBR addr might also be provided via DT
-+ * with "brcm,bmips-cbr-reg" in the "cpus" node.
-+ */
- void __iomem *bmips_cbr_addr __read_mostly;
- 
- union bcm47xx_bus bcm47xx_bus;
---- a/arch/mips/bcm63xx/setup.c
-+++ b/arch/mips/bcm63xx/setup.c
-@@ -23,7 +23,11 @@
- #include <bcm63xx_io.h>
- #include <bcm63xx_gpio.h>
- 
--/* CBR addr doesn't change and we can cache it */
-+/*
-+ * CBR addr doesn't change and we can cache it.
-+ * For broken SoC/Bootloader CBR addr might also be provided via DT
-+ * with "brcm,bmips-cbr-reg" in the "cpus" node.
-+ */
- void __iomem *bmips_cbr_addr __read_mostly;
- 
- void bcm63xx_machine_halt(void)
---- a/arch/mips/bmips/setup.c
-+++ b/arch/mips/bmips/setup.c
-@@ -34,7 +34,11 @@
- #define REG_BCM6328_OTP		((void __iomem *)CKSEG1ADDR(0x1000062c))
- #define BCM6328_TP1_DISABLED	BIT(9)
- 
--/* CBR addr doesn't change and we can cache it */
-+/*
-+ * CBR addr doesn't change and we can cache it.
-+ * For broken SoC/Bootloader CBR addr might also be provided via DT
-+ * with "brcm,bmips-cbr-reg" in the "cpus" node.
-+ */
- void __iomem *bmips_cbr_addr __read_mostly;
- 
- extern bool bmips_rac_flush_disable;
-@@ -208,13 +212,35 @@ void __init plat_mem_setup(void)
- void __init device_tree_init(void)
- {
- 	struct device_node *np;
-+	u32 addr;
- 
- 	unflatten_and_copy_device_tree();
- 
- 	/* Disable SMP boot unless both CPUs are listed in DT and !disabled */
- 	np = of_find_node_by_name(NULL, "cpus");
--	if (np && of_get_available_child_count(np) <= 1)
-+	if (!np)
-+		return;
-+
-+	if (of_get_available_child_count(np) <= 1)
- 		bmips_smp_enabled = 0;
-+
-+	/* Check if DT provide a CBR address */
-+	if (of_property_read_u32(np, "brcm,bmips-cbr-reg", &addr))
-+		goto exit;
-+
-+	/* Make sure CBR address is outside DRAM window */
-+	if (addr >= (u32)memblock_start_of_DRAM() &&
-+	    addr < (u32)memblock_end_of_DRAM()) {
-+		WARN(1, "DT CBR %x inside DRAM window. Ignoring DT CBR.\n",
-+		     addr);
-+		goto exit;
-+	}
-+
-+	bmips_cbr_addr = (void __iomem *)addr;
-+	/* Since CBR is provided by DT, enable RAC flush */
-+	bmips_rac_flush_disable = false;
-+
-+exit:
- 	of_node_put(np);
- }
- 

+ 0 - 57
target/linux/generic/backport-6.6/322-v6.11-mips-bmips-enable-RAC-on-BMIPS4350.patch

@@ -1,57 +0,0 @@
-From 04f38d1a4db017f17e82442727b91ce03dd72759 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Daniel=20Gonz=C3=A1lez=20Cabanelas?= <[email protected]>
-Date: Thu, 20 Jun 2024 17:26:45 +0200
-Subject: [PATCH 4/4] mips: bmips: enable RAC on BMIPS4350
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-The data RAC is left disabled by the bootloader in some SoCs, at least in
-the core it boots from.
-Enabling this feature increases the performance up to +30% depending on the
-task.
-
-Signed-off-by: Daniel González Cabanelas <[email protected]>
-Signed-off-by: Álvaro Fernández Rojas <[email protected]>
-[ rework code and reduce code duplication ]
-Acked-by: Florian Fainelli <[email protected]>
-Signed-off-by: Christian Marangi <[email protected]>
-Signed-off-by: Thomas Bogendoerfer <[email protected]>
----
- arch/mips/kernel/smp-bmips.c | 18 ++++++++++++++++++
- 1 file changed, 18 insertions(+)
-
---- a/arch/mips/kernel/smp-bmips.c
-+++ b/arch/mips/kernel/smp-bmips.c
-@@ -592,6 +592,7 @@ asmlinkage void __weak plat_wired_tlb_se
- void bmips_cpu_setup(void)
- {
- 	void __iomem __maybe_unused *cbr = bmips_cbr_addr;
-+	u32 __maybe_unused rac_addr;
- 	u32 __maybe_unused cfg;
- 
- 	switch (current_cpu_type()) {
-@@ -620,6 +621,23 @@ void bmips_cpu_setup(void)
- 		__raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
- 		break;
- 
-+	case CPU_BMIPS4350:
-+		rac_addr = BMIPS_RAC_CONFIG_1;
-+
-+		if (!(read_c0_brcm_cmt_local() & (1 << 31)))
-+			rac_addr = BMIPS_RAC_CONFIG;
-+
-+		/* Enable data RAC */
-+		cfg = __raw_readl(cbr + rac_addr);
-+		__raw_writel(cfg | 0xf, cbr + rac_addr);
-+		__raw_readl(cbr + rac_addr);
-+
-+		/* Flush stale data out of the readahead cache */
-+		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
-+		__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
-+		__raw_readl(cbr + BMIPS_RAC_CONFIG);
-+		break;
-+
- 	case CPU_BMIPS4380:
- 		/* CBG workaround for early BMIPS4380 CPUs */
- 		switch (read_c0_prid()) {

+ 0 - 60
target/linux/generic/backport-6.6/330-v6.13-jiffies-Define-secs_to_jiffies.patch

@@ -1,60 +0,0 @@
-From b35108a51cf7bab58d7eace1267d7965978bcdb8 Mon Sep 17 00:00:00 2001
-From: Easwar Hariharan <[email protected]>
-Date: Wed, 30 Oct 2024 17:47:35 +0000
-Subject: [PATCH] jiffies: Define secs_to_jiffies()
-
-secs_to_jiffies() is defined in hci_event.c and cannot be reused by
-other call sites. Hoist it into the core code to allow conversion of the
-~1150 usages of msecs_to_jiffies() that either:
-
- - use a multiplier value of 1000 or equivalently MSEC_PER_SEC, or
- - have timeouts that are denominated in seconds (i.e. end in 000)
-
-It's implemented as a macro to allow usage in static initializers.
-
-This will also allow conversion of yet more sites that use (sec * HZ)
-directly, and improve their readability.
-
-Suggested-by: Michael Kelley <[email protected]>
-Signed-off-by: Easwar Hariharan <[email protected]>
-Signed-off-by: Thomas Gleixner <[email protected]>
-Reviewed-by: Luiz Augusto von Dentz <[email protected]>
-Link: https://lore.kernel.org/all/20241030-open-coded-timeouts-v3-1-9ba123facf88@linux.microsoft.com
----
- include/linux/jiffies.h   | 13 +++++++++++++
- net/bluetooth/hci_event.c |  2 --
- 2 files changed, 13 insertions(+), 2 deletions(-)
-
---- a/include/linux/jiffies.h
-+++ b/include/linux/jiffies.h
-@@ -523,6 +523,19 @@ static __always_inline unsigned long mse
- 	}
- }
- 
-+/**
-+ * secs_to_jiffies: - convert seconds to jiffies
-+ * @_secs: time in seconds
-+ *
-+ * Conversion is done by simple multiplication with HZ
-+ *
-+ * secs_to_jiffies() is defined as a macro rather than a static inline
-+ * function so it can be used in static initializers.
-+ *
-+ * Return: jiffies value
-+ */
-+#define secs_to_jiffies(_secs) ((_secs) * HZ)
-+
- extern unsigned long __usecs_to_jiffies(const unsigned int u);
- #if !(USEC_PER_SEC % HZ)
- static inline unsigned long _usecs_to_jiffies(const unsigned int u)
---- a/net/bluetooth/hci_event.c
-+++ b/net/bluetooth/hci_event.c
-@@ -43,8 +43,6 @@
- #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
- 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
- 
--#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
--
- /* Handle HCI Event packets */
- 
- static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,

+ 0 - 35
target/linux/generic/backport-6.6/331-v6.14-jiffies-Cast-to-unsigned-long-in-secs_to_jiffies-con.patch

@@ -1,35 +0,0 @@
-From bb2784d9ab49587ba4fbff37a319fff2924db289 Mon Sep 17 00:00:00 2001
-From: Easwar Hariharan <[email protected]>
-Date: Thu, 30 Jan 2025 19:26:58 +0000
-Subject: [PATCH] jiffies: Cast to unsigned long in secs_to_jiffies()
- conversion
-
-While converting users of msecs_to_jiffies(), lkp reported that some range
-checks would always be true because of the mismatch between the implied int
-value of secs_to_jiffies() vs the unsigned long return value of the
-msecs_to_jiffies() calls it was replacing.
-
-Fix this by casting the secs_to_jiffies() input value to unsigned long.
-
-Fixes: b35108a51cf7ba ("jiffies: Define secs_to_jiffies()")
-Reported-by: kernel test robot <[email protected]>
-Signed-off-by: Easwar Hariharan <[email protected]>
-Signed-off-by: Thomas Gleixner <[email protected]>
-Cc: [email protected]
-Link: https://lore.kernel.org/all/[email protected]
-Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
----
- include/linux/jiffies.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/include/linux/jiffies.h
-+++ b/include/linux/jiffies.h
-@@ -534,7 +534,7 @@ static __always_inline unsigned long mse
-  *
-  * Return: jiffies value
-  */
--#define secs_to_jiffies(_secs) ((_secs) * HZ)
-+#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)
- 
- extern unsigned long __usecs_to_jiffies(const unsigned int u);
- #if !(USEC_PER_SEC % HZ)

+ 0 - 36
target/linux/generic/backport-6.6/400-v6.9-mtd-rawnand-brcmnand-Support-write-protection-settin.patch

@@ -1,36 +0,0 @@
-From 8e7daa85641c9559c113f6b217bdc923397de77c Mon Sep 17 00:00:00 2001
-From: William Zhang <[email protected]>
-Date: Thu, 22 Feb 2024 19:47:58 -0800
-Subject: [PATCH] mtd: rawnand: brcmnand: Support write protection setting from
- dts
-
-The write protection feature is controlled by the module parameter wp_on
-with default set to enabled. But not all the board use this feature
-especially in BCMBCA broadband board. And module parameter is not
-sufficient as different board can have different option.  Add a device
-tree property and allow this feature to be configured through the board
-dts on per board basis.
-
-Signed-off-by: William Zhang <[email protected]>
-Reviewed-by: Florian Fainelli <[email protected]>
-Reviewed-by: Kamal Dasu <[email protected]>
-Reviewed-by: David Regan <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
-Link: https://lore.kernel.org/linux-mtd/[email protected]
----
- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
-+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
-@@ -3194,6 +3194,10 @@ int brcmnand_probe(struct platform_devic
- 	/* Disable XOR addressing */
- 	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
- 
-+	/* Check if the board connects the WP pin */
-+	if (of_property_read_bool(dn, "brcm,wp-not-connected"))
-+		wp_on = 0;
-+
- 	if (ctrl->features & BRCMNAND_HAS_WP) {
- 		/* Permanently disable write protection */
- 		if (wp_on == 2)

+ 0 - 123
target/linux/generic/backport-6.6/401-v6.9-dt-bindings-mtd-add-basic-bindings-for-UBI.patch

@@ -1,123 +0,0 @@
-From 25d88bfd35bac3196eafa666e3b05033b46ffa21 Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Tue, 19 Dec 2023 02:32:00 +0000
-Subject: [PATCH 1/8] dt-bindings: mtd: add basic bindings for UBI
-
-Add basic bindings for UBI devices and volumes.
-
-Signed-off-by: Daniel Golle <[email protected]>
-Reviewed-by: Rob Herring <[email protected]>
-Signed-off-by: Richard Weinberger <[email protected]>
----
- .../bindings/mtd/partitions/linux,ubi.yaml    | 65 +++++++++++++++++++
- .../bindings/mtd/partitions/ubi-volume.yaml   | 35 ++++++++++
- 2 files changed, 100 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
- create mode 100644 Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
-@@ -0,0 +1,65 @@
-+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
-+%YAML 1.2
-+---
-+$id: http://devicetree.org/schemas/mtd/partitions/linux,ubi.yaml#
-+$schema: http://devicetree.org/meta-schemas/core.yaml#
-+
-+title: Unsorted Block Images
-+
-+description: |
-+  UBI ("Unsorted Block Images") is a volume management system for raw
-+  flash devices which manages multiple logical volumes on a single
-+  physical flash device and spreads the I/O load (i.e wear-leveling)
-+  across the whole flash chip.
-+
-+maintainers:
-+  - Daniel Golle <[email protected]>
-+
-+allOf:
-+  - $ref: partition.yaml#
-+
-+properties:
-+  compatible:
-+    const: linux,ubi
-+
-+  volumes:
-+    type: object
-+    description: UBI Volumes
-+
-+    patternProperties:
-+      "^ubi-volume-.*$":
-+        $ref: /schemas/mtd/partitions/ubi-volume.yaml#
-+
-+    unevaluatedProperties: false
-+
-+required:
-+  - compatible
-+
-+unevaluatedProperties: false
-+
-+examples:
-+  - |
-+    partitions {
-+        compatible = "fixed-partitions";
-+        #address-cells = <1>;
-+        #size-cells = <1>;
-+
-+        partition@0 {
-+            reg = <0x0 0x100000>;
-+            label = "bootloader";
-+            read-only;
-+        };
-+
-+        partition@100000 {
-+            reg = <0x100000 0x1ff00000>;
-+            label = "ubi";
-+            compatible = "linux,ubi";
-+
-+            volumes {
-+                ubi-volume-caldata {
-+                    volid = <2>;
-+                    volname = "rf";
-+                };
-+            };
-+        };
-+    };
---- /dev/null
-+++ b/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
-@@ -0,0 +1,35 @@
-+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
-+%YAML 1.2
-+---
-+$id: http://devicetree.org/schemas/mtd/partitions/ubi-volume.yaml#
-+$schema: http://devicetree.org/meta-schemas/core.yaml#
-+
-+title: UBI volume
-+
-+description: |
-+  This binding describes a single UBI volume. Volumes can be matches either
-+  by their ID or their name, or both.
-+
-+maintainers:
-+  - Daniel Golle <[email protected]>
-+
-+properties:
-+  volid:
-+    $ref: /schemas/types.yaml#/definitions/uint32
-+    description:
-+      Match UBI volume ID
-+
-+  volname:
-+    $ref: /schemas/types.yaml#/definitions/string
-+    description:
-+      Match UBI volume ID
-+
-+anyOf:
-+  - required:
-+      - volid
-+
-+  - required:
-+      - volname
-+
-+# This is a generic file other binding inherit from and extend
-+additionalProperties: true

+ 0 - 50
target/linux/generic/backport-6.6/402-v6.9-dt-bindings-mtd-ubi-volume-allow-UBI-volumes-to-prov.patch

@@ -1,50 +0,0 @@
-From 95b113222b5164ac0887eb5c514ff3970a0136f0 Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Tue, 19 Dec 2023 02:32:11 +0000
-Subject: [PATCH 2/8] dt-bindings: mtd: ubi-volume: allow UBI volumes to
- provide NVMEM
-
-UBI volumes may be used to contain NVMEM bits, typically device MAC
-addresses or wireless radio calibration data.
-
-Signed-off-by: Daniel Golle <[email protected]>
-Reviewed-by: Rob Herring <[email protected]>
-Signed-off-by: Richard Weinberger <[email protected]>
----
- .../devicetree/bindings/mtd/partitions/linux,ubi.yaml  | 10 ++++++++++
- .../devicetree/bindings/mtd/partitions/ubi-volume.yaml |  5 +++++
- 2 files changed, 15 insertions(+)
-
---- a/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
-+++ b/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
-@@ -59,6 +59,16 @@ examples:
-                 ubi-volume-caldata {
-                     volid = <2>;
-                     volname = "rf";
-+
-+                    nvmem-layout {
-+                        compatible = "fixed-layout";
-+                        #address-cells = <1>;
-+                        #size-cells = <1>;
-+
-+                        eeprom@0 {
-+                            reg = <0x0 0x1000>;
-+                        };
-+                    };
-                 };
-             };
-         };
---- a/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
-+++ b/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
-@@ -24,6 +24,11 @@ properties:
-     description:
-       Match UBI volume ID
- 
-+  nvmem-layout:
-+    $ref: /schemas/nvmem/layouts/nvmem-layout.yaml#
-+    description:
-+      This container may reference an NVMEM layout parser.
-+
- anyOf:
-   - required:
-       - volid

+ 0 - 285
target/linux/generic/backport-6.6/403-v6.9-mtd-ubi-block-use-notifier-to-create-ubiblock-from-p.patch

@@ -1,285 +0,0 @@
-From 2bba1cdcfcd2907d0696cc0139f1bd078d36ee81 Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Tue, 19 Dec 2023 02:32:35 +0000
-Subject: [PATCH 3/8] mtd: ubi: block: use notifier to create ubiblock from
- parameter
-
-Use UBI_VOLUME_ADDED notification to create ubiblock device specified
-on kernel cmdline or module parameter.
-This makes thing more simple and has the advantage that ubiblock devices
-on volumes which are not present at the time the ubi module is probed
-will still be created.
-
-Suggested-by: Zhihao Cheng <[email protected]>
-Signed-off-by: Daniel Golle <[email protected]>
-Signed-off-by: Richard Weinberger <[email protected]>
----
- drivers/mtd/ubi/block.c | 136 ++++++++++++++++++++--------------------
- drivers/mtd/ubi/kapi.c  |  54 +++++++++++-----
- drivers/mtd/ubi/ubi.h   |   1 +
- 3 files changed, 106 insertions(+), 85 deletions(-)
-
---- a/drivers/mtd/ubi/block.c
-+++ b/drivers/mtd/ubi/block.c
-@@ -65,10 +65,10 @@ struct ubiblock_pdu {
- };
- 
- /* Numbers of elements set in the @ubiblock_param array */
--static int ubiblock_devs __initdata;
-+static int ubiblock_devs;
- 
- /* MTD devices specification parameters */
--static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
-+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
- 
- struct ubiblock {
- 	struct ubi_volume_desc *desc;
-@@ -532,6 +532,70 @@ static int ubiblock_resize(struct ubi_vo
- 	return 0;
- }
- 
-+static bool
-+match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
-+{
-+	int err, len, cur_ubi_num, cur_vol_id;
-+
-+	if (ubi_num == -1) {
-+		/* No ubi num, name must be a vol device path */
-+		err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
-+		if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
-+			return false;
-+
-+		return true;
-+	}
-+
-+	if (vol_id == -1) {
-+		/* Got ubi_num, but no vol_id, name must be volume name */
-+		if (vi->ubi_num != ubi_num)
-+			return false;
-+
-+		len = strnlen(name, UBI_VOL_NAME_MAX + 1);
-+		if (len < 1 || vi->name_len != len)
-+			return false;
-+
-+		if (strcmp(name, vi->name))
-+			return false;
-+
-+		return true;
-+	}
-+
-+	if (vi->ubi_num != ubi_num)
-+		return false;
-+
-+	if (vi->vol_id != vol_id)
-+		return false;
-+
-+	return true;
-+}
-+
-+static void
-+ubiblock_create_from_param(struct ubi_volume_info *vi)
-+{
-+	int i, ret = 0;
-+	struct ubiblock_param *p;
-+
-+	/*
-+	 * Iterate over ubiblock cmdline parameters. If a parameter matches the
-+	 * newly added volume create the ubiblock device for it.
-+	 */
-+	for (i = 0; i < ubiblock_devs; i++) {
-+		p = &ubiblock_param[i];
-+
-+		if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
-+			continue;
-+
-+		ret = ubiblock_create(vi);
-+		if (ret) {
-+			pr_err(
-+			       "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
-+			       vi->name, p->ubi_num, p->vol_id, ret);
-+		}
-+		break;
-+	}
-+}
-+
- static int ubiblock_notify(struct notifier_block *nb,
- 			 unsigned long notification_type, void *ns_ptr)
- {
-@@ -539,10 +603,7 @@ static int ubiblock_notify(struct notifi
- 
- 	switch (notification_type) {
- 	case UBI_VOLUME_ADDED:
--		/*
--		 * We want to enforce explicit block device creation for
--		 * volumes, so when a volume is added we do nothing.
--		 */
-+		ubiblock_create_from_param(&nt->vi);
- 		break;
- 	case UBI_VOLUME_REMOVED:
- 		ubiblock_remove(&nt->vi);
-@@ -568,56 +629,6 @@ static struct notifier_block ubiblock_no
- 	.notifier_call = ubiblock_notify,
- };
- 
--static struct ubi_volume_desc * __init
--open_volume_desc(const char *name, int ubi_num, int vol_id)
--{
--	if (ubi_num == -1)
--		/* No ubi num, name must be a vol device path */
--		return ubi_open_volume_path(name, UBI_READONLY);
--	else if (vol_id == -1)
--		/* No vol_id, must be vol_name */
--		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
--	else
--		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
--}
--
--static void __init ubiblock_create_from_param(void)
--{
--	int i, ret = 0;
--	struct ubiblock_param *p;
--	struct ubi_volume_desc *desc;
--	struct ubi_volume_info vi;
--
--	/*
--	 * If there is an error creating one of the ubiblocks, continue on to
--	 * create the following ubiblocks. This helps in a circumstance where
--	 * the kernel command-line specifies multiple block devices and some
--	 * may be broken, but we still want the working ones to come up.
--	 */
--	for (i = 0; i < ubiblock_devs; i++) {
--		p = &ubiblock_param[i];
--
--		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
--		if (IS_ERR(desc)) {
--			pr_err(
--			       "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
--			       p->ubi_num, p->vol_id, PTR_ERR(desc));
--			continue;
--		}
--
--		ubi_get_volume_info(desc, &vi);
--		ubi_close_volume(desc);
--
--		ret = ubiblock_create(&vi);
--		if (ret) {
--			pr_err(
--			       "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
--			       vi.name, p->ubi_num, p->vol_id, ret);
--			continue;
--		}
--	}
--}
--
- static void ubiblock_remove_all(void)
- {
- 	struct ubiblock *next;
-@@ -643,18 +654,7 @@ int __init ubiblock_init(void)
- 	if (ubiblock_major < 0)
- 		return ubiblock_major;
- 
--	/*
--	 * Attach block devices from 'block=' module param.
--	 * Even if one block device in the param list fails to come up,
--	 * still allow the module to load and leave any others up.
--	 */
--	ubiblock_create_from_param();
--
--	/*
--	 * Block devices are only created upon user requests, so we ignore
--	 * existing volumes.
--	 */
--	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
-+	ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
- 	if (ret)
- 		goto err_unreg;
- 	return 0;
---- a/drivers/mtd/ubi/kapi.c
-+++ b/drivers/mtd/ubi/kapi.c
-@@ -280,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_
- EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
- 
- /**
-+ * ubi_get_num_by_path - get UBI device and volume number from device path
-+ * @pathname: volume character device node path
-+ * @ubi_num: pointer to UBI device number to be set
-+ * @vol_id: pointer to UBI volume ID to be set
-+ *
-+ * Returns 0 on success and sets ubi_num and vol_id, returns error otherwise.
-+ */
-+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
-+{
-+	int error;
-+	struct path path;
-+	struct kstat stat;
-+
-+	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
-+	if (error)
-+		return error;
-+
-+	error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
-+	path_put(&path);
-+	if (error)
-+		return error;
-+
-+	if (!S_ISCHR(stat.mode))
-+		return -EINVAL;
-+
-+	*ubi_num = ubi_major2num(MAJOR(stat.rdev));
-+	*vol_id = MINOR(stat.rdev) - 1;
-+
-+	if (*vol_id < 0 || *ubi_num < 0)
-+		return -ENODEV;
-+
-+	return 0;
-+}
-+
-+/**
-  * ubi_open_volume_path - open UBI volume by its character device node path.
-  * @pathname: volume character device node path
-  * @mode: open mode
-@@ -290,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
- struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
- {
- 	int error, ubi_num, vol_id;
--	struct path path;
--	struct kstat stat;
- 
- 	dbg_gen("open volume %s, mode %d", pathname, mode);
- 
- 	if (!pathname || !*pathname)
- 		return ERR_PTR(-EINVAL);
- 
--	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
--	if (error)
--		return ERR_PTR(error);
--
--	error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
--	path_put(&path);
-+	error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
- 	if (error)
- 		return ERR_PTR(error);
- 
--	if (!S_ISCHR(stat.mode))
--		return ERR_PTR(-EINVAL);
--
--	ubi_num = ubi_major2num(MAJOR(stat.rdev));
--	vol_id = MINOR(stat.rdev) - 1;
--
--	if (vol_id >= 0 && ubi_num >= 0)
--		return ubi_open_volume(ubi_num, vol_id, mode);
--	return ERR_PTR(-ENODEV);
-+	return ubi_open_volume(ubi_num, vol_id, mode);
- }
- EXPORT_SYMBOL_GPL(ubi_open_volume_path);
- 
---- a/drivers/mtd/ubi/ubi.h
-+++ b/drivers/mtd/ubi/ubi.h
-@@ -956,6 +956,7 @@ void ubi_free_internal_volumes(struct ub
- void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
- void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
- 			    struct ubi_volume_info *vi);
-+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
- /* scan.c */
- int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
- 		      int pnum, const struct ubi_vid_hdr *vid_hdr);

+ 0 - 205
target/linux/generic/backport-6.6/404-v6.9-mtd-ubi-attach-from-device-tree.patch

@@ -1,205 +0,0 @@
-From 6e331888643887ce85657527bc03f97d46235e71 Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Tue, 19 Dec 2023 02:33:14 +0000
-Subject: [PATCH 4/8] mtd: ubi: attach from device tree
-
-Introduce device tree compatible 'linux,ubi' and attach compatible MTD
-devices using the MTD add notifier. This is needed for a UBI device to
-be available early at boot (and not only after late_initcall), so
-volumes on them can be used eg. as NVMEM providers for other drivers.
-
-Signed-off-by: Daniel Golle <[email protected]>
-Signed-off-by: Richard Weinberger <[email protected]>
----
- drivers/mtd/ubi/build.c | 135 ++++++++++++++++++++++++++++------------
- 1 file changed, 96 insertions(+), 39 deletions(-)
-
---- a/drivers/mtd/ubi/build.c
-+++ b/drivers/mtd/ubi/build.c
-@@ -27,6 +27,7 @@
- #include <linux/log2.h>
- #include <linux/kthread.h>
- #include <linux/kernel.h>
-+#include <linux/of.h>
- #include <linux/slab.h>
- #include <linux/major.h>
- #include "ubi.h"
-@@ -1214,43 +1215,43 @@ static struct mtd_info * __init open_mtd
- 	return mtd;
- }
- 
--static int __init ubi_init(void)
-+static void ubi_notify_add(struct mtd_info *mtd)
- {
--	int err, i, k;
-+	struct device_node *np = mtd_get_of_node(mtd);
-+	int err;
- 
--	/* Ensure that EC and VID headers have correct size */
--	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
--	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
-+	if (!of_device_is_compatible(np, "linux,ubi"))
-+		return;
- 
--	if (mtd_devs > UBI_MAX_DEVICES) {
--		pr_err("UBI error: too many MTD devices, maximum is %d\n",
--		       UBI_MAX_DEVICES);
--		return -EINVAL;
--	}
-+	/*
-+	 * we are already holding &mtd_table_mutex, but still need
-+	 * to bump refcount
-+	 */
-+	err = __get_mtd_device(mtd);
-+	if (err)
-+		return;
- 
--	/* Create base sysfs directory and sysfs files */
--	err = class_register(&ubi_class);
-+	/* called while holding mtd_table_mutex */
-+	mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
-+	err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false);
-+	mutex_unlock(&ubi_devices_mutex);
- 	if (err < 0)
--		return err;
--
--	err = misc_register(&ubi_ctrl_cdev);
--	if (err) {
--		pr_err("UBI error: cannot register device\n");
--		goto out;
--	}
-+		__put_mtd_device(mtd);
-+}
- 
--	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
--					      sizeof(struct ubi_wl_entry),
--					      0, 0, NULL);
--	if (!ubi_wl_entry_slab) {
--		err = -ENOMEM;
--		goto out_dev_unreg;
--	}
-+static void ubi_notify_remove(struct mtd_info *mtd)
-+{
-+	/* do nothing for now */
-+}
- 
--	err = ubi_debugfs_init();
--	if (err)
--		goto out_slab;
-+static struct mtd_notifier ubi_mtd_notifier = {
-+	.add = ubi_notify_add,
-+	.remove = ubi_notify_remove,
-+};
- 
-+static int __init ubi_init_attach(void)
-+{
-+	int err, i, k;
- 
- 	/* Attach MTD devices */
- 	for (i = 0; i < mtd_devs; i++) {
-@@ -1298,25 +1299,79 @@ static int __init ubi_init(void)
- 		}
- 	}
- 
-+	return 0;
-+
-+out_detach:
-+	for (k = 0; k < i; k++)
-+		if (ubi_devices[k]) {
-+			mutex_lock(&ubi_devices_mutex);
-+			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
-+			mutex_unlock(&ubi_devices_mutex);
-+		}
-+	return err;
-+}
-+#ifndef CONFIG_MTD_UBI_MODULE
-+late_initcall(ubi_init_attach);
-+#endif
-+
-+static int __init ubi_init(void)
-+{
-+	int err;
-+
-+	/* Ensure that EC and VID headers have correct size */
-+	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
-+	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
-+
-+	if (mtd_devs > UBI_MAX_DEVICES) {
-+		pr_err("UBI error: too many MTD devices, maximum is %d\n",
-+		       UBI_MAX_DEVICES);
-+		return -EINVAL;
-+	}
-+
-+	/* Create base sysfs directory and sysfs files */
-+	err = class_register(&ubi_class);
-+	if (err < 0)
-+		return err;
-+
-+	err = misc_register(&ubi_ctrl_cdev);
-+	if (err) {
-+		pr_err("UBI error: cannot register device\n");
-+		goto out;
-+	}
-+
-+	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
-+					      sizeof(struct ubi_wl_entry),
-+					      0, 0, NULL);
-+	if (!ubi_wl_entry_slab) {
-+		err = -ENOMEM;
-+		goto out_dev_unreg;
-+	}
-+
-+	err = ubi_debugfs_init();
-+	if (err)
-+		goto out_slab;
-+
- 	err = ubiblock_init();
- 	if (err) {
- 		pr_err("UBI error: block: cannot initialize, error %d\n", err);
- 
- 		/* See comment above re-ubi_is_module(). */
- 		if (ubi_is_module())
--			goto out_detach;
-+			goto out_slab;
-+	}
-+
-+	register_mtd_user(&ubi_mtd_notifier);
-+
-+	if (ubi_is_module()) {
-+		err = ubi_init_attach();
-+		if (err)
-+			goto out_mtd_notifier;
- 	}
- 
- 	return 0;
- 
--out_detach:
--	for (k = 0; k < i; k++)
--		if (ubi_devices[k]) {
--			mutex_lock(&ubi_devices_mutex);
--			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
--			mutex_unlock(&ubi_devices_mutex);
--		}
--	ubi_debugfs_exit();
-+out_mtd_notifier:
-+	unregister_mtd_user(&ubi_mtd_notifier);
- out_slab:
- 	kmem_cache_destroy(ubi_wl_entry_slab);
- out_dev_unreg:
-@@ -1326,13 +1381,15 @@ out:
- 	pr_err("UBI error: cannot initialize UBI, error %d\n", err);
- 	return err;
- }
--late_initcall(ubi_init);
-+device_initcall(ubi_init);
-+
- 
- static void __exit ubi_exit(void)
- {
- 	int i;
- 
- 	ubiblock_exit();
-+	unregister_mtd_user(&ubi_mtd_notifier);
- 
- 	for (i = 0; i < UBI_MAX_DEVICES; i++)
- 		if (ubi_devices[i]) {

+ 0 - 180
target/linux/generic/backport-6.6/405-v6.9-mtd-ubi-introduce-pre-removal-notification-for-UBI-v.patch

@@ -1,180 +0,0 @@
-From 924731fbed3247e3b82b8ab17db587ee28c2e781 Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Tue, 19 Dec 2023 02:33:24 +0000
-Subject: [PATCH 5/8] mtd: ubi: introduce pre-removal notification for UBI
- volumes
-
-Introduce a new notification type UBI_VOLUME_SHUTDOWN to inform users
-that a volume is just about to be removed.
-This is needed because users (such as the NVMEM subsystem) expect that
-at the time their removal function is called, the parenting device is
-still available (for removal of sysfs nodes, for example, in case of
-NVMEM which otherwise WARNs on volume removal).
-
-Signed-off-by: Daniel Golle <[email protected]>
-Signed-off-by: Richard Weinberger <[email protected]>
----
- drivers/mtd/ubi/build.c | 19 ++++++++++++++-----
- drivers/mtd/ubi/kapi.c  |  2 +-
- drivers/mtd/ubi/ubi.h   |  2 ++
- drivers/mtd/ubi/vmt.c   | 17 +++++++++++++++--
- include/linux/mtd/ubi.h |  2 ++
- 5 files changed, 34 insertions(+), 8 deletions(-)
-
---- a/drivers/mtd/ubi/build.c
-+++ b/drivers/mtd/ubi/build.c
-@@ -91,7 +91,7 @@ static struct ubi_device *ubi_devices[UB
- /* Serializes UBI devices creations and removals */
- DEFINE_MUTEX(ubi_devices_mutex);
- 
--/* Protects @ubi_devices and @ubi->ref_count */
-+/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
- static DEFINE_SPINLOCK(ubi_devices_lock);
- 
- /* "Show" method for files in '/<sysfs>/class/ubi/' */
-@@ -259,6 +259,9 @@ struct ubi_device *ubi_get_device(int ub
- 
- 	spin_lock(&ubi_devices_lock);
- 	ubi = ubi_devices[ubi_num];
-+	if (ubi && ubi->is_dead)
-+		ubi = NULL;
-+
- 	if (ubi) {
- 		ubi_assert(ubi->ref_count >= 0);
- 		ubi->ref_count += 1;
-@@ -296,7 +299,7 @@ struct ubi_device *ubi_get_by_major(int
- 	spin_lock(&ubi_devices_lock);
- 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
- 		ubi = ubi_devices[i];
--		if (ubi && MAJOR(ubi->cdev.dev) == major) {
-+		if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
- 			ubi_assert(ubi->ref_count >= 0);
- 			ubi->ref_count += 1;
- 			get_device(&ubi->dev);
-@@ -325,7 +328,7 @@ int ubi_major2num(int major)
- 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
- 		struct ubi_device *ubi = ubi_devices[i];
- 
--		if (ubi && MAJOR(ubi->cdev.dev) == major) {
-+		if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
- 			ubi_num = ubi->ubi_num;
- 			break;
- 		}
-@@ -512,7 +515,7 @@ static void ubi_free_volumes_from(struct
- 	int i;
- 
- 	for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
--		if (!ubi->volumes[i])
-+		if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
- 			continue;
- 		ubi_eba_replace_table(ubi->volumes[i], NULL);
- 		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
-@@ -1094,7 +1097,6 @@ int ubi_detach_mtd_dev(int ubi_num, int
- 		return -EINVAL;
- 
- 	spin_lock(&ubi_devices_lock);
--	put_device(&ubi->dev);
- 	ubi->ref_count -= 1;
- 	if (ubi->ref_count) {
- 		if (!anyway) {
-@@ -1105,6 +1107,13 @@ int ubi_detach_mtd_dev(int ubi_num, int
- 		ubi_err(ubi, "%s reference count %d, destroy anyway",
- 			ubi->ubi_name, ubi->ref_count);
- 	}
-+	ubi->is_dead = true;
-+	spin_unlock(&ubi_devices_lock);
-+
-+	ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
-+
-+	spin_lock(&ubi_devices_lock);
-+	put_device(&ubi->dev);
- 	ubi_devices[ubi_num] = NULL;
- 	spin_unlock(&ubi_devices_lock);
- 
---- a/drivers/mtd/ubi/kapi.c
-+++ b/drivers/mtd/ubi/kapi.c
-@@ -152,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(
- 
- 	spin_lock(&ubi->volumes_lock);
- 	vol = ubi->volumes[vol_id];
--	if (!vol)
-+	if (!vol || vol->is_dead)
- 		goto out_unlock;
- 
- 	err = -EBUSY;
---- a/drivers/mtd/ubi/ubi.h
-+++ b/drivers/mtd/ubi/ubi.h
-@@ -345,6 +345,7 @@ struct ubi_volume {
- 	int writers;
- 	int exclusive;
- 	int metaonly;
-+	bool is_dead;
- 
- 	int reserved_pebs;
- 	int vol_type;
-@@ -564,6 +565,7 @@ struct ubi_device {
- 	spinlock_t volumes_lock;
- 	int ref_count;
- 	int image_seq;
-+	bool is_dead;
- 
- 	int rsvd_pebs;
- 	int avail_pebs;
---- a/drivers/mtd/ubi/vmt.c
-+++ b/drivers/mtd/ubi/vmt.c
-@@ -59,7 +59,7 @@ static ssize_t vol_attribute_show(struct
- 	struct ubi_device *ubi = vol->ubi;
- 
- 	spin_lock(&ubi->volumes_lock);
--	if (!ubi->volumes[vol->vol_id]) {
-+	if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
- 		spin_unlock(&ubi->volumes_lock);
- 		return -ENODEV;
- 	}
-@@ -189,7 +189,7 @@ int ubi_create_volume(struct ubi_device
- 
- 	/* Ensure that the name is unique */
- 	for (i = 0; i < ubi->vtbl_slots; i++)
--		if (ubi->volumes[i] &&
-+		if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
- 		    ubi->volumes[i]->name_len == req->name_len &&
- 		    !strcmp(ubi->volumes[i]->name, req->name)) {
- 			ubi_err(ubi, "volume \"%s\" exists (ID %d)",
-@@ -352,6 +352,19 @@ int ubi_remove_volume(struct ubi_volume_
- 		err = -EBUSY;
- 		goto out_unlock;
- 	}
-+
-+	/*
-+	 * Mark volume as dead at this point to prevent that anyone
-+	 * can take a reference to the volume from now on.
-+	 * This is necessary as we have to release the spinlock before
-+	 * calling ubi_volume_notify.
-+	 */
-+	vol->is_dead = true;
-+	spin_unlock(&ubi->volumes_lock);
-+
-+	ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
-+
-+	spin_lock(&ubi->volumes_lock);
- 	ubi->volumes[vol_id] = NULL;
- 	spin_unlock(&ubi->volumes_lock);
- 
---- a/include/linux/mtd/ubi.h
-+++ b/include/linux/mtd/ubi.h
-@@ -192,6 +192,7 @@ struct ubi_device_info {
-  *			or a volume was removed)
-  * @UBI_VOLUME_RESIZED: a volume has been re-sized
-  * @UBI_VOLUME_RENAMED: a volume has been re-named
-+ * @UBI_VOLUME_SHUTDOWN: a volume is going to removed, shutdown users
-  * @UBI_VOLUME_UPDATED: data has been written to a volume
-  *
-  * These constants define which type of event has happened when a volume
-@@ -202,6 +203,7 @@ enum {
- 	UBI_VOLUME_REMOVED,
- 	UBI_VOLUME_RESIZED,
- 	UBI_VOLUME_RENAMED,
-+	UBI_VOLUME_SHUTDOWN,
- 	UBI_VOLUME_UPDATED,
- };
- 

+ 0 - 66
target/linux/generic/backport-6.6/406-v6.9-mtd-ubi-populate-ubi-volume-fwnode.patch

@@ -1,66 +0,0 @@
-From 1c54542170819e36baa43c17ca55bb3d7da89a53 Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Tue, 19 Dec 2023 02:33:38 +0000
-Subject: [PATCH 6/8] mtd: ubi: populate ubi volume fwnode
-
-Look for the 'volumes' subnode of an MTD partition attached to a UBI
-device and attach matching child nodes to UBI volumes.
-This allows UBI volumes to be referenced in device tree, e.g. for use
-as NVMEM providers.
-
-Signed-off-by: Daniel Golle <[email protected]>
-Signed-off-by: Richard Weinberger <[email protected]>
----
- drivers/mtd/ubi/vmt.c | 27 +++++++++++++++++++++++++++
- 1 file changed, 27 insertions(+)
-
---- a/drivers/mtd/ubi/vmt.c
-+++ b/drivers/mtd/ubi/vmt.c
-@@ -124,6 +124,31 @@ static void vol_release(struct device *d
- 	kfree(vol);
- }
- 
-+static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
-+{
-+	struct fwnode_handle *fw_vols, *fw_vol;
-+	const char *volname;
-+	u32 volid;
-+
-+	fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
-+	if (!fw_vols)
-+		return NULL;
-+
-+	fwnode_for_each_child_node(fw_vols, fw_vol) {
-+		if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
-+		    strncmp(volname, vol->name, vol->name_len))
-+			continue;
-+
-+		if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
-+		    vol->vol_id != volid)
-+			continue;
-+
-+		return fw_vol;
-+	}
-+
-+	return NULL;
-+}
-+
- /**
-  * ubi_create_volume - create volume.
-  * @ubi: UBI device description object
-@@ -223,6 +248,7 @@ int ubi_create_volume(struct ubi_device
- 	vol->name_len  = req->name_len;
- 	memcpy(vol->name, req->name, vol->name_len);
- 	vol->ubi = ubi;
-+	device_set_node(&vol->dev, find_volume_fwnode(vol));
- 
- 	/*
- 	 * Finish all pending erases because there may be some LEBs belonging
-@@ -605,6 +631,7 @@ int ubi_add_volume(struct ubi_device *ub
- 	vol->dev.class = &ubi_class;
- 	vol->dev.groups = volume_dev_groups;
- 	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
-+	device_set_node(&vol->dev, find_volume_fwnode(vol));
- 	err = device_register(&vol->dev);
- 	if (err) {
- 		cdev_del(&vol->cdev);

+ 0 - 244
target/linux/generic/backport-6.6/407-v6.9-mtd-ubi-provide-NVMEM-layer-over-UBI-volumes.patch

@@ -1,244 +0,0 @@
-From 15fc7dc926c91c871f6c0305b2938dbdeb14203b Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Tue, 19 Dec 2023 02:33:48 +0000
-Subject: [PATCH 7/8] mtd: ubi: provide NVMEM layer over UBI volumes
-
-In an ideal world we would like UBI to be used where ever possible on a
-NAND chip. And with UBI support in ARM Trusted Firmware and U-Boot it
-is possible to achieve an (almost-)all-UBI flash layout. Hence the need
-for a way to also use UBI volumes to store board-level constants, such
-as MAC addresses and calibration data of wireless interfaces.
-
-Add UBI volume NVMEM driver module exposing UBI volumes as NVMEM
-providers. Allow UBI devices to have a "volumes" firmware subnode with
-volumes which may be compatible with "nvmem-cells".
-Access to UBI volumes via the NVMEM interface at this point is
-read-only, and it is slow, opening and closing the UBI volume for each
-access due to limitations of the NVMEM provider API.
-
-Signed-off-by: Daniel Golle <[email protected]>
-Signed-off-by: Richard Weinberger <[email protected]>
----
- drivers/mtd/ubi/Kconfig  |  12 +++
- drivers/mtd/ubi/Makefile |   1 +
- drivers/mtd/ubi/nvmem.c  | 188 +++++++++++++++++++++++++++++++++++++++
- 3 files changed, 201 insertions(+)
- create mode 100644 drivers/mtd/ubi/nvmem.c
-
---- a/drivers/mtd/ubi/Kconfig
-+++ b/drivers/mtd/ubi/Kconfig
-@@ -104,4 +104,16 @@ config MTD_UBI_BLOCK
- 
- 	   If in doubt, say "N".
- 
-+config MTD_UBI_NVMEM
-+	tristate "UBI virtual NVMEM"
-+	default n
-+	depends on NVMEM
-+	help
-+	   This option enabled an additional driver exposing UBI volumes as NVMEM
-+	   providers, intended for platforms where UBI is part of the firmware
-+	   specification and used to store also e.g. MAC addresses or board-
-+	   specific Wi-Fi calibration data.
-+
-+	   If in doubt, say "N".
-+
- endif # MTD_UBI
---- a/drivers/mtd/ubi/Makefile
-+++ b/drivers/mtd/ubi/Makefile
-@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap
- ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
- 
- obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
-+obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
---- /dev/null
-+++ b/drivers/mtd/ubi/nvmem.c
-@@ -0,0 +1,188 @@
-+// SPDX-License-Identifier: GPL-2.0-or-later
-+/*
-+ * Copyright (c) 2023 Daniel Golle <[email protected]>
-+ */
-+
-+/* UBI NVMEM provider */
-+#include "ubi.h"
-+#include <linux/nvmem-provider.h>
-+#include <asm/div64.h>
-+
-+/* List of all NVMEM devices */
-+static LIST_HEAD(nvmem_devices);
-+static DEFINE_MUTEX(devices_mutex);
-+
-+struct ubi_nvmem {
-+	struct nvmem_device *nvmem;
-+	int ubi_num;
-+	int vol_id;
-+	int usable_leb_size;
-+	struct list_head list;
-+};
-+
-+static int ubi_nvmem_reg_read(void *priv, unsigned int from,
-+			      void *val, size_t bytes)
-+{
-+	int err = 0, lnum = from, offs, bytes_left = bytes, to_read;
-+	struct ubi_nvmem *unv = priv;
-+	struct ubi_volume_desc *desc;
-+
-+	desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
-+	if (IS_ERR(desc))
-+		return PTR_ERR(desc);
-+
-+	offs = do_div(lnum, unv->usable_leb_size);
-+	while (bytes_left) {
-+		to_read = unv->usable_leb_size - offs;
-+
-+		if (to_read > bytes_left)
-+			to_read = bytes_left;
-+
-+		err = ubi_read(desc, lnum, val, offs, to_read);
-+		if (err)
-+			break;
-+
-+		lnum += 1;
-+		offs = 0;
-+		bytes_left -= to_read;
-+		val += to_read;
-+	}
-+	ubi_close_volume(desc);
-+
-+	if (err)
-+		return err;
-+
-+	return bytes_left == 0 ? 0 : -EIO;
-+}
-+
-+static int ubi_nvmem_add(struct ubi_volume_info *vi)
-+{
-+	struct device_node *np = dev_of_node(vi->dev);
-+	struct nvmem_config config = {};
-+	struct ubi_nvmem *unv;
-+	int ret;
-+
-+	if (!np)
-+		return 0;
-+
-+	if (!of_get_child_by_name(np, "nvmem-layout"))
-+		return 0;
-+
-+	if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
-+	    WARN_ON_ONCE(vi->size <= 0))
-+		return -EINVAL;
-+
-+	unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
-+	if (!unv)
-+		return -ENOMEM;
-+
-+	config.id = NVMEM_DEVID_NONE;
-+	config.dev = vi->dev;
-+	config.name = dev_name(vi->dev);
-+	config.owner = THIS_MODULE;
-+	config.priv = unv;
-+	config.reg_read = ubi_nvmem_reg_read;
-+	config.size = vi->usable_leb_size * vi->size;
-+	config.word_size = 1;
-+	config.stride = 1;
-+	config.read_only = true;
-+	config.root_only = true;
-+	config.ignore_wp = true;
-+	config.of_node = np;
-+
-+	unv->ubi_num = vi->ubi_num;
-+	unv->vol_id = vi->vol_id;
-+	unv->usable_leb_size = vi->usable_leb_size;
-+	unv->nvmem = nvmem_register(&config);
-+	if (IS_ERR(unv->nvmem)) {
-+		ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
-+				    "Failed to register NVMEM device\n");
-+		kfree(unv);
-+		return ret;
-+	}
-+
-+	mutex_lock(&devices_mutex);
-+	list_add_tail(&unv->list, &nvmem_devices);
-+	mutex_unlock(&devices_mutex);
-+
-+	return 0;
-+}
-+
-+static void ubi_nvmem_remove(struct ubi_volume_info *vi)
-+{
-+	struct ubi_nvmem *unv_c, *unv = NULL;
-+
-+	mutex_lock(&devices_mutex);
-+	list_for_each_entry(unv_c, &nvmem_devices, list)
-+		if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
-+			unv = unv_c;
-+			break;
-+		}
-+
-+	if (!unv) {
-+		mutex_unlock(&devices_mutex);
-+		return;
-+	}
-+
-+	list_del(&unv->list);
-+	mutex_unlock(&devices_mutex);
-+	nvmem_unregister(unv->nvmem);
-+	kfree(unv);
-+}
-+
-+/**
-+ * nvmem_notify - UBI notification handler.
-+ * @nb: registered notifier block
-+ * @l: notification type
-+ * @ns_ptr: pointer to the &struct ubi_notification object
-+ */
-+static int nvmem_notify(struct notifier_block *nb, unsigned long l,
-+			 void *ns_ptr)
-+{
-+	struct ubi_notification *nt = ns_ptr;
-+
-+	switch (l) {
-+	case UBI_VOLUME_RESIZED:
-+		ubi_nvmem_remove(&nt->vi);
-+		fallthrough;
-+	case UBI_VOLUME_ADDED:
-+		ubi_nvmem_add(&nt->vi);
-+		break;
-+	case UBI_VOLUME_SHUTDOWN:
-+		ubi_nvmem_remove(&nt->vi);
-+		break;
-+	default:
-+		break;
-+	}
-+	return NOTIFY_OK;
-+}
-+
-+static struct notifier_block nvmem_notifier = {
-+	.notifier_call = nvmem_notify,
-+};
-+
-+static int __init ubi_nvmem_init(void)
-+{
-+	return ubi_register_volume_notifier(&nvmem_notifier, 0);
-+}
-+
-+static void __exit ubi_nvmem_exit(void)
-+{
-+	struct ubi_nvmem *unv, *tmp;
-+
-+	mutex_lock(&devices_mutex);
-+	list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
-+		nvmem_unregister(unv->nvmem);
-+		list_del(&unv->list);
-+		kfree(unv);
-+	}
-+	mutex_unlock(&devices_mutex);
-+
-+	ubi_unregister_volume_notifier(&nvmem_notifier);
-+}
-+
-+module_init(ubi_nvmem_init);
-+module_exit(ubi_nvmem_exit);
-+MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
-+MODULE_AUTHOR("Daniel Golle");
-+MODULE_LICENSE("GPL");

+ 0 - 34
target/linux/generic/backport-6.6/408-v6.9-mtd-ubi-fix-NVMEM-over-UBI-volumes-on-32-bit-systems.patch

@@ -1,34 +0,0 @@
-From 04231c61dcd51db0f12061e49bb761b197109f2f Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Thu, 29 Feb 2024 03:47:24 +0000
-Subject: [PATCH 8/8] mtd: ubi: fix NVMEM over UBI volumes on 32-bit systems
-
-A compiler warning related to sizeof(int) != 8 when calling do_div()
-is triggered when building on 32-bit platforms.
-Address this by using integer types having a well-defined size.
-
-Fixes: 3ce485803da1 ("mtd: ubi: provide NVMEM layer over UBI volumes")
-Signed-off-by: Daniel Golle <[email protected]>
-Reviewed-by: Zhihao Cheng <[email protected]>
-Tested-by: Randy Dunlap <[email protected]>
-Signed-off-by: Richard Weinberger <[email protected]>
----
- drivers/mtd/ubi/nvmem.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/drivers/mtd/ubi/nvmem.c
-+++ b/drivers/mtd/ubi/nvmem.c
-@@ -23,9 +23,12 @@ struct ubi_nvmem {
- static int ubi_nvmem_reg_read(void *priv, unsigned int from,
- 			      void *val, size_t bytes)
- {
--	int err = 0, lnum = from, offs, bytes_left = bytes, to_read;
-+	size_t to_read, bytes_left = bytes;
- 	struct ubi_nvmem *unv = priv;
- 	struct ubi_volume_desc *desc;
-+	uint32_t offs;
-+	uint64_t lnum = from;
-+	int err = 0;
- 
- 	desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
- 	if (IS_ERR(desc))

+ 0 - 53
target/linux/generic/backport-6.6/410-v6.13-01-block-add-support-for-defining-read-only-partitions.patch

@@ -1,53 +0,0 @@
-From 03cb793b26834ddca170ba87057c8f883772dd45 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Thu, 3 Oct 2024 00:11:41 +0200
-Subject: [PATCH 1/5] block: add support for defining read-only partitions
-
-Add support for defining read-only partitions and complete support for
-it in the cmdline partition parser as the additional "ro" after a
-partition is scanned but never actually applied.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Christoph Hellwig <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Jens Axboe <[email protected]>
----
- block/blk.h                | 1 +
- block/partitions/cmdline.c | 3 +++
- block/partitions/core.c    | 3 +++
- 3 files changed, 7 insertions(+)
-
---- a/block/blk.h
-+++ b/block/blk.h
-@@ -424,6 +424,7 @@ void blk_free_ext_minor(unsigned int min
- #define ADDPART_FLAG_NONE	0
- #define ADDPART_FLAG_RAID	1
- #define ADDPART_FLAG_WHOLEDISK	2
-+#define ADDPART_FLAG_READONLY	4
- int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
- 		sector_t length);
- int bdev_del_partition(struct gendisk *disk, int partno);
---- a/block/partitions/cmdline.c
-+++ b/block/partitions/cmdline.c
-@@ -237,6 +237,9 @@ static int add_part(int slot, struct cmd
- 	put_partition(state, slot, subpart->from >> 9,
- 		      subpart->size >> 9);
- 
-+	if (subpart->flags & PF_RDONLY)
-+		state->parts[slot].flags |= ADDPART_FLAG_READONLY;
-+
- 	info = &state->parts[slot].info;
- 
- 	strscpy(info->volname, subpart->name, sizeof(info->volname));
---- a/block/partitions/core.c
-+++ b/block/partitions/core.c
-@@ -392,6 +392,9 @@ static struct block_device *add_partitio
- 			goto out_del;
- 	}
- 
-+	if (flags & ADDPART_FLAG_READONLY)
-+		bdev->bd_read_only = true;
-+
- 	/* everything is up and running, commence */
- 	err = xa_insert(&disk->part_tbl, partno, bdev, GFP_KERNEL);
- 	if (err)

+ 0 - 94
target/linux/generic/backport-6.6/410-v6.13-03-block-introduce-add_disk_fwnode.patch

@@ -1,94 +0,0 @@
-From e5f587242b6072ffab4f4a084a459a59f3035873 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Thu, 3 Oct 2024 00:11:43 +0200
-Subject: [PATCH 3/5] block: introduce add_disk_fwnode()
-
-Introduce add_disk_fwnode() as a replacement of device_add_disk() that
-permits to pass and attach a fwnode to disk dev.
-
-This variant can be useful for eMMC that might have the partition table
-for the disk defined in DT. A parser can later make use of the attached
-fwnode to parse the related table and init the hardcoded partition for
-the disk.
-
-device_add_disk() is converted to a simple wrapper of add_disk_fwnode()
-with the fwnode entry set as NULL.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Christoph Hellwig <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Jens Axboe <[email protected]>
----
- block/genhd.c          | 28 ++++++++++++++++++++++++----
- include/linux/blkdev.h |  3 +++
- 2 files changed, 27 insertions(+), 4 deletions(-)
-
---- a/block/genhd.c
-+++ b/block/genhd.c
-@@ -383,16 +383,18 @@ int disk_scan_partitions(struct gendisk
- }
- 
- /**
-- * device_add_disk - add disk information to kernel list
-+ * add_disk_fwnode - add disk information to kernel list with fwnode
-  * @parent: parent device for the disk
-  * @disk: per-device partitioning information
-  * @groups: Additional per-device sysfs groups
-+ * @fwnode: attached disk fwnode
-  *
-  * This function registers the partitioning information in @disk
-- * with the kernel.
-+ * with the kernel. Also attach a fwnode to the disk device.
-  */
--int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
--				 const struct attribute_group **groups)
-+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
-+				 const struct attribute_group **groups,
-+				 struct fwnode_handle *fwnode)
- 
- {
- 	struct device *ddev = disk_to_dev(disk);
-@@ -451,6 +453,8 @@ int __must_check device_add_disk(struct
- 	ddev->parent = parent;
- 	ddev->groups = groups;
- 	dev_set_name(ddev, "%s", disk->disk_name);
-+	if (fwnode)
-+		device_set_node(ddev, fwnode);
- 	if (!(disk->flags & GENHD_FL_HIDDEN))
- 		ddev->devt = MKDEV(disk->major, disk->first_minor);
- 	ret = device_add(ddev);
-@@ -552,6 +556,22 @@ out_exit_elevator:
- 		elevator_exit(disk->queue);
- 	return ret;
- }
-+EXPORT_SYMBOL_GPL(add_disk_fwnode);
-+
-+/**
-+ * device_add_disk - add disk information to kernel list
-+ * @parent: parent device for the disk
-+ * @disk: per-device partitioning information
-+ * @groups: Additional per-device sysfs groups
-+ *
-+ * This function registers the partitioning information in @disk
-+ * with the kernel.
-+ */
-+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
-+				 const struct attribute_group **groups)
-+{
-+	return add_disk_fwnode(parent, disk, groups, NULL);
-+}
- EXPORT_SYMBOL(device_add_disk);
- 
- static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
---- a/include/linux/blkdev.h
-+++ b/include/linux/blkdev.h
-@@ -741,6 +741,9 @@ static inline unsigned int blk_queue_dep
- #define for_each_bio(_bio)		\
- 	for (; _bio; _bio = _bio->bi_next)
- 
-+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
-+				 const struct attribute_group **groups,
-+				 struct fwnode_handle *fwnode);
- int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
- 				 const struct attribute_group **groups);
- static inline int __must_check add_disk(struct gendisk *disk)

+ 0 - 104
target/linux/generic/backport-6.6/410-v6.13-04-mmc-block-attach-partitions-fwnode-if-found-in-mmc-c.patch

@@ -1,104 +0,0 @@
-From 45ff6c340ddfc2dade74d5b7a8962c778ab7042c Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Thu, 3 Oct 2024 00:11:44 +0200
-Subject: [PATCH 4/5] mmc: block: attach partitions fwnode if found in mmc-card
-
-Attach partitions fwnode if found in mmc-card and register disk with it.
-
-This permits block partition to reference the node and register a
-partition table defined in DT for the special case for embedded device
-that doesn't have a partition table flashed but have an hardcoded
-partition table passed from the system.
-
-JEDEC BOOT partition boot0/boot1 are supported but in DT we refer with
-the JEDEC name of boot1 and boot2 to better adhere to documentation.
-
-Also JEDEC GP partition gp0/1/2/3 are supported but in DT we refer with
-the JEDEC name of gp1/2/3/4 to better adhere to documentration.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Linus Walleij <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Jens Axboe <[email protected]>
----
- drivers/mmc/core/block.c | 55 +++++++++++++++++++++++++++++++++++++++-
- 1 file changed, 54 insertions(+), 1 deletion(-)
-
---- a/drivers/mmc/core/block.c
-+++ b/drivers/mmc/core/block.c
-@@ -2455,6 +2455,56 @@ static inline int mmc_blk_readonly(struc
- 	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
- }
- 
-+/*
-+ * Search for a declared partitions node for the disk in mmc-card related node.
-+ *
-+ * This is to permit support for partition table defined in DT in special case
-+ * where a partition table is not written in the disk and is expected to be
-+ * passed from the running system.
-+ *
-+ * For the user disk, "partitions" node is searched.
-+ * For the special HW disk, "partitions-" node with the appended name is used
-+ * following this conversion table (to adhere to JEDEC naming)
-+ * - boot0 -> partitions-boot1
-+ * - boot1 -> partitions-boot2
-+ * - gp0 -> partitions-gp1
-+ * - gp1 -> partitions-gp2
-+ * - gp2 -> partitions-gp3
-+ * - gp3 -> partitions-gp4
-+ */
-+static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
-+							 const char *subname)
-+{
-+	const char *node_name = "partitions";
-+
-+	if (subname) {
-+		mmc_dev = mmc_dev->parent;
-+
-+		/*
-+		 * Check if we are allocating a BOOT disk boot0/1 disk.
-+		 * In DT we use the JEDEC naming boot1/2.
-+		 */
-+		if (!strcmp(subname, "boot0"))
-+			node_name = "partitions-boot1";
-+		if (!strcmp(subname, "boot1"))
-+			node_name = "partitions-boot2";
-+		/*
-+		 * Check if we are allocating a GP disk gp0/1/2/3 disk.
-+		 * In DT we use the JEDEC naming gp1/2/3/4.
-+		 */
-+		if (!strcmp(subname, "gp0"))
-+			node_name = "partitions-gp1";
-+		if (!strcmp(subname, "gp1"))
-+			node_name = "partitions-gp2";
-+		if (!strcmp(subname, "gp2"))
-+			node_name = "partitions-gp3";
-+		if (!strcmp(subname, "gp3"))
-+			node_name = "partitions-gp4";
-+	}
-+
-+	return device_get_named_child_node(mmc_dev, node_name);
-+}
-+
- static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
- 					      struct device *parent,
- 					      sector_t size,
-@@ -2463,6 +2513,7 @@ static struct mmc_blk_data *mmc_blk_allo
- 					      int area_type,
- 					      unsigned int part_type)
- {
-+	struct fwnode_handle *disk_fwnode;
- 	struct mmc_blk_data *md;
- 	int devidx, ret;
- 	char cap_str[10];
-@@ -2568,7 +2619,9 @@ static struct mmc_blk_data *mmc_blk_allo
- 	/* used in ->open, must be set before add_disk: */
- 	if (area_type == MMC_BLK_DATA_AREA_MAIN)
- 		dev_set_drvdata(&card->dev, md);
--	ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
-+	disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
-+	ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
-+			      disk_fwnode);
- 	if (ret)
- 		goto err_put_disk;
- 	return md;

+ 0 - 200
target/linux/generic/backport-6.6/410-v6.13-05-block-add-support-for-partition-table-defined-in-OF.patch

@@ -1,200 +0,0 @@
-From 884555b557e5e6d41c866e2cd8d7b32f50ec974b Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Thu, 3 Oct 2024 00:11:45 +0200
-Subject: [PATCH 5/5] block: add support for partition table defined in OF
-
-Add support for partition table defined in Device Tree. Similar to how
-it's done with MTD, add support for defining a fixed partition table in
-device tree.
-
-A common scenario for this is fixed block (eMMC) embedded devices that
-have no MBR or GPT partition table to save storage space. Bootloader
-access the block device with absolute address of data.
-
-This is to complete the functionality with an equivalent implementation
-with providing partition table with bootargs, for case where the booargs
-can't be modified and tweaking the Device Tree is the only solution to
-have an usabe partition table.
-
-The implementation follow the fixed-partitions parser used on MTD
-devices where a "partitions" node is expected to be declared with
-"fixed-partitions" compatible in the OF node of the disk device
-(mmc-card for eMMC for example) and each child node declare a label
-and a reg with offset and size. If label is not declared, the node name
-is used as fallback. Eventually is also possible to declare the read-only
-property to flag the partition as read-only.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Christoph Hellwig <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Jens Axboe <[email protected]>
----
- block/partitions/Kconfig  |   9 ++++
- block/partitions/Makefile |   1 +
- block/partitions/check.h  |   1 +
- block/partitions/core.c   |   3 ++
- block/partitions/of.c     | 110 ++++++++++++++++++++++++++++++++++++++
- 5 files changed, 124 insertions(+)
- create mode 100644 block/partitions/of.c
-
---- a/block/partitions/Kconfig
-+++ b/block/partitions/Kconfig
-@@ -270,4 +270,13 @@ config CMDLINE_PARTITION
- 	  Say Y here if you want to read the partition table from bootargs.
- 	  The format for the command line is just like mtdparts.
- 
-+config OF_PARTITION
-+	bool "Device Tree partition support" if PARTITION_ADVANCED
-+	depends on OF
-+	help
-+	  Say Y here if you want to enable support for partition table
-+	  defined in Device Tree. (mainly for eMMC)
-+	  The format for the device tree node is just like MTD fixed-partition
-+	  schema.
-+
- endmenu
---- a/block/partitions/Makefile
-+++ b/block/partitions/Makefile
-@@ -12,6 +12,7 @@ obj-$(CONFIG_CMDLINE_PARTITION) += cmdli
- obj-$(CONFIG_MAC_PARTITION) += mac.o
- obj-$(CONFIG_LDM_PARTITION) += ldm.o
- obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
-+obj-$(CONFIG_OF_PARTITION) += of.o
- obj-$(CONFIG_OSF_PARTITION) += osf.o
- obj-$(CONFIG_SGI_PARTITION) += sgi.o
- obj-$(CONFIG_SUN_PARTITION) += sun.o
---- a/block/partitions/check.h
-+++ b/block/partitions/check.h
-@@ -62,6 +62,7 @@ int karma_partition(struct parsed_partit
- int ldm_partition(struct parsed_partitions *state);
- int mac_partition(struct parsed_partitions *state);
- int msdos_partition(struct parsed_partitions *state);
-+int of_partition(struct parsed_partitions *state);
- int osf_partition(struct parsed_partitions *state);
- int sgi_partition(struct parsed_partitions *state);
- int sun_partition(struct parsed_partitions *state);
---- a/block/partitions/core.c
-+++ b/block/partitions/core.c
-@@ -43,6 +43,9 @@ static int (*const check_part[])(struct
- #ifdef CONFIG_CMDLINE_PARTITION
- 	cmdline_partition,
- #endif
-+#ifdef CONFIG_OF_PARTITION
-+	of_partition,		/* cmdline have priority to OF */
-+#endif
- #ifdef CONFIG_EFI_PARTITION
- 	efi_partition,		/* this must come before msdos */
- #endif
---- /dev/null
-+++ b/block/partitions/of.c
-@@ -0,0 +1,110 @@
-+// SPDX-License-Identifier: GPL-2.0
-+
-+#include <linux/blkdev.h>
-+#include <linux/major.h>
-+#include <linux/of.h>
-+#include <linux/string.h>
-+#include "check.h"
-+
-+static int validate_of_partition(struct device_node *np, int slot)
-+{
-+	u64 offset, size;
-+	int len;
-+
-+	const __be32 *reg = of_get_property(np, "reg", &len);
-+	int a_cells = of_n_addr_cells(np);
-+	int s_cells = of_n_size_cells(np);
-+
-+	/* Make sure reg len match the expected addr and size cells */
-+	if (len / sizeof(*reg) != a_cells + s_cells)
-+		return -EINVAL;
-+
-+	/* Validate offset conversion from bytes to sectors */
-+	offset = of_read_number(reg, a_cells);
-+	if (offset % SECTOR_SIZE)
-+		return -EINVAL;
-+
-+	/* Validate size conversion from bytes to sectors */
-+	size = of_read_number(reg + a_cells, s_cells);
-+	if (!size || size % SECTOR_SIZE)
-+		return -EINVAL;
-+
-+	return 0;
-+}
-+
-+static void add_of_partition(struct parsed_partitions *state, int slot,
-+			     struct device_node *np)
-+{
-+	struct partition_meta_info *info;
-+	char tmp[sizeof(info->volname) + 4];
-+	const char *partname;
-+	int len;
-+
-+	const __be32 *reg = of_get_property(np, "reg", &len);
-+	int a_cells = of_n_addr_cells(np);
-+	int s_cells = of_n_size_cells(np);
-+
-+	/* Convert bytes to sector size */
-+	u64 offset = of_read_number(reg, a_cells) / SECTOR_SIZE;
-+	u64 size = of_read_number(reg + a_cells, s_cells) / SECTOR_SIZE;
-+
-+	put_partition(state, slot, offset, size);
-+
-+	if (of_property_read_bool(np, "read-only"))
-+		state->parts[slot].flags |= ADDPART_FLAG_READONLY;
-+
-+	/*
-+	 * Follow MTD label logic, search for label property,
-+	 * fallback to node name if not found.
-+	 */
-+	info = &state->parts[slot].info;
-+	partname = of_get_property(np, "label", &len);
-+	if (!partname)
-+		partname = of_get_property(np, "name", &len);
-+	strscpy(info->volname, partname, sizeof(info->volname));
-+
-+	snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
-+	strlcat(state->pp_buf, tmp, PAGE_SIZE);
-+}
-+
-+int of_partition(struct parsed_partitions *state)
-+{
-+	struct device *ddev = disk_to_dev(state->disk);
-+	struct device_node *np;
-+	int slot;
-+
-+	struct device_node *partitions_np = of_node_get(ddev->of_node);
-+
-+	if (!partitions_np ||
-+	    !of_device_is_compatible(partitions_np, "fixed-partitions"))
-+		return 0;
-+
-+	slot = 1;
-+	/* Validate parition offset and size */
-+	for_each_child_of_node(partitions_np, np) {
-+		if (validate_of_partition(np, slot)) {
-+			of_node_put(np);
-+			of_node_put(partitions_np);
-+
-+			return -1;
-+		}
-+
-+		slot++;
-+	}
-+
-+	slot = 1;
-+	for_each_child_of_node(partitions_np, np) {
-+		if (slot >= state->limit) {
-+			of_node_put(np);
-+			break;
-+		}
-+
-+		add_of_partition(state, slot, np);
-+
-+		slot++;
-+	}
-+
-+	strlcat(state->pp_buf, "\n", PAGE_SIZE);
-+
-+	return 1;
-+}

+ 0 - 146
target/linux/generic/backport-6.6/411-v6.7-mtd-spinand-add-support-for-FORESEE-F35SQA002G.patch

@@ -1,146 +0,0 @@
-From 49c8e854869d673df8452f24dfa8989cd0f615a8 Mon Sep 17 00:00:00 2001
-From: Martin Kurbanov <[email protected]>
-Date: Mon, 2 Oct 2023 17:04:58 +0300
-Subject: [PATCH] mtd: spinand: add support for FORESEE F35SQA002G
-
-Add support for FORESEE F35SQA002G SPI NAND.
-Datasheet:
-  https://www.longsys.com/uploads/LM-00006FORESEEF35SQA002GDatasheet_1650183701.pdf
-
-Signed-off-by: Martin Kurbanov <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
-Link: https://lore.kernel.org/linux-mtd/[email protected]
----
- drivers/mtd/nand/spi/Makefile  |  2 +-
- drivers/mtd/nand/spi/core.c    |  1 +
- drivers/mtd/nand/spi/foresee.c | 95 ++++++++++++++++++++++++++++++++++
- include/linux/mtd/spinand.h    |  1 +
- 4 files changed, 98 insertions(+), 1 deletion(-)
- create mode 100644 drivers/mtd/nand/spi/foresee.c
-
---- a/drivers/mtd/nand/spi/Makefile
-+++ b/drivers/mtd/nand/spi/Makefile
-@@ -1,4 +1,4 @@
- # SPDX-License-Identifier: GPL-2.0
--spinand-objs := core.o alliancememory.o ato.o esmt.o gigadevice.o macronix.o
-+spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
- spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
- obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
---- a/drivers/mtd/nand/spi/core.c
-+++ b/drivers/mtd/nand/spi/core.c
-@@ -943,6 +943,7 @@ static const struct spinand_manufacturer
- 	&alliancememory_spinand_manufacturer,
- 	&ato_spinand_manufacturer,
- 	&esmt_c8_spinand_manufacturer,
-+	&foresee_spinand_manufacturer,
- 	&gigadevice_spinand_manufacturer,
- 	&macronix_spinand_manufacturer,
- 	&micron_spinand_manufacturer,
---- /dev/null
-+++ b/drivers/mtd/nand/spi/foresee.c
-@@ -0,0 +1,95 @@
-+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
-+/*
-+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
-+ *
-+ * Author: Martin Kurbanov <[email protected]>
-+ */
-+
-+#include <linux/device.h>
-+#include <linux/kernel.h>
-+#include <linux/mtd/spinand.h>
-+
-+#define SPINAND_MFR_FORESEE		0xCD
-+
-+static SPINAND_OP_VARIANTS(read_cache_variants,
-+		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-+		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-+		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
-+		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
-+
-+static SPINAND_OP_VARIANTS(write_cache_variants,
-+		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-+		SPINAND_PROG_LOAD(true, 0, NULL, 0));
-+
-+static SPINAND_OP_VARIANTS(update_cache_variants,
-+		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-+		SPINAND_PROG_LOAD(false, 0, NULL, 0));
-+
-+static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
-+				    struct mtd_oob_region *region)
-+{
-+	return -ERANGE;
-+}
-+
-+static int f35sqa002g_ooblayout_free(struct mtd_info *mtd, int section,
-+				     struct mtd_oob_region *region)
-+{
-+	if (section)
-+		return -ERANGE;
-+
-+	/* Reserve 2 bytes for the BBM. */
-+	region->offset = 2;
-+	region->length = 62;
-+
-+	return 0;
-+}
-+
-+static const struct mtd_ooblayout_ops f35sqa002g_ooblayout = {
-+	.ecc = f35sqa002g_ooblayout_ecc,
-+	.free = f35sqa002g_ooblayout_free,
-+};
-+
-+static int f35sqa002g_ecc_get_status(struct spinand_device *spinand, u8 status)
-+{
-+	struct nand_device *nand = spinand_to_nand(spinand);
-+
-+	switch (status & STATUS_ECC_MASK) {
-+	case STATUS_ECC_NO_BITFLIPS:
-+		return 0;
-+
-+	case STATUS_ECC_HAS_BITFLIPS:
-+		return nanddev_get_ecc_conf(nand)->strength;
-+
-+	default:
-+		break;
-+	}
-+
-+	/* More than 1-bit error was detected in one or more sectors and
-+	 * cannot be corrected.
-+	 */
-+	return -EBADMSG;
-+}
-+
-+static const struct spinand_info foresee_spinand_table[] = {
-+	SPINAND_INFO("F35SQA002G",
-+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72, 0x72),
-+		     NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
-+		     NAND_ECCREQ(1, 512),
-+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
-+					      &write_cache_variants,
-+					      &update_cache_variants),
-+		     SPINAND_HAS_QE_BIT,
-+		     SPINAND_ECCINFO(&f35sqa002g_ooblayout,
-+				     f35sqa002g_ecc_get_status)),
-+};
-+
-+static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
-+};
-+
-+const struct spinand_manufacturer foresee_spinand_manufacturer = {
-+	.id = SPINAND_MFR_FORESEE,
-+	.name = "FORESEE",
-+	.chips = foresee_spinand_table,
-+	.nchips = ARRAY_SIZE(foresee_spinand_table),
-+	.ops = &foresee_spinand_manuf_ops,
-+};
---- a/include/linux/mtd/spinand.h
-+++ b/include/linux/mtd/spinand.h
-@@ -263,6 +263,7 @@ struct spinand_manufacturer {
- extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
- extern const struct spinand_manufacturer ato_spinand_manufacturer;
- extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
-+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
- extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
- extern const struct spinand_manufacturer macronix_spinand_manufacturer;
- extern const struct spinand_manufacturer micron_spinand_manufacturer;

+ 0 - 38
target/linux/generic/backport-6.6/412-v6.14-mtd-spinand-add-support-for-FORESEE-F35SQA001G.patch

@@ -1,38 +0,0 @@
-From ae461cde5c559675fc4c0ba351c7c31ace705f56 Mon Sep 17 00:00:00 2001
-From: Bohdan Chubuk <[email protected]>
-Date: Sun, 10 Nov 2024 22:50:47 +0200
-Subject: [PATCH] mtd: spinand: add support for FORESEE F35SQA001G
-
-Add support for FORESEE F35SQA001G SPI NAND.
-
-Similar to F35SQA002G, but differs in capacity.
-Datasheet:
-  -  https://cdn.ozdisan.com/ETicaret_Dosya/704795_871495.pdf
-
-Tested on Xiaomi AX3000T flashed with OpenWRT.
-
-Signed-off-by: Bohdan Chubuk <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
----
- drivers/mtd/nand/spi/foresee.c | 10 ++++++++++
- 1 file changed, 10 insertions(+)
-
---- a/drivers/mtd/nand/spi/foresee.c
-+++ b/drivers/mtd/nand/spi/foresee.c
-@@ -81,6 +81,16 @@ static const struct spinand_info foresee
- 		     SPINAND_HAS_QE_BIT,
- 		     SPINAND_ECCINFO(&f35sqa002g_ooblayout,
- 				     f35sqa002g_ecc_get_status)),
-+	SPINAND_INFO("F35SQA001G",
-+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71, 0x71),
-+		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
-+		     NAND_ECCREQ(1, 512),
-+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
-+					      &write_cache_variants,
-+					      &update_cache_variants),
-+		     SPINAND_HAS_QE_BIT,
-+		     SPINAND_ECCINFO(&f35sqa002g_ooblayout,
-+				     f35sqa002g_ecc_get_status)),
- };
- 
- static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {

+ 0 - 1026
target/linux/generic/backport-6.6/413-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch

@@ -1,1026 +0,0 @@
-From 8c52932da5e6756fa66f52f0720da283fba13aa6 Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <[email protected]>
-Date: Wed, 20 Nov 2024 14:45:00 +0530
-Subject: [PATCH 1/4] mtd: rawnand: qcom: cleanup qcom_nandc driver
-
-Perform a global cleanup of the Qualcomm NAND
-controller driver with the following improvements:
-
-- Remove register value indirection API
-
-- Remove set_reg() API
-
-- Convert read_loc_first & read_loc_last macro to functions
-
-- Rename multiple variables
-
-Signed-off-by: Md Sadre Alam <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
----
- drivers/mtd/nand/raw/qcom_nandc.c | 516 ++++++++++++++----------------
- 1 file changed, 234 insertions(+), 282 deletions(-)
-
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -189,17 +189,6 @@
- #define	ECC_BCH_4BIT	BIT(2)
- #define	ECC_BCH_8BIT	BIT(3)
- 
--#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)	\
--nandc_set_reg(chip, reg,			\
--	      ((cw_offset) << READ_LOCATION_OFFSET) |		\
--	      ((read_size) << READ_LOCATION_SIZE) |			\
--	      ((is_last_read_loc) << READ_LOCATION_LAST))
--
--#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)	\
--nandc_set_reg(chip, reg,			\
--	      ((cw_offset) << READ_LOCATION_OFFSET) |		\
--	      ((read_size) << READ_LOCATION_SIZE) |			\
--	      ((is_last_read_loc) << READ_LOCATION_LAST))
- /*
-  * Returns the actual register address for all NAND_DEV_ registers
-  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg,			\
-  * @tx_sgl_start - start index in data sgl for tx.
-  * @rx_sgl_pos - current index in data sgl for rx.
-  * @rx_sgl_start - start index in data sgl for rx.
-- * @wait_second_completion - wait for second DMA desc completion before making
-- *			     the NAND transfer completion.
-  */
- struct bam_transaction {
- 	struct bam_cmd_element *bam_ce;
-@@ -275,7 +262,6 @@ struct bam_transaction {
- 	u32 tx_sgl_start;
- 	u32 rx_sgl_pos;
- 	u32 rx_sgl_start;
--	bool wait_second_completion;
- };
- 
- /*
-@@ -471,9 +457,9 @@ struct qcom_op {
- 	unsigned int data_instr_idx;
- 	unsigned int rdy_timeout_ms;
- 	unsigned int rdy_delay_ns;
--	u32 addr1_reg;
--	u32 addr2_reg;
--	u32 cmd_reg;
-+	__le32 addr1_reg;
-+	__le32 addr2_reg;
-+	__le32 cmd_reg;
- 	u8 flag;
- };
- 
-@@ -549,17 +535,17 @@ struct qcom_nand_host {
-  * among different NAND controllers.
-  * @ecc_modes - ecc mode for NAND
-  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-- * @is_bam - whether NAND controller is using BAM
-- * @is_qpic - whether NAND CTRL is part of qpic IP
-- * @qpic_v2 - flag to indicate QPIC IP version 2
-+ * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
-+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-+ * @qpic_version2 - flag to indicate QPIC IP version 2
-  * @use_codeword_fixup - whether NAND has different layout for boot partitions
-  */
- struct qcom_nandc_props {
- 	u32 ecc_modes;
- 	u32 dev_cmd_reg_start;
--	bool is_bam;
--	bool is_qpic;
--	bool qpic_v2;
-+	bool supports_bam;
-+	bool nandc_part_of_qpic;
-+	bool qpic_version2;
- 	bool use_codeword_fixup;
- };
- 
-@@ -613,19 +599,11 @@ static void clear_bam_transaction(struct
- {
- 	struct bam_transaction *bam_txn = nandc->bam_txn;
- 
--	if (!nandc->props->is_bam)
-+	if (!nandc->props->supports_bam)
- 		return;
- 
--	bam_txn->bam_ce_pos = 0;
--	bam_txn->bam_ce_start = 0;
--	bam_txn->cmd_sgl_pos = 0;
--	bam_txn->cmd_sgl_start = 0;
--	bam_txn->tx_sgl_pos = 0;
--	bam_txn->tx_sgl_start = 0;
--	bam_txn->rx_sgl_pos = 0;
--	bam_txn->rx_sgl_start = 0;
-+	memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
- 	bam_txn->last_data_desc = NULL;
--	bam_txn->wait_second_completion = false;
- 
- 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
- 		      QPIC_PER_CW_CMD_SGL);
-@@ -640,46 +618,35 @@ static void qpic_bam_dma_done(void *data
- {
- 	struct bam_transaction *bam_txn = data;
- 
--	/*
--	 * In case of data transfer with NAND, 2 callbacks will be generated.
--	 * One for command channel and another one for data channel.
--	 * If current transaction has data descriptors
--	 * (i.e. wait_second_completion is true), then set this to false
--	 * and wait for second DMA descriptor completion.
--	 */
--	if (bam_txn->wait_second_completion)
--		bam_txn->wait_second_completion = false;
--	else
--		complete(&bam_txn->txn_done);
-+	complete(&bam_txn->txn_done);
- }
- 
--static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
-+static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
- {
- 	return container_of(chip, struct qcom_nand_host, chip);
- }
- 
--static inline struct qcom_nand_controller *
-+static struct qcom_nand_controller *
- get_qcom_nand_controller(struct nand_chip *chip)
- {
- 	return container_of(chip->controller, struct qcom_nand_controller,
- 			    controller);
- }
- 
--static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
-+static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
- {
- 	return ioread32(nandc->base + offset);
- }
- 
--static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
--			       u32 val)
-+static void nandc_write(struct qcom_nand_controller *nandc, int offset,
-+			u32 val)
- {
- 	iowrite32(val, nandc->base + offset);
- }
- 
--static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
--					  bool is_cpu)
-+static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
- {
--	if (!nandc->props->is_bam)
-+	if (!nandc->props->supports_bam)
- 		return;
- 
- 	if (is_cpu)
-@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_syn
- 					   DMA_FROM_DEVICE);
- }
- 
--static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
--{
--	switch (offset) {
--	case NAND_FLASH_CMD:
--		return &regs->cmd;
--	case NAND_ADDR0:
--		return &regs->addr0;
--	case NAND_ADDR1:
--		return &regs->addr1;
--	case NAND_FLASH_CHIP_SELECT:
--		return &regs->chip_sel;
--	case NAND_EXEC_CMD:
--		return &regs->exec;
--	case NAND_FLASH_STATUS:
--		return &regs->clrflashstatus;
--	case NAND_DEV0_CFG0:
--		return &regs->cfg0;
--	case NAND_DEV0_CFG1:
--		return &regs->cfg1;
--	case NAND_DEV0_ECC_CFG:
--		return &regs->ecc_bch_cfg;
--	case NAND_READ_STATUS:
--		return &regs->clrreadstatus;
--	case NAND_DEV_CMD1:
--		return &regs->cmd1;
--	case NAND_DEV_CMD1_RESTORE:
--		return &regs->orig_cmd1;
--	case NAND_DEV_CMD_VLD:
--		return &regs->vld;
--	case NAND_DEV_CMD_VLD_RESTORE:
--		return &regs->orig_vld;
--	case NAND_EBI2_ECC_BUF_CFG:
--		return &regs->ecc_buf_cfg;
--	case NAND_READ_LOCATION_0:
--		return &regs->read_location0;
--	case NAND_READ_LOCATION_1:
--		return &regs->read_location1;
--	case NAND_READ_LOCATION_2:
--		return &regs->read_location2;
--	case NAND_READ_LOCATION_3:
--		return &regs->read_location3;
--	case NAND_READ_LOCATION_LAST_CW_0:
--		return &regs->read_location_last0;
--	case NAND_READ_LOCATION_LAST_CW_1:
--		return &regs->read_location_last1;
--	case NAND_READ_LOCATION_LAST_CW_2:
--		return &regs->read_location_last2;
--	case NAND_READ_LOCATION_LAST_CW_3:
--		return &regs->read_location_last3;
--	default:
--		return NULL;
--	}
--}
--
--static void nandc_set_reg(struct nand_chip *chip, int offset,
--			  u32 val)
--{
--	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
--	struct nandc_regs *regs = nandc->regs;
--	__le32 *reg;
--
--	reg = offset_to_nandc_reg(regs, offset);
--
--	if (reg)
--		*reg = cpu_to_le32(val);
--}
--
--/* Helper to check the code word, whether it is last cw or not */
-+/* Helper to check whether this is the last CW or not */
- static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
- {
- 	return cw == (ecc->steps - 1);
- }
- 
-+/**
-+ * nandc_set_read_loc_first() - to set read location first register
-+ * @chip:		NAND Private Flash Chip Data
-+ * @reg_base:		location register base
-+ * @cw_offset:		code word offset
-+ * @read_size:		code word read length
-+ * @is_last_read_loc:	is this the last read location
-+ *
-+ * This function will set location register value
-+ */
-+static void nandc_set_read_loc_first(struct nand_chip *chip,
-+				     int reg_base, u32 cw_offset,
-+				     u32 read_size, u32 is_last_read_loc)
-+{
-+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-+	__le32 locreg_val;
-+	u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+		  ((read_size) << READ_LOCATION_SIZE) |
-+		  ((is_last_read_loc) << READ_LOCATION_LAST));
-+
-+	locreg_val = cpu_to_le32(val);
-+
-+	if (reg_base == NAND_READ_LOCATION_0)
-+		nandc->regs->read_location0 = locreg_val;
-+	else if (reg_base == NAND_READ_LOCATION_1)
-+		nandc->regs->read_location1 = locreg_val;
-+	else if (reg_base == NAND_READ_LOCATION_2)
-+		nandc->regs->read_location2 = locreg_val;
-+	else if (reg_base == NAND_READ_LOCATION_3)
-+		nandc->regs->read_location3 = locreg_val;
-+}
-+
-+/**
-+ * nandc_set_read_loc_last - to set read location last register
-+ * @chip:		NAND Private Flash Chip Data
-+ * @reg_base:		location register base
-+ * @cw_offset:		code word offset
-+ * @read_size:		code word read length
-+ * @is_last_read_loc:	is this the last read location
-+ *
-+ * This function will set location last register value
-+ */
-+static void nandc_set_read_loc_last(struct nand_chip *chip,
-+				    int reg_base, u32 cw_offset,
-+				    u32 read_size, u32 is_last_read_loc)
-+{
-+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-+	__le32 locreg_val;
-+	u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+		  ((read_size) << READ_LOCATION_SIZE) |
-+		  ((is_last_read_loc) << READ_LOCATION_LAST));
-+
-+	locreg_val = cpu_to_le32(val);
-+
-+	if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
-+		nandc->regs->read_location_last0 = locreg_val;
-+	else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
-+		nandc->regs->read_location_last1 = locreg_val;
-+	else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
-+		nandc->regs->read_location_last2 = locreg_val;
-+	else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
-+		nandc->regs->read_location_last3 = locreg_val;
-+}
-+
- /* helper to configure location register values */
- static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
--			       int cw_offset, int read_size, int is_last_read_loc)
-+			       u32 cw_offset, u32 read_size, u32 is_last_read_loc)
- {
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 	struct nand_ecc_ctrl *ecc = &chip->ecc;
- 	int reg_base = NAND_READ_LOCATION_0;
- 
--	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-+	if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
- 		reg_base = NAND_READ_LOCATION_LAST_CW_0;
- 
- 	reg_base += reg * 4;
- 
--	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
-+	if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
- 		return nandc_set_read_loc_last(chip, reg_base, cw_offset,
- 				read_size, is_last_read_loc);
- 	else
-@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct na
- static void set_address(struct qcom_nand_host *host, u16 column, int page)
- {
- 	struct nand_chip *chip = &host->chip;
-+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 
- 	if (chip->options & NAND_BUSWIDTH_16)
- 		column >>= 1;
- 
--	nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
--	nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
-+	nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
-+	nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
- }
- 
- /*
-@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand
- static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
- {
- 	struct nand_chip *chip = &host->chip;
--	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
-+	__le32 cmd, cfg0, cfg1, ecc_bch_cfg;
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 
- 	if (read) {
- 		if (host->use_ecc)
--			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
-+			cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
- 		else
--			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
-+			cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
- 	} else {
--		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
-+		cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
- 	}
- 
- 	if (host->use_ecc) {
--		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
--				(num_cw - 1) << CW_PER_PAGE;
-+		cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
-+				(num_cw - 1) << CW_PER_PAGE);
- 
--		cfg1 = host->cfg1;
--		ecc_bch_cfg = host->ecc_bch_cfg;
-+		cfg1 = cpu_to_le32(host->cfg1);
-+		ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
- 	} else {
--		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
--				(num_cw - 1) << CW_PER_PAGE;
-+		cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-+				(num_cw - 1) << CW_PER_PAGE);
- 
--		cfg1 = host->cfg1_raw;
--		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
-+		cfg1 = cpu_to_le32(host->cfg1_raw);
-+		ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
- 	}
- 
--	nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
--	nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
--	nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
--	nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
--	if (!nandc->props->qpic_v2)
--		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
--	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
--	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
--	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+	nandc->regs->cmd = cmd;
-+	nandc->regs->cfg0 = cfg0;
-+	nandc->regs->cfg1 = cfg1;
-+	nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
-+
-+	if (!nandc->props->qpic_version2)
-+		nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
-+
-+	nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
-+	nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
-+	nandc->regs->exec = cpu_to_le32(1);
- 
- 	if (read)
- 		nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
-@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand
- 	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
- 		first = dev_cmd_reg_addr(nandc, first);
- 
--	if (nandc->props->is_bam)
-+	if (nandc->props->supports_bam)
- 		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
- 					     num_regs, flags);
- 
-@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand
-  * write_reg_dma:	prepares a descriptor to write a given number of
-  *			contiguous registers
-  *
-+ * @vaddr:		contiguous memory from where register value will
-+ *			be written
-  * @first:		offset of the first register in the contiguous block
-  * @num_regs:		number of registers to write
-  * @flags:		flags to control DMA descriptor preparation
-  */
--static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
--			 int num_regs, unsigned int flags)
-+static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+			 int first, int num_regs, unsigned int flags)
- {
- 	bool flow_control = false;
--	struct nandc_regs *regs = nandc->regs;
--	void *vaddr;
--
--	vaddr = offset_to_nandc_reg(regs, first);
--
--	if (first == NAND_ERASED_CW_DETECT_CFG) {
--		if (flags & NAND_ERASED_CW_SET)
--			vaddr = &regs->erased_cw_detect_cfg_set;
--		else
--			vaddr = &regs->erased_cw_detect_cfg_clr;
--	}
- 
- 	if (first == NAND_EXEC_CMD)
- 		flags |= NAND_BAM_NWD;
-@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nan
- 	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
- 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
- 
--	if (nandc->props->is_bam)
-+	if (nandc->props->supports_bam)
- 		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
- 					     num_regs, flags);
- 
-@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nan
- static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- 			 const u8 *vaddr, int size, unsigned int flags)
- {
--	if (nandc->props->is_bam)
-+	if (nandc->props->supports_bam)
- 		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
- 
- 	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nan
- static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- 			  const u8 *vaddr, int size, unsigned int flags)
- {
--	if (nandc->props->is_bam)
-+	if (nandc->props->supports_bam)
- 		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
- 
- 	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct
- {
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 
--	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
--	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
--	if (!nandc->props->qpic_v2)
--		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
--	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
--	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
--		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	if (!nandc->props->qpic_version2)
-+		write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+	write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-+		      NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+	write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-+		      NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
- }
- 
- /*
-@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *ch
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 	struct nand_ecc_ctrl *ecc = &chip->ecc;
- 
--	int reg = NAND_READ_LOCATION_0;
-+	__le32 *reg = &nandc->regs->read_location0;
- 
--	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
--		reg = NAND_READ_LOCATION_LAST_CW_0;
-+	if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
-+		reg = &nandc->regs->read_location_last0;
- 
--	if (nandc->props->is_bam)
--		write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
-+	if (nandc->props->supports_bam)
-+		write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
- 
--	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- 
- 	if (use_ecc) {
- 		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struc
- {
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 
--	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
--	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
--	if (!nandc->props->qpic_v2)
--		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
-+	write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	if (!nandc->props->qpic_version2)
-+		write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
- 			      NAND_BAM_NEXT_SGL);
- }
- 
-@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct
- {
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 
--	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- 
- 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- 
--	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
--	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+	write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
- }
- 
- /* helpers to submit/free our list of dma descriptors */
-@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand
- 	struct bam_transaction *bam_txn = nandc->bam_txn;
- 	int ret = 0;
- 
--	if (nandc->props->is_bam) {
-+	if (nandc->props->supports_bam) {
- 		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- 			ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- 			if (ret)
-@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand
- 	list_for_each_entry(desc, &nandc->desc_list, node)
- 		cookie = dmaengine_submit(desc->dma_desc);
- 
--	if (nandc->props->is_bam) {
-+	if (nandc->props->supports_bam) {
- 		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
- 		bam_txn->last_cmd_desc->callback_param = bam_txn;
--		if (bam_txn->last_data_desc) {
--			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
--			bam_txn->last_data_desc->callback_param = bam_txn;
--			bam_txn->wait_second_completion = true;
--		}
- 
- 		dma_async_issue_pending(nandc->tx_chan);
- 		dma_async_issue_pending(nandc->rx_chan);
-@@ -1365,7 +1319,7 @@ err_unmap_free_desc:
- 	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
- 		list_del(&desc->node);
- 
--		if (nandc->props->is_bam)
-+		if (nandc->props->supports_bam)
- 			dma_unmap_sg(nandc->dev, desc->bam_sgl,
- 				     desc->sgl_cnt, desc->dir);
- 		else
-@@ -1382,7 +1336,7 @@ err_unmap_free_desc:
- static void clear_read_regs(struct qcom_nand_controller *nandc)
- {
- 	nandc->reg_read_pos = 0;
--	nandc_read_buffer_sync(nandc, false);
-+	nandc_dev_to_mem(nandc, false);
- }
- 
- /*
-@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qco
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 	int i;
- 
--	nandc_read_buffer_sync(nandc, true);
-+	nandc_dev_to_mem(nandc, true);
- 
- 	for (i = 0; i < cw_cnt; i++) {
- 		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
- 	clear_read_regs(nandc);
- 	host->use_ecc = false;
- 
--	if (nandc->props->qpic_v2)
-+	if (nandc->props->qpic_version2)
- 		raw_cw = ecc->steps - 1;
- 
- 	clear_bam_transaction(nandc);
-@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
- 		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
- 	}
- 
--	if (nandc->props->is_bam) {
-+	if (nandc->props->supports_bam) {
- 		nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
- 		read_loc += data_size1;
- 
-@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom
- 	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
- 
- 	buf = (struct read_stats *)nandc->reg_read_buf;
--	nandc_read_buffer_sync(nandc, true);
-+	nandc_dev_to_mem(nandc, true);
- 
- 	for (i = 0; i < ecc->steps; i++, buf++) {
- 		u32 flash, buffer, erased_cw;
-@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nan
- 			oob_size = host->ecc_bytes_hw + host->spare_bytes;
- 		}
- 
--		if (nandc->props->is_bam) {
-+		if (nandc->props->supports_bam) {
- 			if (data_buf && oob_buf) {
- 				nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
- 				nandc_set_read_loc(chip, i, 1, data_size,
-@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct
- 
- 	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
- 	/* Free the initially allocated BAM transaction for reading the ONFI params */
--	if (nandc->props->is_bam)
-+	if (nandc->props->supports_bam)
- 		free_bam_transaction(nandc);
- 
- 	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
- 				     cwperpage);
- 
- 	/* Now allocate the BAM transaction based on updated max_cwperpage */
--	if (nandc->props->is_bam) {
-+	if (nandc->props->supports_bam) {
- 		nandc->bam_txn = alloc_bam_transaction(nandc);
- 		if (!nandc->bam_txn) {
- 			dev_err(nandc->dev,
-@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct
- 				| ecc_mode << ECC_MODE
- 				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
- 
--	if (!nandc->props->qpic_v2)
-+	if (!nandc->props->qpic_version2)
- 		host->ecc_buf_cfg = 0x203 << NUM_STEPS;
- 
- 	host->clrflashstatus = FS_READY_BSY_N;
-@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct na
- 		cmd = OP_FETCH_ID;
- 		break;
- 	case NAND_CMD_PARAM:
--		if (nandc->props->qpic_v2)
-+		if (nandc->props->qpic_version2)
- 			cmd = OP_PAGE_READ_ONFI_READ;
- 		else
- 			cmd = OP_PAGE_READ;
-@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struc
- 			if (ret < 0)
- 				return ret;
- 
--			q_op->cmd_reg = ret;
-+			q_op->cmd_reg = cpu_to_le32(ret);
- 			q_op->rdy_delay_ns = instr->delay_ns;
- 			break;
- 
-@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struc
- 			addrs = &instr->ctx.addr.addrs[offset];
- 
- 			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
--				q_op->addr1_reg |= addrs[i] << (i * 8);
-+				q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
- 
- 			if (naddrs > 4)
--				q_op->addr2_reg |= addrs[4];
-+				q_op->addr2_reg |= cpu_to_le32(addrs[4]);
- 
- 			q_op->rdy_delay_ns = instr->delay_ns;
- 			break;
-@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
- 	unsigned long start = jiffies + msecs_to_jiffies(time_ms);
- 	u32 flash;
- 
--	nandc_read_buffer_sync(nandc, true);
-+	nandc_dev_to_mem(nandc, true);
- 
- 	do {
- 		flash = le32_to_cpu(nandc->reg_read_buf[0]);
-@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct
- 	clear_read_regs(nandc);
- 	clear_bam_transaction(nandc);
- 
--	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+	nandc->regs->cmd = q_op.cmd_reg;
-+	nandc->regs->exec = cpu_to_le32(1);
- 
--	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- 
- 	ret = submit_descs(nandc);
-@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct
- 		goto err_out;
- 	}
- 
--	nandc_read_buffer_sync(nandc, true);
-+	nandc_dev_to_mem(nandc, true);
- 
- 	for (i = 0; i < num_cw; i++) {
- 		flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct
- 	clear_read_regs(nandc);
- 	clear_bam_transaction(nandc);
- 
--	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--	nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
--	nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
--	nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
--		      nandc->props->is_bam ? 0 : DM_EN);
-+	nandc->regs->cmd = q_op.cmd_reg;
-+	nandc->regs->addr0 = q_op.addr1_reg;
-+	nandc->regs->addr1 = q_op.addr2_reg;
-+	nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
-+	nandc->regs->exec = cpu_to_le32(1);
- 
--	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
--
--	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
--	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- 
- 	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
- 
-@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
- 	op_id = q_op.data_instr_idx;
- 	len = nand_subop_get_data_len(subop, op_id);
- 
--	nandc_read_buffer_sync(nandc, true);
-+	nandc_dev_to_mem(nandc, true);
- 	memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
- 
- err_out:
-@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struc
- 
- 	if (q_op.flag == OP_PROGRAM_PAGE) {
- 		goto wait_rdy;
--	} else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
--		q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
--		nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
--		nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
--		nandc_set_reg(chip, NAND_DEV0_CFG0,
--			      host->cfg0_raw & ~(7 << CW_PER_PAGE));
--		nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
-+	} else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
-+		q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
-+		nandc->regs->addr0 = q_op.addr1_reg;
-+		nandc->regs->addr1 = q_op.addr2_reg;
-+		nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
-+		nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
- 		instrs = 3;
--	} else if (q_op.cmd_reg != OP_RESET_DEVICE) {
-+	} else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
- 		return 0;
- 	}
- 
-@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struc
- 	clear_read_regs(nandc);
- 	clear_bam_transaction(nandc);
- 
--	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
--	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-+	nandc->regs->cmd = q_op.cmd_reg;
-+	nandc->regs->exec = cpu_to_le32(1);
- 
--	write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
--	if (q_op.cmd_reg == OP_BLOCK_ERASE)
--		write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-+	if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
-+		write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- 
--	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- 
- 	ret = submit_descs(nandc);
-@@ -2862,14 +2813,14 @@ static int qcom_param_page_type_exec(str
- 
- 	reg_base = NAND_READ_LOCATION_0;
- 
--	if (nandc->props->qpic_v2)
-+	if (nandc->props->qpic_version2)
- 		reg_base = NAND_READ_LOCATION_LAST_CW_0;
- 
- 	ret = qcom_parse_instructions(chip, subop, &q_op);
- 	if (ret)
- 		return ret;
- 
--	q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
-+	q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
- 
- 	nandc->buf_count = 0;
- 	nandc->buf_start = 0;
-@@ -2877,52 +2828,52 @@ static int qcom_param_page_type_exec(str
- 	clear_read_regs(nandc);
- 	clear_bam_transaction(nandc);
- 
--	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-+	nandc->regs->cmd = q_op.cmd_reg;
-+	nandc->regs->addr0 = 0;
-+	nandc->regs->addr1 = 0;
-+
-+	nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
-+					512 << UD_SIZE_BYTES |
-+					5 << NUM_ADDR_CYCLES |
-+					0 << SPARE_SIZE_BYTES);
-+
-+	nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
-+					0 << CS_ACTIVE_BSY |
-+					17 << BAD_BLOCK_BYTE_NUM |
-+					1 << BAD_BLOCK_IN_SPARE_AREA |
-+					2 << WR_RD_BSY_GAP |
-+					0 << WIDE_FLASH |
-+					1 << DEV0_CFG1_ECC_DISABLE);
- 
--	nandc_set_reg(chip, NAND_ADDR0, 0);
--	nandc_set_reg(chip, NAND_ADDR1, 0);
--	nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
--					| 512 << UD_SIZE_BYTES
--					| 5 << NUM_ADDR_CYCLES
--					| 0 << SPARE_SIZE_BYTES);
--	nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
--					| 0 << CS_ACTIVE_BSY
--					| 17 << BAD_BLOCK_BYTE_NUM
--					| 1 << BAD_BLOCK_IN_SPARE_AREA
--					| 2 << WR_RD_BSY_GAP
--					| 0 << WIDE_FLASH
--					| 1 << DEV0_CFG1_ECC_DISABLE);
--	if (!nandc->props->qpic_v2)
--		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
-+	if (!nandc->props->qpic_version2)
-+		nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
- 
- 	/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
--	if (!nandc->props->qpic_v2) {
--		nandc_set_reg(chip, NAND_DEV_CMD_VLD,
--			      (nandc->vld & ~READ_START_VLD));
--		nandc_set_reg(chip, NAND_DEV_CMD1,
--			      (nandc->cmd1 & ~(0xFF << READ_ADDR))
--			      | NAND_CMD_PARAM << READ_ADDR);
-+	if (!nandc->props->qpic_version2) {
-+		nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
-+		nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
-+				    | NAND_CMD_PARAM << READ_ADDR);
- 	}
- 
--	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
--
--	if (!nandc->props->qpic_v2) {
--		nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
--		nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
-+	nandc->regs->exec = cpu_to_le32(1);
-+
-+	if (!nandc->props->qpic_version2) {
-+		nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
-+		nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
- 	}
- 
- 	instr = q_op.data_instr;
- 	op_id = q_op.data_instr_idx;
- 	len = nand_subop_get_data_len(subop, op_id);
- 
--	if (nandc->props->qpic_v2)
-+	if (nandc->props->qpic_version2)
- 		nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
- 	else
- 		nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
- 
--	if (!nandc->props->qpic_v2) {
--		write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
--		write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-+	if (!nandc->props->qpic_version2) {
-+		write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-+		write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
- 	}
- 
- 	nandc->buf_count = 512;
-@@ -2934,9 +2885,10 @@ static int qcom_param_page_type_exec(str
- 		      nandc->buf_count, 0);
- 
- 	/* restore CMD1 and VLD regs */
--	if (!nandc->props->qpic_v2) {
--		write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
--		write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
-+	if (!nandc->props->qpic_version2) {
-+		write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-+		write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-+			      NAND_BAM_NEXT_SGL);
- 	}
- 
- 	ret = submit_descs(nandc);
-@@ -3025,7 +2977,7 @@ static const struct nand_controller_ops
- 
- static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
- {
--	if (nandc->props->is_bam) {
-+	if (nandc->props->supports_bam) {
- 		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- 			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- 					 MAX_REG_RD *
-@@ -3078,7 +3030,7 @@ static int qcom_nandc_alloc(struct qcom_
- 	if (!nandc->reg_read_buf)
- 		return -ENOMEM;
- 
--	if (nandc->props->is_bam) {
-+	if (nandc->props->supports_bam) {
- 		nandc->reg_read_dma =
- 			dma_map_single(nandc->dev, nandc->reg_read_buf,
- 				       MAX_REG_RD *
-@@ -3159,15 +3111,15 @@ static int qcom_nandc_setup(struct qcom_
- 	u32 nand_ctrl;
- 
- 	/* kill onenand */
--	if (!nandc->props->is_qpic)
-+	if (!nandc->props->nandc_part_of_qpic)
- 		nandc_write(nandc, SFLASHC_BURST_CFG, 0);
- 
--	if (!nandc->props->qpic_v2)
-+	if (!nandc->props->qpic_version2)
- 		nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
- 			    NAND_DEV_CMD_VLD_VAL);
- 
- 	/* enable ADM or BAM DMA */
--	if (nandc->props->is_bam) {
-+	if (nandc->props->supports_bam) {
- 		nand_ctrl = nandc_read(nandc, NAND_CTRL);
- 
- 		/*
-@@ -3184,7 +3136,7 @@ static int qcom_nandc_setup(struct qcom_
- 	}
- 
- 	/* save the original values of these registers */
--	if (!nandc->props->qpic_v2) {
-+	if (!nandc->props->qpic_version2) {
- 		nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
- 		nandc->vld = NAND_DEV_CMD_VLD_VAL;
- 	}
-@@ -3357,7 +3309,7 @@ static int qcom_nandc_parse_dt(struct pl
- 	struct device_node *np = nandc->dev->of_node;
- 	int ret;
- 
--	if (!nandc->props->is_bam) {
-+	if (!nandc->props->supports_bam) {
- 		ret = of_property_read_u32(np, "qcom,cmd-crci",
- 					   &nandc->cmd_crci);
- 		if (ret) {
-@@ -3482,30 +3434,30 @@ static void qcom_nandc_remove(struct pla
- 
- static const struct qcom_nandc_props ipq806x_nandc_props = {
- 	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
--	.is_bam = false,
-+	.supports_bam = false,
- 	.use_codeword_fixup = true,
- 	.dev_cmd_reg_start = 0x0,
- };
- 
- static const struct qcom_nandc_props ipq4019_nandc_props = {
- 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--	.is_bam = true,
--	.is_qpic = true,
-+	.supports_bam = true,
-+	.nandc_part_of_qpic = true,
- 	.dev_cmd_reg_start = 0x0,
- };
- 
- static const struct qcom_nandc_props ipq8074_nandc_props = {
- 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--	.is_bam = true,
--	.is_qpic = true,
-+	.supports_bam = true,
-+	.nandc_part_of_qpic = true,
- 	.dev_cmd_reg_start = 0x7000,
- };
- 
- static const struct qcom_nandc_props sdx55_nandc_props = {
- 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
--	.is_bam = true,
--	.is_qpic = true,
--	.qpic_v2 = true,
-+	.supports_bam = true,
-+	.nandc_part_of_qpic = true,
-+	.qpic_version2 = true,
- 	.dev_cmd_reg_start = 0x7000,
- };
- 

+ 0 - 880
target/linux/generic/backport-6.6/413-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch

@@ -1,880 +0,0 @@
-From 1d479f5b345e0c3650fec4dddeef9fc6fab30c8b Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <[email protected]>
-Date: Wed, 20 Nov 2024 14:45:01 +0530
-Subject: [PATCH 2/4] mtd: rawnand: qcom: Add qcom prefix to common api
-
-Add qcom prefix to all the api which will be commonly
-used by spi nand driver and raw nand driver.
-
-Reviewed-by: Konrad Dybcio <[email protected]>
-Signed-off-by: Md Sadre Alam <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
----
- drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
- 1 file changed, 160 insertions(+), 160 deletions(-)
-
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -53,7 +53,7 @@
- #define	NAND_READ_LOCATION_LAST_CW_2	0xf48
- #define	NAND_READ_LOCATION_LAST_CW_3	0xf4c
- 
--/* dummy register offsets, used by write_reg_dma */
-+/* dummy register offsets, used by qcom_write_reg_dma */
- #define	NAND_DEV_CMD1_RESTORE		0xdead
- #define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
- 
-@@ -211,7 +211,7 @@
- 
- /*
-  * Flags used in DMA descriptor preparation helper functions
-- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
-+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-  */
- /* Don't set the EOT in current tx BAM sgl */
- #define NAND_BAM_NO_EOT			BIT(0)
-@@ -550,7 +550,7 @@ struct qcom_nandc_props {
- };
- 
- /* Frees the BAM transaction memory */
--static void free_bam_transaction(struct qcom_nand_controller *nandc)
-+static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
- {
- 	struct bam_transaction *bam_txn = nandc->bam_txn;
- 
-@@ -559,7 +559,7 @@ static void free_bam_transaction(struct
- 
- /* Allocates and Initializes the BAM transaction */
- static struct bam_transaction *
--alloc_bam_transaction(struct qcom_nand_controller *nandc)
-+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
- {
- 	struct bam_transaction *bam_txn;
- 	size_t bam_txn_size;
-@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_c
- }
- 
- /* Clears the BAM transaction indexes */
--static void clear_bam_transaction(struct qcom_nand_controller *nandc)
-+static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
- {
- 	struct bam_transaction *bam_txn = nandc->bam_txn;
- 
-@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct
- }
- 
- /* Callback for DMA descriptor completion */
--static void qpic_bam_dma_done(void *data)
-+static void qcom_qpic_bam_dma_done(void *data)
- {
- 	struct bam_transaction *bam_txn = data;
- 
-@@ -644,7 +644,7 @@ static void nandc_write(struct qcom_nand
- 	iowrite32(val, nandc->base + offset);
- }
- 
--static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
-+static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
- {
- 	if (!nandc->props->supports_bam)
- 		return;
-@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_n
-  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
-  * which will be submitted to DMA engine.
-  */
--static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
--				  struct dma_chan *chan,
--				  unsigned long flags)
-+static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+				       struct dma_chan *chan,
-+				       unsigned long flags)
- {
- 	struct desc_info *desc;
- 	struct scatterlist *sgl;
-@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct
-  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
-  * after the current command element.
-  */
--static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
--				 int reg_off, const void *vaddr,
--				 int size, unsigned int flags)
-+static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+				      int reg_off, const void *vaddr,
-+				      int size, unsigned int flags)
- {
- 	int bam_ce_size;
- 	int i, ret;
-@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct
- 		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
- 
- 		if (flags & NAND_BAM_NWD) {
--			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
--						     DMA_PREP_FENCE |
--						     DMA_PREP_CMD);
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+							  DMA_PREP_FENCE |
-+							  DMA_PREP_CMD);
- 			if (ret)
- 				return ret;
- 		}
-@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct
-  * Prepares the data descriptor for BAM DMA which will be used for NAND
-  * data reads and writes.
-  */
--static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
--				  const void *vaddr,
--				  int size, unsigned int flags)
-+static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+				       const void *vaddr, int size, unsigned int flags)
- {
- 	int ret;
- 	struct bam_transaction *bam_txn = nandc->bam_txn;
-@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct
- 		 * is not set, form the DMA descriptor
- 		 */
- 		if (!(flags & NAND_BAM_NO_EOT)) {
--			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
--						     DMA_PREP_INTERRUPT);
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+							  DMA_PREP_INTERRUPT);
- 			if (ret)
- 				return ret;
- 		}
-@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct
- 	return 0;
- }
- 
--static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
--			     int reg_off, const void *vaddr, int size,
--			     bool flow_control)
-+static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-+				  int reg_off, const void *vaddr, int size,
-+				  bool flow_control)
- {
- 	struct desc_info *desc;
- 	struct dma_async_tx_descriptor *dma_desc;
-@@ -1069,15 +1068,15 @@ err:
- }
- 
- /*
-- * read_reg_dma:	prepares a descriptor to read a given number of
-+ * qcom_read_reg_dma:	prepares a descriptor to read a given number of
-  *			contiguous registers to the reg_read_buf pointer
-  *
-  * @first:		offset of the first register in the contiguous block
-  * @num_regs:		number of registers to read
-  * @flags:		flags to control DMA descriptor preparation
-  */
--static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
--			int num_regs, unsigned int flags)
-+static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-+			     int num_regs, unsigned int flags)
- {
- 	bool flow_control = false;
- 	void *vaddr;
-@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand
- 		first = dev_cmd_reg_addr(nandc, first);
- 
- 	if (nandc->props->supports_bam)
--		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-+		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
- 					     num_regs, flags);
- 
- 	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- 		flow_control = true;
- 
--	return prep_adm_dma_desc(nandc, true, first, vaddr,
-+	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
- 				 num_regs * sizeof(u32), flow_control);
- }
- 
- /*
-- * write_reg_dma:	prepares a descriptor to write a given number of
-+ * qcom_write_reg_dma:	prepares a descriptor to write a given number of
-  *			contiguous registers
-  *
-  * @vaddr:		contiguous memory from where register value will
-@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand
-  * @num_regs:		number of registers to write
-  * @flags:		flags to control DMA descriptor preparation
-  */
--static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
--			 int first, int num_regs, unsigned int flags)
-+static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+			      int first, int num_regs, unsigned int flags)
- {
- 	bool flow_control = false;
- 
-@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nan
- 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
- 
- 	if (nandc->props->supports_bam)
--		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-+		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
- 					     num_regs, flags);
- 
- 	if (first == NAND_FLASH_CMD)
- 		flow_control = true;
- 
--	return prep_adm_dma_desc(nandc, false, first, vaddr,
-+	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
- 				 num_regs * sizeof(u32), flow_control);
- }
- 
- /*
-- * read_data_dma:	prepares a DMA descriptor to transfer data from the
-+ * qcom_read_data_dma:	prepares a DMA descriptor to transfer data from the
-  *			controller's internal buffer to the buffer 'vaddr'
-  *
-  * @reg_off:		offset within the controller's data buffer
-@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nan
-  * @size:		DMA transaction size in bytes
-  * @flags:		flags to control DMA descriptor preparation
-  */
--static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--			 const u8 *vaddr, int size, unsigned int flags)
-+static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+			      const u8 *vaddr, int size, unsigned int flags)
- {
- 	if (nandc->props->supports_bam)
--		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-+		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
- 
--	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-+	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
- }
- 
- /*
-- * write_data_dma:	prepares a DMA descriptor to transfer data from
-+ * qcom_write_data_dma:	prepares a DMA descriptor to transfer data from
-  *			'vaddr' to the controller's internal buffer
-  *
-  * @reg_off:		offset within the controller's data buffer
-@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nan
-  * @size:		DMA transaction size in bytes
-  * @flags:		flags to control DMA descriptor preparation
-  */
--static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--			  const u8 *vaddr, int size, unsigned int flags)
-+static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+			       const u8 *vaddr, int size, unsigned int flags)
- {
- 	if (nandc->props->supports_bam)
--		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-+		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
- 
--	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-+	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
- }
- 
- /*
-@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct
- {
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 
--	write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
--	write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
- 	if (!nandc->props->qpic_version2)
--		write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
--	write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
--		      NAND_ERASED_CW_DETECT_CFG, 1, 0);
--	write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
--		      NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+		qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+	qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
-+			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+	qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
-+			   NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
- }
- 
- /*
-@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *ch
- 		reg = &nandc->regs->read_location_last0;
- 
- 	if (nandc->props->supports_bam)
--		write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
-+		qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
- 
--	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- 
- 	if (use_ecc) {
--		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
--		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
--			     NAND_BAM_NEXT_SGL);
-+		qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
-+		qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
-+				  NAND_BAM_NEXT_SGL);
- 	} else {
--		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+		qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- 	}
- }
- 
-@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struc
- {
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 
--	write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
--	write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
- 	if (!nandc->props->qpic_version2)
--		write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
--			      NAND_BAM_NEXT_SGL);
-+		qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
-+				   NAND_BAM_NEXT_SGL);
- }
- 
- /*
-@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct
- {
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 
--	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- 
--	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- 
--	write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
--	write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+	qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
-+			   NAND_BAM_NEXT_SGL);
- }
- 
- /* helpers to submit/free our list of dma descriptors */
--static int submit_descs(struct qcom_nand_controller *nandc)
-+static int qcom_submit_descs(struct qcom_nand_controller *nandc)
- {
- 	struct desc_info *desc, *n;
- 	dma_cookie_t cookie = 0;
-@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand
- 
- 	if (nandc->props->supports_bam) {
- 		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
--			ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- 			if (ret)
- 				goto err_unmap_free_desc;
- 		}
- 
- 		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
--			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
--						   DMA_PREP_INTERRUPT);
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+							  DMA_PREP_INTERRUPT);
- 			if (ret)
- 				goto err_unmap_free_desc;
- 		}
- 
- 		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
--			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
--						   DMA_PREP_CMD);
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+							  DMA_PREP_CMD);
- 			if (ret)
- 				goto err_unmap_free_desc;
- 		}
-@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand
- 		cookie = dmaengine_submit(desc->dma_desc);
- 
- 	if (nandc->props->supports_bam) {
--		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
-+		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
- 		bam_txn->last_cmd_desc->callback_param = bam_txn;
- 
- 		dma_async_issue_pending(nandc->tx_chan);
-@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand
- err_unmap_free_desc:
- 	/*
- 	 * Unmap the dma sg_list and free the desc allocated by both
--	 * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
-+	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
- 	 */
- 	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
- 		list_del(&desc->node);
-@@ -1333,10 +1333,10 @@ err_unmap_free_desc:
- }
- 
- /* reset the register read buffer for next NAND operation */
--static void clear_read_regs(struct qcom_nand_controller *nandc)
-+static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
- {
- 	nandc->reg_read_pos = 0;
--	nandc_dev_to_mem(nandc, false);
-+	qcom_nandc_dev_to_mem(nandc, false);
- }
- 
- /*
-@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qco
- 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- 	int i;
- 
--	nandc_dev_to_mem(nandc, true);
-+	qcom_nandc_dev_to_mem(nandc, true);
- 
- 	for (i = 0; i < cw_cnt; i++) {
- 		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *
- 	nand_read_page_op(chip, page, 0, NULL, 0);
- 	nandc->buf_count = 0;
- 	nandc->buf_start = 0;
--	clear_read_regs(nandc);
-+	qcom_clear_read_regs(nandc);
- 	host->use_ecc = false;
- 
- 	if (nandc->props->qpic_version2)
- 		raw_cw = ecc->steps - 1;
- 
--	clear_bam_transaction(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 	set_address(host, host->cw_size * cw, page);
- 	update_rw_regs(host, 1, true, raw_cw);
- 	config_nand_page_read(chip);
-@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *
- 
- 	config_nand_cw_read(chip, false, raw_cw);
- 
--	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
-+	qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
- 	reg_off += data_size1;
- 
--	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
-+	qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
- 	reg_off += oob_size1;
- 
--	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
-+	qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
- 	reg_off += data_size2;
- 
--	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
-+	qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
- 		return ret;
-@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom
- 	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
- 
- 	buf = (struct read_stats *)nandc->reg_read_buf;
--	nandc_dev_to_mem(nandc, true);
-+	qcom_nandc_dev_to_mem(nandc, true);
- 
- 	for (i = 0; i < ecc->steps; i++, buf++) {
- 		u32 flash, buffer, erased_cw;
-@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nan
- 		config_nand_cw_read(chip, true, i);
- 
- 		if (data_buf)
--			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
--				      data_size, 0);
-+			qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
-+					   data_size, 0);
- 
- 		/*
- 		 * when ecc is enabled, the controller doesn't read the real
-@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nan
- 			for (j = 0; j < host->bbm_size; j++)
- 				*oob_buf++ = 0xff;
- 
--			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
--				      oob_buf, oob_size, 0);
-+			qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
-+					   oob_buf, oob_size, 0);
- 		}
- 
- 		if (data_buf)
-@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nan
- 			oob_buf += oob_size;
- 	}
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure to read page/oob\n");
- 		return ret;
-@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand
- 	int size;
- 	int ret;
- 
--	clear_read_regs(nandc);
-+	qcom_clear_read_regs(nandc);
- 
- 	size = host->use_ecc ? host->cw_data : host->cw_size;
- 
-@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand
- 
- 	config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
- 
--	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
-+	qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret)
- 		dev_err(nandc->dev, "failed to copy last codeword\n");
- 
-@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct n
- 	nandc->buf_count = 0;
- 	nandc->buf_start = 0;
- 	host->use_ecc = true;
--	clear_read_regs(nandc);
-+	qcom_clear_read_regs(nandc);
- 	set_address(host, 0, page);
- 	update_rw_regs(host, ecc->steps, true, 0);
- 
- 	data_buf = buf;
- 	oob_buf = oob_required ? chip->oob_poi : NULL;
- 
--	clear_bam_transaction(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	return read_page_ecc(host, data_buf, oob_buf, page);
- }
-@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct na
- 	if (host->nr_boot_partitions)
- 		qcom_nandc_codeword_fixup(host, page);
- 
--	clear_read_regs(nandc);
--	clear_bam_transaction(nandc);
-+	qcom_clear_read_regs(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	host->use_ecc = true;
- 	set_address(host, 0, page);
-@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct
- 	set_address(host, 0, page);
- 	nandc->buf_count = 0;
- 	nandc->buf_start = 0;
--	clear_read_regs(nandc);
--	clear_bam_transaction(nandc);
-+	qcom_clear_read_regs(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	data_buf = (u8 *)buf;
- 	oob_buf = chip->oob_poi;
-@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct
- 			oob_size = ecc->bytes;
- 		}
- 
--		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
--			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
-+		qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
-+				    i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
- 
- 		/*
- 		 * when ECC is enabled, we don't really need to write anything
-@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct
- 		if (qcom_nandc_is_last_cw(ecc, i)) {
- 			oob_buf += host->bbm_size;
- 
--			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
--				       oob_buf, oob_size, 0);
-+			qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
-+					    oob_buf, oob_size, 0);
- 		}
- 
- 		config_nand_cw_write(chip);
-@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct
- 		oob_buf += oob_size;
- 	}
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure to write page\n");
- 		return ret;
-@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(str
- 		qcom_nandc_codeword_fixup(host, page);
- 
- 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
--	clear_read_regs(nandc);
--	clear_bam_transaction(nandc);
-+	qcom_clear_read_regs(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	data_buf = (u8 *)buf;
- 	oob_buf = chip->oob_poi;
-@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(str
- 			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
- 		}
- 
--		write_data_dma(nandc, reg_off, data_buf, data_size1,
--			       NAND_BAM_NO_EOT);
-+		qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
-+				    NAND_BAM_NO_EOT);
- 		reg_off += data_size1;
- 		data_buf += data_size1;
- 
--		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
--			       NAND_BAM_NO_EOT);
-+		qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
-+				    NAND_BAM_NO_EOT);
- 		reg_off += oob_size1;
- 		oob_buf += oob_size1;
- 
--		write_data_dma(nandc, reg_off, data_buf, data_size2,
--			       NAND_BAM_NO_EOT);
-+		qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
-+				    NAND_BAM_NO_EOT);
- 		reg_off += data_size2;
- 		data_buf += data_size2;
- 
--		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
-+		qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
- 		oob_buf += oob_size2;
- 
- 		config_nand_cw_write(chip);
- 	}
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure to write raw page\n");
- 		return ret;
-@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct n
- 		qcom_nandc_codeword_fixup(host, page);
- 
- 	host->use_ecc = true;
--	clear_bam_transaction(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	/* calculate the data and oob size for the last codeword/step */
- 	data_size = ecc->size - ((ecc->steps - 1) << 2);
-@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct n
- 	update_rw_regs(host, 1, false, 0);
- 
- 	config_nand_page_write(chip);
--	write_data_dma(nandc, FLASH_BUF_ACC,
--		       nandc->data_buffer, data_size + oob_size, 0);
-+	qcom_write_data_dma(nandc, FLASH_BUF_ACC,
-+			    nandc->data_buffer, data_size + oob_size, 0);
- 	config_nand_cw_write(chip);
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure to write oob\n");
- 		return ret;
-@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct n
- 	 */
- 	host->use_ecc = false;
- 
--	clear_bam_transaction(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 	ret = copy_last_cw(host, page);
- 	if (ret)
- 		goto err;
-@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(stru
- 	struct nand_ecc_ctrl *ecc = &chip->ecc;
- 	int page, ret;
- 
--	clear_read_regs(nandc);
--	clear_bam_transaction(nandc);
-+	qcom_clear_read_regs(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	/*
- 	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
-@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(stru
- 	update_rw_regs(host, 1, false, ecc->steps - 1);
- 
- 	config_nand_page_write(chip);
--	write_data_dma(nandc, FLASH_BUF_ACC,
--		       nandc->data_buffer, host->cw_size, 0);
-+	qcom_write_data_dma(nandc, FLASH_BUF_ACC,
-+			    nandc->data_buffer, host->cw_size, 0);
- 	config_nand_cw_write(chip);
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure to update BBM\n");
- 		return ret;
-@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct
- 	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
- 	/* Free the initially allocated BAM transaction for reading the ONFI params */
- 	if (nandc->props->supports_bam)
--		free_bam_transaction(nandc);
-+		qcom_free_bam_transaction(nandc);
- 
- 	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
- 				     cwperpage);
- 
- 	/* Now allocate the BAM transaction based on updated max_cwperpage */
- 	if (nandc->props->supports_bam) {
--		nandc->bam_txn = alloc_bam_transaction(nandc);
-+		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
- 		if (!nandc->bam_txn) {
- 			dev_err(nandc->dev,
- 				"failed to allocate bam transaction\n");
-@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
- 	unsigned long start = jiffies + msecs_to_jiffies(time_ms);
- 	u32 flash;
- 
--	nandc_dev_to_mem(nandc, true);
-+	qcom_nandc_dev_to_mem(nandc, true);
- 
- 	do {
- 		flash = le32_to_cpu(nandc->reg_read_buf[0]);
-@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct
- 	nandc->buf_start = 0;
- 	host->use_ecc = false;
- 
--	clear_read_regs(nandc);
--	clear_bam_transaction(nandc);
-+	qcom_clear_read_regs(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	nandc->regs->cmd = q_op.cmd_reg;
- 	nandc->regs->exec = cpu_to_le32(1);
- 
--	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
--	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure in submitting status descriptor\n");
- 		goto err_out;
- 	}
- 
--	nandc_dev_to_mem(nandc, true);
-+	qcom_nandc_dev_to_mem(nandc, true);
- 
- 	for (i = 0; i < num_cw; i++) {
- 		flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct
- 	nandc->buf_start = 0;
- 	host->use_ecc = false;
- 
--	clear_read_regs(nandc);
--	clear_bam_transaction(nandc);
-+	qcom_clear_read_regs(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	nandc->regs->cmd = q_op.cmd_reg;
- 	nandc->regs->addr0 = q_op.addr1_reg;
-@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct
- 	nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
- 	nandc->regs->exec = cpu_to_le32(1);
- 
--	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
--	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- 
--	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-+	qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure in submitting read id descriptor\n");
- 		goto err_out;
-@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
- 	op_id = q_op.data_instr_idx;
- 	len = nand_subop_get_data_len(subop, op_id);
- 
--	nandc_dev_to_mem(nandc, true);
-+	qcom_nandc_dev_to_mem(nandc, true);
- 	memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
- 
- err_out:
-@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struc
- 	nandc->buf_start = 0;
- 	host->use_ecc = false;
- 
--	clear_read_regs(nandc);
--	clear_bam_transaction(nandc);
-+	qcom_clear_read_regs(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	nandc->regs->cmd = q_op.cmd_reg;
- 	nandc->regs->exec = cpu_to_le32(1);
- 
--	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- 	if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
--		write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+		qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- 
--	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
--	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure in submitting misc descriptor\n");
- 		goto err_out;
-@@ -2825,8 +2825,8 @@ static int qcom_param_page_type_exec(str
- 	nandc->buf_count = 0;
- 	nandc->buf_start = 0;
- 	host->use_ecc = false;
--	clear_read_regs(nandc);
--	clear_bam_transaction(nandc);
-+	qcom_clear_read_regs(nandc);
-+	qcom_clear_bam_transaction(nandc);
- 
- 	nandc->regs->cmd = q_op.cmd_reg;
- 	nandc->regs->addr0 = 0;
-@@ -2872,8 +2872,8 @@ static int qcom_param_page_type_exec(str
- 		nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
- 
- 	if (!nandc->props->qpic_version2) {
--		write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
--		write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
-+		qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
-+		qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
- 	}
- 
- 	nandc->buf_count = 512;
-@@ -2881,17 +2881,17 @@ static int qcom_param_page_type_exec(str
- 
- 	config_nand_single_cw_page_read(chip, false, 0);
- 
--	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
--		      nandc->buf_count, 0);
-+	qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
-+			   nandc->buf_count, 0);
- 
- 	/* restore CMD1 and VLD regs */
- 	if (!nandc->props->qpic_version2) {
--		write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
--		write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
--			      NAND_BAM_NEXT_SGL);
-+		qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
-+		qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
-+				   NAND_BAM_NEXT_SGL);
- 	}
- 
--	ret = submit_descs(nandc);
-+	ret = qcom_submit_descs(nandc);
- 	if (ret) {
- 		dev_err(nandc->dev, "failure in submitting param page descriptor\n");
- 		goto err_out;
-@@ -3075,7 +3075,7 @@ static int qcom_nandc_alloc(struct qcom_
- 		 * maximum codeword size
- 		 */
- 		nandc->max_cwperpage = 1;
--		nandc->bam_txn = alloc_bam_transaction(nandc);
-+		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
- 		if (!nandc->bam_txn) {
- 			dev_err(nandc->dev,
- 				"failed to allocate bam transaction\n");

+ 0 - 2436
target/linux/generic/backport-6.6/413-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch

@@ -1,2436 +0,0 @@
-From fdf3ee5c6e5278dab4f60b998b47ed2d510bf80f Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <[email protected]>
-Date: Wed, 20 Nov 2024 14:45:02 +0530
-Subject: [PATCH 3/4] mtd: nand: Add qpic_common API file
-
-Add qpic_common.c file which hold all the common
-qpic APIs which will be used by both qpic raw nand
-driver and qpic spi nand driver.
-
-Signed-off-by: Md Sadre Alam <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
----
- drivers/mtd/nand/Makefile            |    2 +-
- drivers/mtd/nand/qpic_common.c       |  759 ++++++++++++++++++
- drivers/mtd/nand/raw/qcom_nandc.c    | 1092 +-------------------------
- include/linux/mtd/nand-qpic-common.h |  468 +++++++++++
- 4 files changed, 1240 insertions(+), 1081 deletions(-)
- create mode 100644 drivers/mtd/nand/qpic_common.c
- create mode 100644 include/linux/mtd/nand-qpic-common.h
-
---- a/drivers/mtd/nand/Makefile
-+++ b/drivers/mtd/nand/Makefile
-@@ -3,7 +3,7 @@
- nandcore-objs := core.o bbt.o
- obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
- obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
--
-+obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
- obj-y	+= onenand/
- obj-y	+= raw/
- obj-y	+= spi/
---- /dev/null
-+++ b/drivers/mtd/nand/qpic_common.c
-@@ -0,0 +1,759 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
-+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
-+ */
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/dmaengine.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dma/qcom_adm.h>
-+#include <linux/dma/qcom_bam_dma.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+#include <linux/mtd/nand-qpic-common.h>
-+
-+/**
-+ * qcom_free_bam_transaction() - Frees the BAM transaction memory
-+ * @nandc: qpic nand controller
-+ *
-+ * This function frees the bam transaction memory
-+ */
-+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+	struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+	kfree(bam_txn);
-+}
-+EXPORT_SYMBOL(qcom_free_bam_transaction);
-+
-+/**
-+ * qcom_alloc_bam_transaction() - allocate BAM transaction
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will allocate and initialize the BAM transaction structure
-+ */
-+struct bam_transaction *
-+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+	struct bam_transaction *bam_txn;
-+	size_t bam_txn_size;
-+	unsigned int num_cw = nandc->max_cwperpage;
-+	void *bam_txn_buf;
-+
-+	bam_txn_size =
-+		sizeof(*bam_txn) + num_cw *
-+		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
-+		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
-+		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
-+
-+	bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
-+	if (!bam_txn_buf)
-+		return NULL;
-+
-+	bam_txn = bam_txn_buf;
-+	bam_txn_buf += sizeof(*bam_txn);
-+
-+	bam_txn->bam_ce = bam_txn_buf;
-+	bam_txn_buf +=
-+		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
-+
-+	bam_txn->cmd_sgl = bam_txn_buf;
-+	bam_txn_buf +=
-+		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
-+
-+	bam_txn->data_sgl = bam_txn_buf;
-+
-+	init_completion(&bam_txn->txn_done);
-+
-+	return bam_txn;
-+}
-+EXPORT_SYMBOL(qcom_alloc_bam_transaction);
-+
-+/**
-+ * qcom_clear_bam_transaction() - Clears the BAM transaction
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will clear the BAM transaction indexes.
-+ */
-+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
-+{
-+	struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+	if (!nandc->props->supports_bam)
-+		return;
-+
-+	memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
-+	bam_txn->last_data_desc = NULL;
-+
-+	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
-+		      QPIC_PER_CW_CMD_SGL);
-+	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
-+		      QPIC_PER_CW_DATA_SGL);
-+
-+	reinit_completion(&bam_txn->txn_done);
-+}
-+EXPORT_SYMBOL(qcom_clear_bam_transaction);
-+
-+/**
-+ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
-+ * @data: data pointer
-+ *
-+ * This function is a callback for DMA descriptor completion
-+ */
-+void qcom_qpic_bam_dma_done(void *data)
-+{
-+	struct bam_transaction *bam_txn = data;
-+
-+	complete(&bam_txn->txn_done);
-+}
-+EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
-+
-+/**
-+ * qcom_nandc_dev_to_mem() - Check for dma sync for cpu or device
-+ * @nandc: qpic nand controller
-+ * @is_cpu: cpu or Device
-+ *
-+ * This function will check for dma sync for cpu or device
-+ */
-+inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
-+{
-+	if (!nandc->props->supports_bam)
-+		return;
-+
-+	if (is_cpu)
-+		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
-+					MAX_REG_RD *
-+					sizeof(*nandc->reg_read_buf),
-+					DMA_FROM_DEVICE);
-+	else
-+		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
-+					   MAX_REG_RD *
-+					   sizeof(*nandc->reg_read_buf),
-+					   DMA_FROM_DEVICE);
-+}
-+EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
-+
-+/**
-+ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
-+ * @nandc: qpic nand controller
-+ * @chan: dma channel
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function maps the scatter gather list for DMA transfer and forms the
-+ * DMA descriptor for BAM.This descriptor will be added in the NAND DMA
-+ * descriptor queue which will be submitted to DMA engine.
-+ */
-+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+				struct dma_chan *chan, unsigned long flags)
-+{
-+	struct desc_info *desc;
-+	struct scatterlist *sgl;
-+	unsigned int sgl_cnt;
-+	int ret;
-+	struct bam_transaction *bam_txn = nandc->bam_txn;
-+	enum dma_transfer_direction dir_eng;
-+	struct dma_async_tx_descriptor *dma_desc;
-+
-+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-+	if (!desc)
-+		return -ENOMEM;
-+
-+	if (chan == nandc->cmd_chan) {
-+		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
-+		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
-+		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
-+		dir_eng = DMA_MEM_TO_DEV;
-+		desc->dir = DMA_TO_DEVICE;
-+	} else if (chan == nandc->tx_chan) {
-+		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
-+		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
-+		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
-+		dir_eng = DMA_MEM_TO_DEV;
-+		desc->dir = DMA_TO_DEVICE;
-+	} else {
-+		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
-+		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
-+		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
-+		dir_eng = DMA_DEV_TO_MEM;
-+		desc->dir = DMA_FROM_DEVICE;
-+	}
-+
-+	sg_mark_end(sgl + sgl_cnt - 1);
-+	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-+	if (ret == 0) {
-+		dev_err(nandc->dev, "failure in mapping desc\n");
-+		kfree(desc);
-+		return -ENOMEM;
-+	}
-+
-+	desc->sgl_cnt = sgl_cnt;
-+	desc->bam_sgl = sgl;
-+
-+	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
-+					   flags);
-+
-+	if (!dma_desc) {
-+		dev_err(nandc->dev, "failure in prep desc\n");
-+		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
-+		kfree(desc);
-+		return -EINVAL;
-+	}
-+
-+	desc->dma_desc = dma_desc;
-+
-+	/* update last data/command descriptor */
-+	if (chan == nandc->cmd_chan)
-+		bam_txn->last_cmd_desc = dma_desc;
-+	else
-+		bam_txn->last_data_desc = dma_desc;
-+
-+	list_add_tail(&desc->node, &nandc->desc_list);
-+
-+	return 0;
-+}
-+EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
-+
-+/**
-+ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares the command descriptor for BAM DMA
-+ * which will be used for NAND register reads and writes.
-+ */
-+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+			       int reg_off, const void *vaddr,
-+			       int size, unsigned int flags)
-+{
-+	int bam_ce_size;
-+	int i, ret;
-+	struct bam_cmd_element *bam_ce_buffer;
-+	struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
-+
-+	/* fill the command desc */
-+	for (i = 0; i < size; i++) {
-+		if (read)
-+			bam_prep_ce(&bam_ce_buffer[i],
-+				    nandc_reg_phys(nandc, reg_off + 4 * i),
-+				    BAM_READ_COMMAND,
-+				    reg_buf_dma_addr(nandc,
-+						     (__le32 *)vaddr + i));
-+		else
-+			bam_prep_ce_le32(&bam_ce_buffer[i],
-+					 nandc_reg_phys(nandc, reg_off + 4 * i),
-+					 BAM_WRITE_COMMAND,
-+					 *((__le32 *)vaddr + i));
-+	}
-+
-+	bam_txn->bam_ce_pos += size;
-+
-+	/* use the separate sgl after this command */
-+	if (flags & NAND_BAM_NEXT_SGL) {
-+		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
-+		bam_ce_size = (bam_txn->bam_ce_pos -
-+				bam_txn->bam_ce_start) *
-+				sizeof(struct bam_cmd_element);
-+		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
-+			   bam_ce_buffer, bam_ce_size);
-+		bam_txn->cmd_sgl_pos++;
-+		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-+
-+		if (flags & NAND_BAM_NWD) {
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+							  DMA_PREP_FENCE | DMA_PREP_CMD);
-+			if (ret)
-+				return ret;
-+		}
-+	}
-+
-+	return 0;
-+}
-+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
-+
-+/**
-+ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares the data descriptor for BAM DMA which
-+ * will be used for NAND data reads and writes.
-+ */
-+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+				const void *vaddr, int size, unsigned int flags)
-+{
-+	int ret;
-+	struct bam_transaction *bam_txn = nandc->bam_txn;
-+
-+	if (read) {
-+		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
-+			   vaddr, size);
-+		bam_txn->rx_sgl_pos++;
-+	} else {
-+		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
-+			   vaddr, size);
-+		bam_txn->tx_sgl_pos++;
-+
-+		/*
-+		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
-+		 * is not set, form the DMA descriptor
-+		 */
-+		if (!(flags & NAND_BAM_NO_EOT)) {
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+							  DMA_PREP_INTERRUPT);
-+			if (ret)
-+				return ret;
-+		}
-+	}
-+
-+	return 0;
-+}
-+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
-+
-+/**
-+ * qcom_prep_adm_dma_desc() - Prepare descriptor for adma
-+ * @nandc: qpic nand controller
-+ * @read: read or write type
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: adm dma transaction size in bytes
-+ * @flow_control: flow controller
-+ *
-+ * This function will prepare descriptor for adma
-+ */
-+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
-+			   int reg_off, const void *vaddr, int size,
-+			   bool flow_control)
-+{
-+	struct qcom_adm_peripheral_config periph_conf = {};
-+	struct dma_async_tx_descriptor *dma_desc;
-+	struct dma_slave_config slave_conf = {0};
-+	enum dma_transfer_direction dir_eng;
-+	struct desc_info *desc;
-+	struct scatterlist *sgl;
-+	int ret;
-+
-+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-+	if (!desc)
-+		return -ENOMEM;
-+
-+	sgl = &desc->adm_sgl;
-+
-+	sg_init_one(sgl, vaddr, size);
-+
-+	if (read) {
-+		dir_eng = DMA_DEV_TO_MEM;
-+		desc->dir = DMA_FROM_DEVICE;
-+	} else {
-+		dir_eng = DMA_MEM_TO_DEV;
-+		desc->dir = DMA_TO_DEVICE;
-+	}
-+
-+	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
-+	if (!ret) {
-+		ret = -ENOMEM;
-+		goto err;
-+	}
-+
-+	slave_conf.device_fc = flow_control;
-+	if (read) {
-+		slave_conf.src_maxburst = 16;
-+		slave_conf.src_addr = nandc->base_dma + reg_off;
-+		if (nandc->data_crci) {
-+			periph_conf.crci = nandc->data_crci;
-+			slave_conf.peripheral_config = &periph_conf;
-+			slave_conf.peripheral_size = sizeof(periph_conf);
-+		}
-+	} else {
-+		slave_conf.dst_maxburst = 16;
-+		slave_conf.dst_addr = nandc->base_dma + reg_off;
-+		if (nandc->cmd_crci) {
-+			periph_conf.crci = nandc->cmd_crci;
-+			slave_conf.peripheral_config = &periph_conf;
-+			slave_conf.peripheral_size = sizeof(periph_conf);
-+		}
-+	}
-+
-+	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
-+	if (ret) {
-+		dev_err(nandc->dev, "failed to configure dma channel\n");
-+		goto err;
-+	}
-+
-+	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
-+	if (!dma_desc) {
-+		dev_err(nandc->dev, "failed to prepare desc\n");
-+		ret = -EINVAL;
-+		goto err;
-+	}
-+
-+	desc->dma_desc = dma_desc;
-+
-+	list_add_tail(&desc->node, &nandc->desc_list);
-+
-+	return 0;
-+err:
-+	kfree(desc);
-+
-+	return ret;
-+}
-+EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
-+
-+/**
-+ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
-+ * @nandc: qpic nand controller
-+ * @first: offset of the first register in the contiguous block
-+ * @num_regs: number of registers to read
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a descriptor to read a given number of
-+ * contiguous registers to the reg_read_buf pointer.
-+ */
-+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
-+		      int num_regs, unsigned int flags)
-+{
-+	bool flow_control = false;
-+	void *vaddr;
-+
-+	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
-+	nandc->reg_read_pos += num_regs;
-+
-+	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
-+		first = dev_cmd_reg_addr(nandc, first);
-+
-+	if (nandc->props->supports_bam)
-+		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
-+					     num_regs, flags);
-+
-+	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-+		flow_control = true;
-+
-+	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
-+				      num_regs * sizeof(u32), flow_control);
-+}
-+EXPORT_SYMBOL(qcom_read_reg_dma);
-+
-+/**
-+ * qcom_write_reg_dma() - write a given number of registers
-+ * @nandc: qpic nand controller
-+ * @vaddr: contiguous memory from where register value will
-+ *	   be written
-+ * @first: offset of the first register in the contiguous block
-+ * @num_regs: number of registers to write
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a descriptor to write a given number of
-+ * contiguous registers
-+ */
-+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
-+		       int first, int num_regs, unsigned int flags)
-+{
-+	bool flow_control = false;
-+
-+	if (first == NAND_EXEC_CMD)
-+		flags |= NAND_BAM_NWD;
-+
-+	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
-+		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
-+
-+	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
-+		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-+
-+	if (nandc->props->supports_bam)
-+		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
-+						  num_regs, flags);
-+
-+	if (first == NAND_FLASH_CMD)
-+		flow_control = true;
-+
-+	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
-+				      num_regs * sizeof(u32), flow_control);
-+}
-+EXPORT_SYMBOL(qcom_write_reg_dma);
-+
-+/**
-+ * qcom_read_data_dma() - transfer data
-+ * @nandc: qpic nand controller
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to write to
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a DMA descriptor to transfer data from the
-+ * controller's internal buffer to the buffer 'vaddr'
-+ */
-+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+		       const u8 *vaddr, int size, unsigned int flags)
-+{
-+	if (nandc->props->supports_bam)
-+		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-+
-+	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-+}
-+EXPORT_SYMBOL(qcom_read_data_dma);
-+
-+/**
-+ * qcom_write_data_dma() - transfer data
-+ * @nandc: qpic nand controller
-+ * @reg_off: offset within the controller's data buffer
-+ * @vaddr: virtual address of the buffer we want to read from
-+ * @size: DMA transaction size in bytes
-+ * @flags: flags to control DMA descriptor preparation
-+ *
-+ * This function will prepares a DMA descriptor to transfer data from
-+ * 'vaddr' to the controller's internal buffer
-+ */
-+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-+			const u8 *vaddr, int size, unsigned int flags)
-+{
-+	if (nandc->props->supports_bam)
-+		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-+
-+	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-+}
-+EXPORT_SYMBOL(qcom_write_data_dma);
-+
-+/**
-+ * qcom_submit_descs() - submit dma descriptor
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will submit all the prepared dma descriptor
-+ * cmd or data descriptor
-+ */
-+int qcom_submit_descs(struct qcom_nand_controller *nandc)
-+{
-+	struct desc_info *desc, *n;
-+	dma_cookie_t cookie = 0;
-+	struct bam_transaction *bam_txn = nandc->bam_txn;
-+	int ret = 0;
-+
-+	if (nandc->props->supports_bam) {
-+		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
-+			if (ret)
-+				goto err_unmap_free_desc;
-+		}
-+
-+		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
-+							  DMA_PREP_INTERRUPT);
-+			if (ret)
-+				goto err_unmap_free_desc;
-+		}
-+
-+		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
-+			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
-+							  DMA_PREP_CMD);
-+			if (ret)
-+				goto err_unmap_free_desc;
-+		}
-+	}
-+
-+	list_for_each_entry(desc, &nandc->desc_list, node)
-+		cookie = dmaengine_submit(desc->dma_desc);
-+
-+	if (nandc->props->supports_bam) {
-+		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
-+		bam_txn->last_cmd_desc->callback_param = bam_txn;
-+
-+		dma_async_issue_pending(nandc->tx_chan);
-+		dma_async_issue_pending(nandc->rx_chan);
-+		dma_async_issue_pending(nandc->cmd_chan);
-+
-+		if (!wait_for_completion_timeout(&bam_txn->txn_done,
-+						 QPIC_NAND_COMPLETION_TIMEOUT))
-+			ret = -ETIMEDOUT;
-+	} else {
-+		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
-+			ret = -ETIMEDOUT;
-+	}
-+
-+err_unmap_free_desc:
-+	/*
-+	 * Unmap the dma sg_list and free the desc allocated by both
-+	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
-+	 */
-+	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-+		list_del(&desc->node);
-+
-+		if (nandc->props->supports_bam)
-+			dma_unmap_sg(nandc->dev, desc->bam_sgl,
-+				     desc->sgl_cnt, desc->dir);
-+		else
-+			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
-+				     desc->dir);
-+
-+		kfree(desc);
-+	}
-+
-+	return ret;
-+}
-+EXPORT_SYMBOL(qcom_submit_descs);
-+
-+/**
-+ * qcom_clear_read_regs() - reset the read register buffer
-+ * @nandc: qpic nand controller
-+ *
-+ * This function reset the register read buffer for next NAND operation
-+ */
-+void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
-+{
-+	nandc->reg_read_pos = 0;
-+	qcom_nandc_dev_to_mem(nandc, false);
-+}
-+EXPORT_SYMBOL(qcom_clear_read_regs);
-+
-+/**
-+ * qcom_nandc_unalloc() - unallocate qpic nand controller
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will unallocate memory alloacted for qpic nand controller
-+ */
-+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-+{
-+	if (nandc->props->supports_bam) {
-+		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
-+			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
-+					 MAX_REG_RD *
-+					 sizeof(*nandc->reg_read_buf),
-+					 DMA_FROM_DEVICE);
-+
-+		if (nandc->tx_chan)
-+			dma_release_channel(nandc->tx_chan);
-+
-+		if (nandc->rx_chan)
-+			dma_release_channel(nandc->rx_chan);
-+
-+		if (nandc->cmd_chan)
-+			dma_release_channel(nandc->cmd_chan);
-+	} else {
-+		if (nandc->chan)
-+			dma_release_channel(nandc->chan);
-+	}
-+}
-+EXPORT_SYMBOL(qcom_nandc_unalloc);
-+
-+/**
-+ * qcom_nandc_alloc() - Allocate qpic nand controller
-+ * @nandc: qpic nand controller
-+ *
-+ * This function will allocate memory for qpic nand controller
-+ */
-+int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-+{
-+	int ret;
-+
-+	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
-+	if (ret) {
-+		dev_err(nandc->dev, "failed to set DMA mask\n");
-+		return ret;
-+	}
-+
-+	/*
-+	 * we use the internal buffer for reading ONFI params, reading small
-+	 * data like ID and status, and preforming read-copy-write operations
-+	 * when writing to a codeword partially. 532 is the maximum possible
-+	 * size of a codeword for our nand controller
-+	 */
-+	nandc->buf_size = 532;
-+
-+	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
-+	if (!nandc->data_buffer)
-+		return -ENOMEM;
-+
-+	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
-+	if (!nandc->regs)
-+		return -ENOMEM;
-+
-+	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
-+					   sizeof(*nandc->reg_read_buf),
-+					   GFP_KERNEL);
-+	if (!nandc->reg_read_buf)
-+		return -ENOMEM;
-+
-+	if (nandc->props->supports_bam) {
-+		nandc->reg_read_dma =
-+			dma_map_single(nandc->dev, nandc->reg_read_buf,
-+				       MAX_REG_RD *
-+				       sizeof(*nandc->reg_read_buf),
-+				       DMA_FROM_DEVICE);
-+		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
-+			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
-+			return -EIO;
-+		}
-+
-+		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
-+		if (IS_ERR(nandc->tx_chan)) {
-+			ret = PTR_ERR(nandc->tx_chan);
-+			nandc->tx_chan = NULL;
-+			dev_err_probe(nandc->dev, ret,
-+				      "tx DMA channel request failed\n");
-+			goto unalloc;
-+		}
-+
-+		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
-+		if (IS_ERR(nandc->rx_chan)) {
-+			ret = PTR_ERR(nandc->rx_chan);
-+			nandc->rx_chan = NULL;
-+			dev_err_probe(nandc->dev, ret,
-+				      "rx DMA channel request failed\n");
-+			goto unalloc;
-+		}
-+
-+		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
-+		if (IS_ERR(nandc->cmd_chan)) {
-+			ret = PTR_ERR(nandc->cmd_chan);
-+			nandc->cmd_chan = NULL;
-+			dev_err_probe(nandc->dev, ret,
-+				      "cmd DMA channel request failed\n");
-+			goto unalloc;
-+		}
-+
-+		/*
-+		 * Initially allocate BAM transaction to read ONFI param page.
-+		 * After detecting all the devices, this BAM transaction will
-+		 * be freed and the next BAM transaction will be allocated with
-+		 * maximum codeword size
-+		 */
-+		nandc->max_cwperpage = 1;
-+		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
-+		if (!nandc->bam_txn) {
-+			dev_err(nandc->dev,
-+				"failed to allocate bam transaction\n");
-+			ret = -ENOMEM;
-+			goto unalloc;
-+		}
-+	} else {
-+		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
-+		if (IS_ERR(nandc->chan)) {
-+			ret = PTR_ERR(nandc->chan);
-+			nandc->chan = NULL;
-+			dev_err_probe(nandc->dev, ret,
-+				      "rxtx DMA channel request failed\n");
-+			return ret;
-+		}
-+	}
-+
-+	INIT_LIST_HEAD(&nandc->desc_list);
-+	INIT_LIST_HEAD(&nandc->host_list);
-+
-+	return 0;
-+unalloc:
-+	qcom_nandc_unalloc(nandc);
-+	return ret;
-+}
-+EXPORT_SYMBOL(qcom_nandc_alloc);
-+
-+MODULE_DESCRIPTION("QPIC controller common api");
-+MODULE_LICENSE("GPL");
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -15,417 +15,7 @@
- #include <linux/of.h>
- #include <linux/platform_device.h>
- #include <linux/slab.h>
--
--/* NANDc reg offsets */
--#define	NAND_FLASH_CMD			0x00
--#define	NAND_ADDR0			0x04
--#define	NAND_ADDR1			0x08
--#define	NAND_FLASH_CHIP_SELECT		0x0c
--#define	NAND_EXEC_CMD			0x10
--#define	NAND_FLASH_STATUS		0x14
--#define	NAND_BUFFER_STATUS		0x18
--#define	NAND_DEV0_CFG0			0x20
--#define	NAND_DEV0_CFG1			0x24
--#define	NAND_DEV0_ECC_CFG		0x28
--#define	NAND_AUTO_STATUS_EN		0x2c
--#define	NAND_DEV1_CFG0			0x30
--#define	NAND_DEV1_CFG1			0x34
--#define	NAND_READ_ID			0x40
--#define	NAND_READ_STATUS		0x44
--#define	NAND_DEV_CMD0			0xa0
--#define	NAND_DEV_CMD1			0xa4
--#define	NAND_DEV_CMD2			0xa8
--#define	NAND_DEV_CMD_VLD		0xac
--#define	SFLASHC_BURST_CFG		0xe0
--#define	NAND_ERASED_CW_DETECT_CFG	0xe8
--#define	NAND_ERASED_CW_DETECT_STATUS	0xec
--#define	NAND_EBI2_ECC_BUF_CFG		0xf0
--#define	FLASH_BUF_ACC			0x100
--
--#define	NAND_CTRL			0xf00
--#define	NAND_VERSION			0xf08
--#define	NAND_READ_LOCATION_0		0xf20
--#define	NAND_READ_LOCATION_1		0xf24
--#define	NAND_READ_LOCATION_2		0xf28
--#define	NAND_READ_LOCATION_3		0xf2c
--#define	NAND_READ_LOCATION_LAST_CW_0	0xf40
--#define	NAND_READ_LOCATION_LAST_CW_1	0xf44
--#define	NAND_READ_LOCATION_LAST_CW_2	0xf48
--#define	NAND_READ_LOCATION_LAST_CW_3	0xf4c
--
--/* dummy register offsets, used by qcom_write_reg_dma */
--#define	NAND_DEV_CMD1_RESTORE		0xdead
--#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
--
--/* NAND_FLASH_CMD bits */
--#define	PAGE_ACC			BIT(4)
--#define	LAST_PAGE			BIT(5)
--
--/* NAND_FLASH_CHIP_SELECT bits */
--#define	NAND_DEV_SEL			0
--#define	DM_EN				BIT(2)
--
--/* NAND_FLASH_STATUS bits */
--#define	FS_OP_ERR			BIT(4)
--#define	FS_READY_BSY_N			BIT(5)
--#define	FS_MPU_ERR			BIT(8)
--#define	FS_DEVICE_STS_ERR		BIT(16)
--#define	FS_DEVICE_WP			BIT(23)
--
--/* NAND_BUFFER_STATUS bits */
--#define	BS_UNCORRECTABLE_BIT		BIT(8)
--#define	BS_CORRECTABLE_ERR_MSK		0x1f
--
--/* NAND_DEVn_CFG0 bits */
--#define	DISABLE_STATUS_AFTER_WRITE	4
--#define	CW_PER_PAGE			6
--#define	UD_SIZE_BYTES			9
--#define	UD_SIZE_BYTES_MASK		GENMASK(18, 9)
--#define	ECC_PARITY_SIZE_BYTES_RS	19
--#define	SPARE_SIZE_BYTES		23
--#define	SPARE_SIZE_BYTES_MASK		GENMASK(26, 23)
--#define	NUM_ADDR_CYCLES			27
--#define	STATUS_BFR_READ			30
--#define	SET_RD_MODE_AFTER_STATUS	31
--
--/* NAND_DEVn_CFG0 bits */
--#define	DEV0_CFG1_ECC_DISABLE		0
--#define	WIDE_FLASH			1
--#define	NAND_RECOVERY_CYCLES		2
--#define	CS_ACTIVE_BSY			5
--#define	BAD_BLOCK_BYTE_NUM		6
--#define	BAD_BLOCK_IN_SPARE_AREA		16
--#define	WR_RD_BSY_GAP			17
--#define	ENABLE_BCH_ECC			27
--
--/* NAND_DEV0_ECC_CFG bits */
--#define	ECC_CFG_ECC_DISABLE		0
--#define	ECC_SW_RESET			1
--#define	ECC_MODE			4
--#define	ECC_PARITY_SIZE_BYTES_BCH	8
--#define	ECC_NUM_DATA_BYTES		16
--#define	ECC_NUM_DATA_BYTES_MASK		GENMASK(25, 16)
--#define	ECC_FORCE_CLK_OPEN		30
--
--/* NAND_DEV_CMD1 bits */
--#define	READ_ADDR			0
--
--/* NAND_DEV_CMD_VLD bits */
--#define	READ_START_VLD			BIT(0)
--#define	READ_STOP_VLD			BIT(1)
--#define	WRITE_START_VLD			BIT(2)
--#define	ERASE_START_VLD			BIT(3)
--#define	SEQ_READ_START_VLD		BIT(4)
--
--/* NAND_EBI2_ECC_BUF_CFG bits */
--#define	NUM_STEPS			0
--
--/* NAND_ERASED_CW_DETECT_CFG bits */
--#define	ERASED_CW_ECC_MASK		1
--#define	AUTO_DETECT_RES			0
--#define	MASK_ECC			BIT(ERASED_CW_ECC_MASK)
--#define	RESET_ERASED_DET		BIT(AUTO_DETECT_RES)
--#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
--#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
--#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)
--
--/* NAND_ERASED_CW_DETECT_STATUS bits */
--#define	PAGE_ALL_ERASED			BIT(7)
--#define	CODEWORD_ALL_ERASED		BIT(6)
--#define	PAGE_ERASED			BIT(5)
--#define	CODEWORD_ERASED			BIT(4)
--#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
--#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
--
--/* NAND_READ_LOCATION_n bits */
--#define READ_LOCATION_OFFSET		0
--#define READ_LOCATION_SIZE		16
--#define READ_LOCATION_LAST		31
--
--/* Version Mask */
--#define	NAND_VERSION_MAJOR_MASK		0xf0000000
--#define	NAND_VERSION_MAJOR_SHIFT	28
--#define	NAND_VERSION_MINOR_MASK		0x0fff0000
--#define	NAND_VERSION_MINOR_SHIFT	16
--
--/* NAND OP_CMDs */
--#define	OP_PAGE_READ			0x2
--#define	OP_PAGE_READ_WITH_ECC		0x3
--#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
--#define	OP_PAGE_READ_ONFI_READ		0x5
--#define	OP_PROGRAM_PAGE			0x6
--#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
--#define	OP_PROGRAM_PAGE_SPARE		0x9
--#define	OP_BLOCK_ERASE			0xa
--#define	OP_CHECK_STATUS			0xc
--#define	OP_FETCH_ID			0xb
--#define	OP_RESET_DEVICE			0xd
--
--/* Default Value for NAND_DEV_CMD_VLD */
--#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
--					 ERASE_START_VLD | SEQ_READ_START_VLD)
--
--/* NAND_CTRL bits */
--#define	BAM_MODE_EN			BIT(0)
--
--/*
-- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
-- * the driver calls the chunks 'step' or 'codeword' interchangeably
-- */
--#define	NANDC_STEP_SIZE			512
--
--/*
-- * the largest page size we support is 8K, this will have 16 steps/codewords
-- * of 512 bytes each
-- */
--#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)
--
--/* we read at most 3 registers per codeword scan */
--#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)
--
--/* ECC modes supported by the controller */
--#define	ECC_NONE	BIT(0)
--#define	ECC_RS_4BIT	BIT(1)
--#define	ECC_BCH_4BIT	BIT(2)
--#define	ECC_BCH_8BIT	BIT(3)
--
--/*
-- * Returns the actual register address for all NAND_DEV_ registers
-- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-- */
--#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
--
--/* Returns the NAND register physical address */
--#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
--
--/* Returns the dma address for reg read buffer */
--#define reg_buf_dma_addr(chip, vaddr) \
--	((chip)->reg_read_dma + \
--	((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
--
--#define QPIC_PER_CW_CMD_ELEMENTS	32
--#define QPIC_PER_CW_CMD_SGL		32
--#define QPIC_PER_CW_DATA_SGL		8
--
--#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
--
--/*
-- * Flags used in DMA descriptor preparation helper functions
-- * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-- */
--/* Don't set the EOT in current tx BAM sgl */
--#define NAND_BAM_NO_EOT			BIT(0)
--/* Set the NWD flag in current BAM sgl */
--#define NAND_BAM_NWD			BIT(1)
--/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
--#define NAND_BAM_NEXT_SGL		BIT(2)
--/*
-- * Erased codeword status is being used two times in single transfer so this
-- * flag will determine the current value of erased codeword status register
-- */
--#define NAND_ERASED_CW_SET		BIT(4)
--
--#define MAX_ADDRESS_CYCLE		5
--
--/*
-- * This data type corresponds to the BAM transaction which will be used for all
-- * NAND transfers.
-- * @bam_ce - the array of BAM command elements
-- * @cmd_sgl - sgl for NAND BAM command pipe
-- * @data_sgl - sgl for NAND BAM consumer/producer pipe
-- * @last_data_desc - last DMA desc in data channel (tx/rx).
-- * @last_cmd_desc - last DMA desc in command channel.
-- * @txn_done - completion for NAND transfer.
-- * @bam_ce_pos - the index in bam_ce which is available for next sgl
-- * @bam_ce_start - the index in bam_ce which marks the start position ce
-- *		   for current sgl. It will be used for size calculation
-- *		   for current sgl
-- * @cmd_sgl_pos - current index in command sgl.
-- * @cmd_sgl_start - start index in command sgl.
-- * @tx_sgl_pos - current index in data sgl for tx.
-- * @tx_sgl_start - start index in data sgl for tx.
-- * @rx_sgl_pos - current index in data sgl for rx.
-- * @rx_sgl_start - start index in data sgl for rx.
-- */
--struct bam_transaction {
--	struct bam_cmd_element *bam_ce;
--	struct scatterlist *cmd_sgl;
--	struct scatterlist *data_sgl;
--	struct dma_async_tx_descriptor *last_data_desc;
--	struct dma_async_tx_descriptor *last_cmd_desc;
--	struct completion txn_done;
--	u32 bam_ce_pos;
--	u32 bam_ce_start;
--	u32 cmd_sgl_pos;
--	u32 cmd_sgl_start;
--	u32 tx_sgl_pos;
--	u32 tx_sgl_start;
--	u32 rx_sgl_pos;
--	u32 rx_sgl_start;
--};
--
--/*
-- * This data type corresponds to the nand dma descriptor
-- * @dma_desc - low level DMA engine descriptor
-- * @list - list for desc_info
-- *
-- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
-- *	      ADM
-- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
-- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
-- * @dir - DMA transfer direction
-- */
--struct desc_info {
--	struct dma_async_tx_descriptor *dma_desc;
--	struct list_head node;
--
--	union {
--		struct scatterlist adm_sgl;
--		struct {
--			struct scatterlist *bam_sgl;
--			int sgl_cnt;
--		};
--	};
--	enum dma_data_direction dir;
--};
--
--/*
-- * holds the current register values that we want to write. acts as a contiguous
-- * chunk of memory which we use to write the controller registers through DMA.
-- */
--struct nandc_regs {
--	__le32 cmd;
--	__le32 addr0;
--	__le32 addr1;
--	__le32 chip_sel;
--	__le32 exec;
--
--	__le32 cfg0;
--	__le32 cfg1;
--	__le32 ecc_bch_cfg;
--
--	__le32 clrflashstatus;
--	__le32 clrreadstatus;
--
--	__le32 cmd1;
--	__le32 vld;
--
--	__le32 orig_cmd1;
--	__le32 orig_vld;
--
--	__le32 ecc_buf_cfg;
--	__le32 read_location0;
--	__le32 read_location1;
--	__le32 read_location2;
--	__le32 read_location3;
--	__le32 read_location_last0;
--	__le32 read_location_last1;
--	__le32 read_location_last2;
--	__le32 read_location_last3;
--
--	__le32 erased_cw_detect_cfg_clr;
--	__le32 erased_cw_detect_cfg_set;
--};
--
--/*
-- * NAND controller data struct
-- *
-- * @dev:			parent device
-- *
-- * @base:			MMIO base
-- *
-- * @core_clk:			controller clock
-- * @aon_clk:			another controller clock
-- *
-- * @regs:			a contiguous chunk of memory for DMA register
-- *				writes. contains the register values to be
-- *				written to controller
-- *
-- * @props:			properties of current NAND controller,
-- *				initialized via DT match data
-- *
-- * @controller:			base controller structure
-- * @host_list:			list containing all the chips attached to the
-- *				controller
-- *
-- * @chan:			dma channel
-- * @cmd_crci:			ADM DMA CRCI for command flow control
-- * @data_crci:			ADM DMA CRCI for data flow control
-- *
-- * @desc_list:			DMA descriptor list (list of desc_infos)
-- *
-- * @data_buffer:		our local DMA buffer for page read/writes,
-- *				used when we can't use the buffer provided
-- *				by upper layers directly
-- * @reg_read_buf:		local buffer for reading back registers via DMA
-- *
-- * @base_phys:			physical base address of controller registers
-- * @base_dma:			dma base address of controller registers
-- * @reg_read_dma:		contains dma address for register read buffer
-- *
-- * @buf_size/count/start:	markers for chip->legacy.read_buf/write_buf
-- *				functions
-- * @max_cwperpage:		maximum QPIC codewords required. calculated
-- *				from all connected NAND devices pagesize
-- *
-- * @reg_read_pos:		marker for data read in reg_read_buf
-- *
-- * @cmd1/vld:			some fixed controller register values
-- *
-- * @exec_opwrite:		flag to select correct number of code word
-- *				while reading status
-- */
--struct qcom_nand_controller {
--	struct device *dev;
--
--	void __iomem *base;
--
--	struct clk *core_clk;
--	struct clk *aon_clk;
--
--	struct nandc_regs *regs;
--	struct bam_transaction *bam_txn;
--
--	const struct qcom_nandc_props *props;
--
--	struct nand_controller controller;
--	struct list_head host_list;
--
--	union {
--		/* will be used only by QPIC for BAM DMA */
--		struct {
--			struct dma_chan *tx_chan;
--			struct dma_chan *rx_chan;
--			struct dma_chan *cmd_chan;
--		};
--
--		/* will be used only by EBI2 for ADM DMA */
--		struct {
--			struct dma_chan *chan;
--			unsigned int cmd_crci;
--			unsigned int data_crci;
--		};
--	};
--
--	struct list_head desc_list;
--
--	u8		*data_buffer;
--	__le32		*reg_read_buf;
--
--	phys_addr_t base_phys;
--	dma_addr_t base_dma;
--	dma_addr_t reg_read_dma;
--
--	int		buf_size;
--	int		buf_count;
--	int		buf_start;
--	unsigned int	max_cwperpage;
--
--	int reg_read_pos;
--
--	u32 cmd1, vld;
--	bool exec_opwrite;
--};
-+#include <linux/mtd/nand-qpic-common.h>
- 
- /*
-  * NAND special boot partitions
-@@ -530,97 +120,6 @@ struct qcom_nand_host {
- 	bool bch_enabled;
- };
- 
--/*
-- * This data type corresponds to the NAND controller properties which varies
-- * among different NAND controllers.
-- * @ecc_modes - ecc mode for NAND
-- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-- * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
-- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-- * @qpic_version2 - flag to indicate QPIC IP version 2
-- * @use_codeword_fixup - whether NAND has different layout for boot partitions
-- */
--struct qcom_nandc_props {
--	u32 ecc_modes;
--	u32 dev_cmd_reg_start;
--	bool supports_bam;
--	bool nandc_part_of_qpic;
--	bool qpic_version2;
--	bool use_codeword_fixup;
--};
--
--/* Frees the BAM transaction memory */
--static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
--{
--	struct bam_transaction *bam_txn = nandc->bam_txn;
--
--	devm_kfree(nandc->dev, bam_txn);
--}
--
--/* Allocates and Initializes the BAM transaction */
--static struct bam_transaction *
--qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
--{
--	struct bam_transaction *bam_txn;
--	size_t bam_txn_size;
--	unsigned int num_cw = nandc->max_cwperpage;
--	void *bam_txn_buf;
--
--	bam_txn_size =
--		sizeof(*bam_txn) + num_cw *
--		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
--		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
--		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
--
--	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
--	if (!bam_txn_buf)
--		return NULL;
--
--	bam_txn = bam_txn_buf;
--	bam_txn_buf += sizeof(*bam_txn);
--
--	bam_txn->bam_ce = bam_txn_buf;
--	bam_txn_buf +=
--		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
--
--	bam_txn->cmd_sgl = bam_txn_buf;
--	bam_txn_buf +=
--		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
--
--	bam_txn->data_sgl = bam_txn_buf;
--
--	init_completion(&bam_txn->txn_done);
--
--	return bam_txn;
--}
--
--/* Clears the BAM transaction indexes */
--static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
--{
--	struct bam_transaction *bam_txn = nandc->bam_txn;
--
--	if (!nandc->props->supports_bam)
--		return;
--
--	memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
--	bam_txn->last_data_desc = NULL;
--
--	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
--		      QPIC_PER_CW_CMD_SGL);
--	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
--		      QPIC_PER_CW_DATA_SGL);
--
--	reinit_completion(&bam_txn->txn_done);
--}
--
--/* Callback for DMA descriptor completion */
--static void qcom_qpic_bam_dma_done(void *data)
--{
--	struct bam_transaction *bam_txn = data;
--
--	complete(&bam_txn->txn_done);
--}
--
- static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
- {
- 	return container_of(chip, struct qcom_nand_host, chip);
-@@ -629,8 +128,8 @@ static struct qcom_nand_host *to_qcom_na
- static struct qcom_nand_controller *
- get_qcom_nand_controller(struct nand_chip *chip)
- {
--	return container_of(chip->controller, struct qcom_nand_controller,
--			    controller);
-+	return (struct qcom_nand_controller *)
-+		((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
- }
- 
- static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
-@@ -644,23 +143,6 @@ static void nandc_write(struct qcom_nand
- 	iowrite32(val, nandc->base + offset);
- }
- 
--static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
--{
--	if (!nandc->props->supports_bam)
--		return;
--
--	if (is_cpu)
--		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
--					MAX_REG_RD *
--					sizeof(*nandc->reg_read_buf),
--					DMA_FROM_DEVICE);
--	else
--		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
--					   MAX_REG_RD *
--					   sizeof(*nandc->reg_read_buf),
--					   DMA_FROM_DEVICE);
--}
--
- /* Helper to check whether this is the last CW or not */
- static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
- {
-@@ -820,356 +302,6 @@ static void update_rw_regs(struct qcom_n
- }
- 
- /*
-- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
-- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
-- * which will be submitted to DMA engine.
-- */
--static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
--				       struct dma_chan *chan,
--				       unsigned long flags)
--{
--	struct desc_info *desc;
--	struct scatterlist *sgl;
--	unsigned int sgl_cnt;
--	int ret;
--	struct bam_transaction *bam_txn = nandc->bam_txn;
--	enum dma_transfer_direction dir_eng;
--	struct dma_async_tx_descriptor *dma_desc;
--
--	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
--	if (!desc)
--		return -ENOMEM;
--
--	if (chan == nandc->cmd_chan) {
--		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
--		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
--		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
--		dir_eng = DMA_MEM_TO_DEV;
--		desc->dir = DMA_TO_DEVICE;
--	} else if (chan == nandc->tx_chan) {
--		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
--		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
--		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
--		dir_eng = DMA_MEM_TO_DEV;
--		desc->dir = DMA_TO_DEVICE;
--	} else {
--		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
--		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
--		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
--		dir_eng = DMA_DEV_TO_MEM;
--		desc->dir = DMA_FROM_DEVICE;
--	}
--
--	sg_mark_end(sgl + sgl_cnt - 1);
--	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
--	if (ret == 0) {
--		dev_err(nandc->dev, "failure in mapping desc\n");
--		kfree(desc);
--		return -ENOMEM;
--	}
--
--	desc->sgl_cnt = sgl_cnt;
--	desc->bam_sgl = sgl;
--
--	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
--					   flags);
--
--	if (!dma_desc) {
--		dev_err(nandc->dev, "failure in prep desc\n");
--		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
--		kfree(desc);
--		return -EINVAL;
--	}
--
--	desc->dma_desc = dma_desc;
--
--	/* update last data/command descriptor */
--	if (chan == nandc->cmd_chan)
--		bam_txn->last_cmd_desc = dma_desc;
--	else
--		bam_txn->last_data_desc = dma_desc;
--
--	list_add_tail(&desc->node, &nandc->desc_list);
--
--	return 0;
--}
--
--/*
-- * Prepares the command descriptor for BAM DMA which will be used for NAND
-- * register reads and writes. The command descriptor requires the command
-- * to be formed in command element type so this function uses the command
-- * element from bam transaction ce array and fills the same with required
-- * data. A single SGL can contain multiple command elements so
-- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
-- * after the current command element.
-- */
--static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
--				      int reg_off, const void *vaddr,
--				      int size, unsigned int flags)
--{
--	int bam_ce_size;
--	int i, ret;
--	struct bam_cmd_element *bam_ce_buffer;
--	struct bam_transaction *bam_txn = nandc->bam_txn;
--
--	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
--
--	/* fill the command desc */
--	for (i = 0; i < size; i++) {
--		if (read)
--			bam_prep_ce(&bam_ce_buffer[i],
--				    nandc_reg_phys(nandc, reg_off + 4 * i),
--				    BAM_READ_COMMAND,
--				    reg_buf_dma_addr(nandc,
--						     (__le32 *)vaddr + i));
--		else
--			bam_prep_ce_le32(&bam_ce_buffer[i],
--					 nandc_reg_phys(nandc, reg_off + 4 * i),
--					 BAM_WRITE_COMMAND,
--					 *((__le32 *)vaddr + i));
--	}
--
--	bam_txn->bam_ce_pos += size;
--
--	/* use the separate sgl after this command */
--	if (flags & NAND_BAM_NEXT_SGL) {
--		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
--		bam_ce_size = (bam_txn->bam_ce_pos -
--				bam_txn->bam_ce_start) *
--				sizeof(struct bam_cmd_element);
--		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
--			   bam_ce_buffer, bam_ce_size);
--		bam_txn->cmd_sgl_pos++;
--		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
--
--		if (flags & NAND_BAM_NWD) {
--			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
--							  DMA_PREP_FENCE |
--							  DMA_PREP_CMD);
--			if (ret)
--				return ret;
--		}
--	}
--
--	return 0;
--}
--
--/*
-- * Prepares the data descriptor for BAM DMA which will be used for NAND
-- * data reads and writes.
-- */
--static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
--				       const void *vaddr, int size, unsigned int flags)
--{
--	int ret;
--	struct bam_transaction *bam_txn = nandc->bam_txn;
--
--	if (read) {
--		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
--			   vaddr, size);
--		bam_txn->rx_sgl_pos++;
--	} else {
--		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
--			   vaddr, size);
--		bam_txn->tx_sgl_pos++;
--
--		/*
--		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
--		 * is not set, form the DMA descriptor
--		 */
--		if (!(flags & NAND_BAM_NO_EOT)) {
--			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
--							  DMA_PREP_INTERRUPT);
--			if (ret)
--				return ret;
--		}
--	}
--
--	return 0;
--}
--
--static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
--				  int reg_off, const void *vaddr, int size,
--				  bool flow_control)
--{
--	struct desc_info *desc;
--	struct dma_async_tx_descriptor *dma_desc;
--	struct scatterlist *sgl;
--	struct dma_slave_config slave_conf;
--	struct qcom_adm_peripheral_config periph_conf = {};
--	enum dma_transfer_direction dir_eng;
--	int ret;
--
--	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
--	if (!desc)
--		return -ENOMEM;
--
--	sgl = &desc->adm_sgl;
--
--	sg_init_one(sgl, vaddr, size);
--
--	if (read) {
--		dir_eng = DMA_DEV_TO_MEM;
--		desc->dir = DMA_FROM_DEVICE;
--	} else {
--		dir_eng = DMA_MEM_TO_DEV;
--		desc->dir = DMA_TO_DEVICE;
--	}
--
--	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
--	if (ret == 0) {
--		ret = -ENOMEM;
--		goto err;
--	}
--
--	memset(&slave_conf, 0x00, sizeof(slave_conf));
--
--	slave_conf.device_fc = flow_control;
--	if (read) {
--		slave_conf.src_maxburst = 16;
--		slave_conf.src_addr = nandc->base_dma + reg_off;
--		if (nandc->data_crci) {
--			periph_conf.crci = nandc->data_crci;
--			slave_conf.peripheral_config = &periph_conf;
--			slave_conf.peripheral_size = sizeof(periph_conf);
--		}
--	} else {
--		slave_conf.dst_maxburst = 16;
--		slave_conf.dst_addr = nandc->base_dma + reg_off;
--		if (nandc->cmd_crci) {
--			periph_conf.crci = nandc->cmd_crci;
--			slave_conf.peripheral_config = &periph_conf;
--			slave_conf.peripheral_size = sizeof(periph_conf);
--		}
--	}
--
--	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
--	if (ret) {
--		dev_err(nandc->dev, "failed to configure dma channel\n");
--		goto err;
--	}
--
--	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
--	if (!dma_desc) {
--		dev_err(nandc->dev, "failed to prepare desc\n");
--		ret = -EINVAL;
--		goto err;
--	}
--
--	desc->dma_desc = dma_desc;
--
--	list_add_tail(&desc->node, &nandc->desc_list);
--
--	return 0;
--err:
--	kfree(desc);
--
--	return ret;
--}
--
--/*
-- * qcom_read_reg_dma:	prepares a descriptor to read a given number of
-- *			contiguous registers to the reg_read_buf pointer
-- *
-- * @first:		offset of the first register in the contiguous block
-- * @num_regs:		number of registers to read
-- * @flags:		flags to control DMA descriptor preparation
-- */
--static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
--			     int num_regs, unsigned int flags)
--{
--	bool flow_control = false;
--	void *vaddr;
--
--	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
--	nandc->reg_read_pos += num_regs;
--
--	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
--		first = dev_cmd_reg_addr(nandc, first);
--
--	if (nandc->props->supports_bam)
--		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
--					     num_regs, flags);
--
--	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
--		flow_control = true;
--
--	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
--				 num_regs * sizeof(u32), flow_control);
--}
--
--/*
-- * qcom_write_reg_dma:	prepares a descriptor to write a given number of
-- *			contiguous registers
-- *
-- * @vaddr:		contiguous memory from where register value will
-- *			be written
-- * @first:		offset of the first register in the contiguous block
-- * @num_regs:		number of registers to write
-- * @flags:		flags to control DMA descriptor preparation
-- */
--static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
--			      int first, int num_regs, unsigned int flags)
--{
--	bool flow_control = false;
--
--	if (first == NAND_EXEC_CMD)
--		flags |= NAND_BAM_NWD;
--
--	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
--		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
--
--	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
--		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
--
--	if (nandc->props->supports_bam)
--		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
--					     num_regs, flags);
--
--	if (first == NAND_FLASH_CMD)
--		flow_control = true;
--
--	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
--				 num_regs * sizeof(u32), flow_control);
--}
--
--/*
-- * qcom_read_data_dma:	prepares a DMA descriptor to transfer data from the
-- *			controller's internal buffer to the buffer 'vaddr'
-- *
-- * @reg_off:		offset within the controller's data buffer
-- * @vaddr:		virtual address of the buffer we want to write to
-- * @size:		DMA transaction size in bytes
-- * @flags:		flags to control DMA descriptor preparation
-- */
--static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--			      const u8 *vaddr, int size, unsigned int flags)
--{
--	if (nandc->props->supports_bam)
--		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
--
--	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
--}
--
--/*
-- * qcom_write_data_dma:	prepares a DMA descriptor to transfer data from
-- *			'vaddr' to the controller's internal buffer
-- *
-- * @reg_off:		offset within the controller's data buffer
-- * @vaddr:		virtual address of the buffer we want to read from
-- * @size:		DMA transaction size in bytes
-- * @flags:		flags to control DMA descriptor preparation
-- */
--static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
--			       const u8 *vaddr, int size, unsigned int flags)
--{
--	if (nandc->props->supports_bam)
--		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
--
--	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
--}
--
--/*
-  * Helper to prepare DMA descriptors for configuring registers
-  * before reading a NAND page.
-  */
-@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct
- 			   NAND_BAM_NEXT_SGL);
- }
- 
--/* helpers to submit/free our list of dma descriptors */
--static int qcom_submit_descs(struct qcom_nand_controller *nandc)
--{
--	struct desc_info *desc, *n;
--	dma_cookie_t cookie = 0;
--	struct bam_transaction *bam_txn = nandc->bam_txn;
--	int ret = 0;
--
--	if (nandc->props->supports_bam) {
--		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
--			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
--			if (ret)
--				goto err_unmap_free_desc;
--		}
--
--		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
--			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
--							  DMA_PREP_INTERRUPT);
--			if (ret)
--				goto err_unmap_free_desc;
--		}
--
--		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
--			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
--							  DMA_PREP_CMD);
--			if (ret)
--				goto err_unmap_free_desc;
--		}
--	}
--
--	list_for_each_entry(desc, &nandc->desc_list, node)
--		cookie = dmaengine_submit(desc->dma_desc);
--
--	if (nandc->props->supports_bam) {
--		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
--		bam_txn->last_cmd_desc->callback_param = bam_txn;
--
--		dma_async_issue_pending(nandc->tx_chan);
--		dma_async_issue_pending(nandc->rx_chan);
--		dma_async_issue_pending(nandc->cmd_chan);
--
--		if (!wait_for_completion_timeout(&bam_txn->txn_done,
--						 QPIC_NAND_COMPLETION_TIMEOUT))
--			ret = -ETIMEDOUT;
--	} else {
--		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
--			ret = -ETIMEDOUT;
--	}
--
--err_unmap_free_desc:
--	/*
--	 * Unmap the dma sg_list and free the desc allocated by both
--	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
--	 */
--	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
--		list_del(&desc->node);
--
--		if (nandc->props->supports_bam)
--			dma_unmap_sg(nandc->dev, desc->bam_sgl,
--				     desc->sgl_cnt, desc->dir);
--		else
--			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
--				     desc->dir);
--
--		kfree(desc);
--	}
--
--	return ret;
--}
--
--/* reset the register read buffer for next NAND operation */
--static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
--{
--	nandc->reg_read_pos = 0;
--	qcom_nandc_dev_to_mem(nandc, false);
--}
--
- /*
-  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
-  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
-@@ -2975,141 +2030,14 @@ static const struct nand_controller_ops
- 	.exec_op = qcom_nand_exec_op,
- };
- 
--static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
--{
--	if (nandc->props->supports_bam) {
--		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
--			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
--					 MAX_REG_RD *
--					 sizeof(*nandc->reg_read_buf),
--					 DMA_FROM_DEVICE);
--
--		if (nandc->tx_chan)
--			dma_release_channel(nandc->tx_chan);
--
--		if (nandc->rx_chan)
--			dma_release_channel(nandc->rx_chan);
--
--		if (nandc->cmd_chan)
--			dma_release_channel(nandc->cmd_chan);
--	} else {
--		if (nandc->chan)
--			dma_release_channel(nandc->chan);
--	}
--}
--
--static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
--{
--	int ret;
--
--	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
--	if (ret) {
--		dev_err(nandc->dev, "failed to set DMA mask\n");
--		return ret;
--	}
--
--	/*
--	 * we use the internal buffer for reading ONFI params, reading small
--	 * data like ID and status, and preforming read-copy-write operations
--	 * when writing to a codeword partially. 532 is the maximum possible
--	 * size of a codeword for our nand controller
--	 */
--	nandc->buf_size = 532;
--
--	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
--	if (!nandc->data_buffer)
--		return -ENOMEM;
--
--	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
--	if (!nandc->regs)
--		return -ENOMEM;
--
--	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
--					   sizeof(*nandc->reg_read_buf),
--					   GFP_KERNEL);
--	if (!nandc->reg_read_buf)
--		return -ENOMEM;
--
--	if (nandc->props->supports_bam) {
--		nandc->reg_read_dma =
--			dma_map_single(nandc->dev, nandc->reg_read_buf,
--				       MAX_REG_RD *
--				       sizeof(*nandc->reg_read_buf),
--				       DMA_FROM_DEVICE);
--		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
--			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
--			return -EIO;
--		}
--
--		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
--		if (IS_ERR(nandc->tx_chan)) {
--			ret = PTR_ERR(nandc->tx_chan);
--			nandc->tx_chan = NULL;
--			dev_err_probe(nandc->dev, ret,
--				      "tx DMA channel request failed\n");
--			goto unalloc;
--		}
--
--		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
--		if (IS_ERR(nandc->rx_chan)) {
--			ret = PTR_ERR(nandc->rx_chan);
--			nandc->rx_chan = NULL;
--			dev_err_probe(nandc->dev, ret,
--				      "rx DMA channel request failed\n");
--			goto unalloc;
--		}
--
--		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
--		if (IS_ERR(nandc->cmd_chan)) {
--			ret = PTR_ERR(nandc->cmd_chan);
--			nandc->cmd_chan = NULL;
--			dev_err_probe(nandc->dev, ret,
--				      "cmd DMA channel request failed\n");
--			goto unalloc;
--		}
--
--		/*
--		 * Initially allocate BAM transaction to read ONFI param page.
--		 * After detecting all the devices, this BAM transaction will
--		 * be freed and the next BAM transaction will be allocated with
--		 * maximum codeword size
--		 */
--		nandc->max_cwperpage = 1;
--		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
--		if (!nandc->bam_txn) {
--			dev_err(nandc->dev,
--				"failed to allocate bam transaction\n");
--			ret = -ENOMEM;
--			goto unalloc;
--		}
--	} else {
--		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
--		if (IS_ERR(nandc->chan)) {
--			ret = PTR_ERR(nandc->chan);
--			nandc->chan = NULL;
--			dev_err_probe(nandc->dev, ret,
--				      "rxtx DMA channel request failed\n");
--			return ret;
--		}
--	}
--
--	INIT_LIST_HEAD(&nandc->desc_list);
--	INIT_LIST_HEAD(&nandc->host_list);
--
--	nand_controller_init(&nandc->controller);
--	nandc->controller.ops = &qcom_nandc_ops;
--
--	return 0;
--unalloc:
--	qcom_nandc_unalloc(nandc);
--	return ret;
--}
--
- /* one time setup of a few nand controller registers */
- static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
- {
- 	u32 nand_ctrl;
- 
-+	nand_controller_init(nandc->controller);
-+	nandc->controller->ops = &qcom_nandc_ops;
-+
- 	/* kill onenand */
- 	if (!nandc->props->nandc_part_of_qpic)
- 		nandc_write(nandc, SFLASHC_BURST_CFG, 0);
-@@ -3248,7 +2176,7 @@ static int qcom_nand_host_init_and_regis
- 	chip->legacy.block_bad		= qcom_nandc_block_bad;
- 	chip->legacy.block_markbad	= qcom_nandc_block_markbad;
- 
--	chip->controller = &nandc->controller;
-+	chip->controller = nandc->controller;
- 	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
- 			 NAND_SKIP_BBTSCAN;
- 
-@@ -3331,17 +2259,21 @@ static int qcom_nandc_parse_dt(struct pl
- static int qcom_nandc_probe(struct platform_device *pdev)
- {
- 	struct qcom_nand_controller *nandc;
-+	struct nand_controller *controller;
- 	const void *dev_data;
- 	struct device *dev = &pdev->dev;
- 	struct resource *res;
- 	int ret;
- 
--	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
-+	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
-+			     GFP_KERNEL);
- 	if (!nandc)
- 		return -ENOMEM;
-+	controller = (struct nand_controller *)&nandc[1];
- 
- 	platform_set_drvdata(pdev, nandc);
- 	nandc->dev = dev;
-+	nandc->controller = controller;
- 
- 	dev_data = of_device_get_match_data(dev);
- 	if (!dev_data) {
---- /dev/null
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -0,0 +1,468 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * QCOM QPIC common APIs header file
-+ *
-+ * Copyright (c) 2023 Qualcomm Inc.
-+ * Authors:	Md sadre Alam	<[email protected]>
-+ *
-+ */
-+#ifndef __MTD_NAND_QPIC_COMMON_H__
-+#define __MTD_NAND_QPIC_COMMON_H__
-+
-+/* NANDc reg offsets */
-+#define	NAND_FLASH_CMD			0x00
-+#define	NAND_ADDR0			0x04
-+#define	NAND_ADDR1			0x08
-+#define	NAND_FLASH_CHIP_SELECT		0x0c
-+#define	NAND_EXEC_CMD			0x10
-+#define	NAND_FLASH_STATUS		0x14
-+#define	NAND_BUFFER_STATUS		0x18
-+#define	NAND_DEV0_CFG0			0x20
-+#define	NAND_DEV0_CFG1			0x24
-+#define	NAND_DEV0_ECC_CFG		0x28
-+#define	NAND_AUTO_STATUS_EN		0x2c
-+#define	NAND_DEV1_CFG0			0x30
-+#define	NAND_DEV1_CFG1			0x34
-+#define	NAND_READ_ID			0x40
-+#define	NAND_READ_STATUS		0x44
-+#define	NAND_DEV_CMD0			0xa0
-+#define	NAND_DEV_CMD1			0xa4
-+#define	NAND_DEV_CMD2			0xa8
-+#define	NAND_DEV_CMD_VLD		0xac
-+#define	SFLASHC_BURST_CFG		0xe0
-+#define	NAND_ERASED_CW_DETECT_CFG	0xe8
-+#define	NAND_ERASED_CW_DETECT_STATUS	0xec
-+#define	NAND_EBI2_ECC_BUF_CFG		0xf0
-+#define	FLASH_BUF_ACC			0x100
-+
-+#define	NAND_CTRL			0xf00
-+#define	NAND_VERSION			0xf08
-+#define	NAND_READ_LOCATION_0		0xf20
-+#define	NAND_READ_LOCATION_1		0xf24
-+#define	NAND_READ_LOCATION_2		0xf28
-+#define	NAND_READ_LOCATION_3		0xf2c
-+#define	NAND_READ_LOCATION_LAST_CW_0	0xf40
-+#define	NAND_READ_LOCATION_LAST_CW_1	0xf44
-+#define	NAND_READ_LOCATION_LAST_CW_2	0xf48
-+#define	NAND_READ_LOCATION_LAST_CW_3	0xf4c
-+
-+/* dummy register offsets, used by qcom_write_reg_dma */
-+#define	NAND_DEV_CMD1_RESTORE		0xdead
-+#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
-+
-+/* NAND_FLASH_CMD bits */
-+#define	PAGE_ACC			BIT(4)
-+#define	LAST_PAGE			BIT(5)
-+
-+/* NAND_FLASH_CHIP_SELECT bits */
-+#define	NAND_DEV_SEL			0
-+#define	DM_EN				BIT(2)
-+
-+/* NAND_FLASH_STATUS bits */
-+#define	FS_OP_ERR			BIT(4)
-+#define	FS_READY_BSY_N			BIT(5)
-+#define	FS_MPU_ERR			BIT(8)
-+#define	FS_DEVICE_STS_ERR		BIT(16)
-+#define	FS_DEVICE_WP			BIT(23)
-+
-+/* NAND_BUFFER_STATUS bits */
-+#define	BS_UNCORRECTABLE_BIT		BIT(8)
-+#define	BS_CORRECTABLE_ERR_MSK		0x1f
-+
-+/* NAND_DEVn_CFG0 bits */
-+#define	DISABLE_STATUS_AFTER_WRITE	4
-+#define	CW_PER_PAGE			6
-+#define	UD_SIZE_BYTES			9
-+#define	UD_SIZE_BYTES_MASK		GENMASK(18, 9)
-+#define	ECC_PARITY_SIZE_BYTES_RS	19
-+#define	SPARE_SIZE_BYTES		23
-+#define	SPARE_SIZE_BYTES_MASK		GENMASK(26, 23)
-+#define	NUM_ADDR_CYCLES			27
-+#define	STATUS_BFR_READ			30
-+#define	SET_RD_MODE_AFTER_STATUS	31
-+
-+/* NAND_DEVn_CFG0 bits */
-+#define	DEV0_CFG1_ECC_DISABLE		0
-+#define	WIDE_FLASH			1
-+#define	NAND_RECOVERY_CYCLES		2
-+#define	CS_ACTIVE_BSY			5
-+#define	BAD_BLOCK_BYTE_NUM		6
-+#define	BAD_BLOCK_IN_SPARE_AREA		16
-+#define	WR_RD_BSY_GAP			17
-+#define	ENABLE_BCH_ECC			27
-+
-+/* NAND_DEV0_ECC_CFG bits */
-+#define	ECC_CFG_ECC_DISABLE		0
-+#define	ECC_SW_RESET			1
-+#define	ECC_MODE			4
-+#define	ECC_PARITY_SIZE_BYTES_BCH	8
-+#define	ECC_NUM_DATA_BYTES		16
-+#define	ECC_NUM_DATA_BYTES_MASK		GENMASK(25, 16)
-+#define	ECC_FORCE_CLK_OPEN		30
-+
-+/* NAND_DEV_CMD1 bits */
-+#define	READ_ADDR			0
-+
-+/* NAND_DEV_CMD_VLD bits */
-+#define	READ_START_VLD			BIT(0)
-+#define	READ_STOP_VLD			BIT(1)
-+#define	WRITE_START_VLD			BIT(2)
-+#define	ERASE_START_VLD			BIT(3)
-+#define	SEQ_READ_START_VLD		BIT(4)
-+
-+/* NAND_EBI2_ECC_BUF_CFG bits */
-+#define	NUM_STEPS			0
-+
-+/* NAND_ERASED_CW_DETECT_CFG bits */
-+#define	ERASED_CW_ECC_MASK		1
-+#define	AUTO_DETECT_RES			0
-+#define	MASK_ECC			BIT(ERASED_CW_ECC_MASK)
-+#define	RESET_ERASED_DET		BIT(AUTO_DETECT_RES)
-+#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
-+#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
-+#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)
-+
-+/* NAND_ERASED_CW_DETECT_STATUS bits */
-+#define	PAGE_ALL_ERASED			BIT(7)
-+#define	CODEWORD_ALL_ERASED		BIT(6)
-+#define	PAGE_ERASED			BIT(5)
-+#define	CODEWORD_ERASED			BIT(4)
-+#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
-+#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-+
-+/* NAND_READ_LOCATION_n bits */
-+#define READ_LOCATION_OFFSET		0
-+#define READ_LOCATION_SIZE		16
-+#define READ_LOCATION_LAST		31
-+
-+/* Version Mask */
-+#define	NAND_VERSION_MAJOR_MASK		0xf0000000
-+#define	NAND_VERSION_MAJOR_SHIFT	28
-+#define	NAND_VERSION_MINOR_MASK		0x0fff0000
-+#define	NAND_VERSION_MINOR_SHIFT	16
-+
-+/* NAND OP_CMDs */
-+#define	OP_PAGE_READ			0x2
-+#define	OP_PAGE_READ_WITH_ECC		0x3
-+#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
-+#define	OP_PAGE_READ_ONFI_READ		0x5
-+#define	OP_PROGRAM_PAGE			0x6
-+#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
-+#define	OP_PROGRAM_PAGE_SPARE		0x9
-+#define	OP_BLOCK_ERASE			0xa
-+#define	OP_CHECK_STATUS			0xc
-+#define	OP_FETCH_ID			0xb
-+#define	OP_RESET_DEVICE			0xd
-+
-+/* Default Value for NAND_DEV_CMD_VLD */
-+#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
-+					 ERASE_START_VLD | SEQ_READ_START_VLD)
-+
-+/* NAND_CTRL bits */
-+#define	BAM_MODE_EN			BIT(0)
-+
-+/*
-+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
-+ * the driver calls the chunks 'step' or 'codeword' interchangeably
-+ */
-+#define	NANDC_STEP_SIZE			512
-+
-+/*
-+ * the largest page size we support is 8K, this will have 16 steps/codewords
-+ * of 512 bytes each
-+ */
-+#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)
-+
-+/* we read at most 3 registers per codeword scan */
-+#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)
-+
-+/* ECC modes supported by the controller */
-+#define	ECC_NONE	BIT(0)
-+#define	ECC_RS_4BIT	BIT(1)
-+#define	ECC_BCH_4BIT	BIT(2)
-+#define	ECC_BCH_8BIT	BIT(3)
-+
-+/*
-+ * Returns the actual register address for all NAND_DEV_ registers
-+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
-+ */
-+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-+
-+/* Returns the NAND register physical address */
-+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
-+
-+/* Returns the dma address for reg read buffer */
-+#define reg_buf_dma_addr(chip, vaddr) \
-+	((chip)->reg_read_dma + \
-+	((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
-+
-+#define QPIC_PER_CW_CMD_ELEMENTS	32
-+#define QPIC_PER_CW_CMD_SGL		32
-+#define QPIC_PER_CW_DATA_SGL		8
-+
-+#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
-+
-+/*
-+ * Flags used in DMA descriptor preparation helper functions
-+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
-+ */
-+/* Don't set the EOT in current tx BAM sgl */
-+#define NAND_BAM_NO_EOT			BIT(0)
-+/* Set the NWD flag in current BAM sgl */
-+#define NAND_BAM_NWD			BIT(1)
-+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
-+#define NAND_BAM_NEXT_SGL		BIT(2)
-+/*
-+ * Erased codeword status is being used two times in single transfer so this
-+ * flag will determine the current value of erased codeword status register
-+ */
-+#define NAND_ERASED_CW_SET		BIT(4)
-+
-+#define MAX_ADDRESS_CYCLE		5
-+
-+/*
-+ * This data type corresponds to the BAM transaction which will be used for all
-+ * NAND transfers.
-+ * @bam_ce - the array of BAM command elements
-+ * @cmd_sgl - sgl for NAND BAM command pipe
-+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
-+ * @last_data_desc - last DMA desc in data channel (tx/rx).
-+ * @last_cmd_desc - last DMA desc in command channel.
-+ * @txn_done - completion for NAND transfer.
-+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
-+ * @bam_ce_start - the index in bam_ce which marks the start position ce
-+ *		   for current sgl. It will be used for size calculation
-+ *		   for current sgl
-+ * @cmd_sgl_pos - current index in command sgl.
-+ * @cmd_sgl_start - start index in command sgl.
-+ * @tx_sgl_pos - current index in data sgl for tx.
-+ * @tx_sgl_start - start index in data sgl for tx.
-+ * @rx_sgl_pos - current index in data sgl for rx.
-+ * @rx_sgl_start - start index in data sgl for rx.
-+ */
-+struct bam_transaction {
-+	struct bam_cmd_element *bam_ce;
-+	struct scatterlist *cmd_sgl;
-+	struct scatterlist *data_sgl;
-+	struct dma_async_tx_descriptor *last_data_desc;
-+	struct dma_async_tx_descriptor *last_cmd_desc;
-+	struct completion txn_done;
-+	u32 bam_ce_pos;
-+	u32 bam_ce_start;
-+	u32 cmd_sgl_pos;
-+	u32 cmd_sgl_start;
-+	u32 tx_sgl_pos;
-+	u32 tx_sgl_start;
-+	u32 rx_sgl_pos;
-+	u32 rx_sgl_start;
-+};
-+
-+/*
-+ * This data type corresponds to the nand dma descriptor
-+ * @dma_desc - low level DMA engine descriptor
-+ * @list - list for desc_info
-+ *
-+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
-+ *	      ADM
-+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
-+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
-+ * @dir - DMA transfer direction
-+ */
-+struct desc_info {
-+	struct dma_async_tx_descriptor *dma_desc;
-+	struct list_head node;
-+
-+	union {
-+		struct scatterlist adm_sgl;
-+		struct {
-+			struct scatterlist *bam_sgl;
-+			int sgl_cnt;
-+		};
-+	};
-+	enum dma_data_direction dir;
-+};
-+
-+/*
-+ * holds the current register values that we want to write. acts as a contiguous
-+ * chunk of memory which we use to write the controller registers through DMA.
-+ */
-+struct nandc_regs {
-+	__le32 cmd;
-+	__le32 addr0;
-+	__le32 addr1;
-+	__le32 chip_sel;
-+	__le32 exec;
-+
-+	__le32 cfg0;
-+	__le32 cfg1;
-+	__le32 ecc_bch_cfg;
-+
-+	__le32 clrflashstatus;
-+	__le32 clrreadstatus;
-+
-+	__le32 cmd1;
-+	__le32 vld;
-+
-+	__le32 orig_cmd1;
-+	__le32 orig_vld;
-+
-+	__le32 ecc_buf_cfg;
-+	__le32 read_location0;
-+	__le32 read_location1;
-+	__le32 read_location2;
-+	__le32 read_location3;
-+	__le32 read_location_last0;
-+	__le32 read_location_last1;
-+	__le32 read_location_last2;
-+	__le32 read_location_last3;
-+
-+	__le32 erased_cw_detect_cfg_clr;
-+	__le32 erased_cw_detect_cfg_set;
-+};
-+
-+/*
-+ * NAND controller data struct
-+ *
-+ * @dev:			parent device
-+ *
-+ * @base:			MMIO base
-+ *
-+ * @core_clk:			controller clock
-+ * @aon_clk:			another controller clock
-+ *
-+ * @regs:			a contiguous chunk of memory for DMA register
-+ *				writes. contains the register values to be
-+ *				written to controller
-+ *
-+ * @props:			properties of current NAND controller,
-+ *				initialized via DT match data
-+ *
-+ * @controller:			base controller structure
-+ * @host_list:			list containing all the chips attached to the
-+ *				controller
-+ *
-+ * @chan:			dma channel
-+ * @cmd_crci:			ADM DMA CRCI for command flow control
-+ * @data_crci:			ADM DMA CRCI for data flow control
-+ *
-+ * @desc_list:			DMA descriptor list (list of desc_infos)
-+ *
-+ * @data_buffer:		our local DMA buffer for page read/writes,
-+ *				used when we can't use the buffer provided
-+ *				by upper layers directly
-+ * @reg_read_buf:		local buffer for reading back registers via DMA
-+ *
-+ * @base_phys:			physical base address of controller registers
-+ * @base_dma:			dma base address of controller registers
-+ * @reg_read_dma:		contains dma address for register read buffer
-+ *
-+ * @buf_size/count/start:	markers for chip->legacy.read_buf/write_buf
-+ *				functions
-+ * @max_cwperpage:		maximum QPIC codewords required. calculated
-+ *				from all connected NAND devices pagesize
-+ *
-+ * @reg_read_pos:		marker for data read in reg_read_buf
-+ *
-+ * @cmd1/vld:			some fixed controller register values
-+ *
-+ * @exec_opwrite:		flag to select correct number of code word
-+ *				while reading status
-+ */
-+struct qcom_nand_controller {
-+	struct device *dev;
-+
-+	void __iomem *base;
-+
-+	struct clk *core_clk;
-+	struct clk *aon_clk;
-+
-+	struct nandc_regs *regs;
-+	struct bam_transaction *bam_txn;
-+
-+	const struct qcom_nandc_props *props;
-+
-+	struct nand_controller *controller;
-+	struct list_head host_list;
-+
-+	union {
-+		/* will be used only by QPIC for BAM DMA */
-+		struct {
-+			struct dma_chan *tx_chan;
-+			struct dma_chan *rx_chan;
-+			struct dma_chan *cmd_chan;
-+		};
-+
-+		/* will be used only by EBI2 for ADM DMA */
-+		struct {
-+			struct dma_chan *chan;
-+			unsigned int cmd_crci;
-+			unsigned int data_crci;
-+		};
-+	};
-+
-+	struct list_head desc_list;
-+
-+	u8		*data_buffer;
-+	__le32		*reg_read_buf;
-+
-+	phys_addr_t base_phys;
-+	dma_addr_t base_dma;
-+	dma_addr_t reg_read_dma;
-+
-+	int		buf_size;
-+	int		buf_count;
-+	int		buf_start;
-+	unsigned int	max_cwperpage;
-+
-+	int reg_read_pos;
-+
-+	u32 cmd1, vld;
-+	bool exec_opwrite;
-+};
-+
-+/*
-+ * This data type corresponds to the NAND controller properties which varies
-+ * among different NAND controllers.
-+ * @ecc_modes - ecc mode for NAND
-+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
-+ * @supports_bam - whether NAND controller is using BAM
-+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
-+ * @qpic_version2 - flag to indicate QPIC IP version 2
-+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
-+ */
-+struct qcom_nandc_props {
-+	u32 ecc_modes;
-+	u32 dev_cmd_reg_start;
-+	bool supports_bam;
-+	bool nandc_part_of_qpic;
-+	bool qpic_version2;
-+	bool use_codeword_fixup;
-+};
-+
-+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
-+struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
-+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
-+void qcom_qpic_bam_dma_done(void *data);
-+void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
-+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
-+				struct dma_chan *chan, unsigned long flags);
-+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
-+			       int reg_off, const void *vaddr, int size, unsigned int flags);
-+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
-+				const void *vaddr, int size, unsigned int flags);
-+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
-+			   const void *vaddr, int size, bool flow_control);
-+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
-+		      unsigned int flags);
-+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
-+		       int num_regs, unsigned int flags);
-+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
-+		       int size, unsigned int flags);
-+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
-+			int size, unsigned int flags);
-+int qcom_submit_descs(struct qcom_nand_controller *nandc);
-+void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
-+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
-+int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
-+#endif
-+

+ 0 - 198
target/linux/generic/backport-6.6/413-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch

@@ -1,198 +0,0 @@
-From 0c08080fd71cd5dd59643104b39d3c89d793ab3c Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <[email protected]>
-Date: Wed, 20 Nov 2024 14:45:03 +0530
-Subject: [PATCH 4/4] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
-
-Use the bitfield macro FIELD_PREP, and GENMASK to
-do the shift and mask in one go. This makes the code
-more readable.
-
-Reviewed-by: Konrad Dybcio <[email protected]>
-Signed-off-by: Md Sadre Alam <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
----
- drivers/mtd/nand/raw/qcom_nandc.c    | 97 ++++++++++++++--------------
- include/linux/mtd/nand-qpic-common.h | 31 +++++----
- 2 files changed, 67 insertions(+), 61 deletions(-)
-
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_n
- 				(num_cw - 1) << CW_PER_PAGE);
- 
- 		cfg1 = cpu_to_le32(host->cfg1_raw);
--		ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-+		ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
- 	}
- 
- 	nandc->regs->cmd = cmd;
-@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct
- 	host->cw_size = host->cw_data + ecc->bytes;
- 	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
- 
--	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
--				| host->cw_data << UD_SIZE_BYTES
--				| 0 << DISABLE_STATUS_AFTER_WRITE
--				| 5 << NUM_ADDR_CYCLES
--				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
--				| 0 << STATUS_BFR_READ
--				| 1 << SET_RD_MODE_AFTER_STATUS
--				| host->spare_bytes << SPARE_SIZE_BYTES;
--
--	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
--				| 0 <<  CS_ACTIVE_BSY
--				| bad_block_byte << BAD_BLOCK_BYTE_NUM
--				| 0 << BAD_BLOCK_IN_SPARE_AREA
--				| 2 << WR_RD_BSY_GAP
--				| wide_bus << WIDE_FLASH
--				| host->bch_enabled << ENABLE_BCH_ECC;
--
--	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
--				| host->cw_size << UD_SIZE_BYTES
--				| 5 << NUM_ADDR_CYCLES
--				| 0 << SPARE_SIZE_BYTES;
--
--	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
--				| 0 << CS_ACTIVE_BSY
--				| 17 << BAD_BLOCK_BYTE_NUM
--				| 1 << BAD_BLOCK_IN_SPARE_AREA
--				| 2 << WR_RD_BSY_GAP
--				| wide_bus << WIDE_FLASH
--				| 1 << DEV0_CFG1_ECC_DISABLE;
--
--	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
--				| 0 << ECC_SW_RESET
--				| host->cw_data << ECC_NUM_DATA_BYTES
--				| 1 << ECC_FORCE_CLK_OPEN
--				| ecc_mode << ECC_MODE
--				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-+	host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+		     FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
-+		     FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
-+		     FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+		     FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
-+		     FIELD_PREP(STATUS_BFR_READ, 0) |
-+		     FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
-+		     FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
-+
-+	host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+		     FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
-+		     FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
-+		     FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+		     FIELD_PREP(WIDE_FLASH, wide_bus) |
-+		     FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
-+
-+	host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+			 FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
-+			 FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+			 FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+
-+	host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+			 FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+			 FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+			 FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+			 FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+			 FIELD_PREP(WIDE_FLASH, wide_bus) |
-+			 FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-+
-+	host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
-+			    FIELD_PREP(ECC_SW_RESET, 0) |
-+			    FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
-+			    FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
-+			    FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
-+			    FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
- 
- 	if (!nandc->props->qpic_version2)
- 		host->ecc_buf_cfg = 0x203 << NUM_STEPS;
-@@ -1887,21 +1886,21 @@ static int qcom_param_page_type_exec(str
- 	nandc->regs->addr0 = 0;
- 	nandc->regs->addr1 = 0;
- 
--	nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
--					512 << UD_SIZE_BYTES |
--					5 << NUM_ADDR_CYCLES |
--					0 << SPARE_SIZE_BYTES);
--
--	nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
--					0 << CS_ACTIVE_BSY |
--					17 << BAD_BLOCK_BYTE_NUM |
--					1 << BAD_BLOCK_IN_SPARE_AREA |
--					2 << WR_RD_BSY_GAP |
--					0 << WIDE_FLASH |
--					1 << DEV0_CFG1_ECC_DISABLE);
-+	host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
-+		     FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
-+		     FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+		     FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+
-+	host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+		     FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+		     FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+		     FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+		     FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+		     FIELD_PREP(WIDE_FLASH, 0) |
-+		     FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
- 
- 	if (!nandc->props->qpic_version2)
--		nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
-+		nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
- 
- 	/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
- 	if (!nandc->props->qpic_version2) {
---- a/include/linux/mtd/nand-qpic-common.h
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -70,35 +70,42 @@
- #define	BS_CORRECTABLE_ERR_MSK		0x1f
- 
- /* NAND_DEVn_CFG0 bits */
--#define	DISABLE_STATUS_AFTER_WRITE	4
-+#define	DISABLE_STATUS_AFTER_WRITE	BIT(4)
- #define	CW_PER_PAGE			6
-+#define	CW_PER_PAGE_MASK		GENMASK(8, 6)
- #define	UD_SIZE_BYTES			9
- #define	UD_SIZE_BYTES_MASK		GENMASK(18, 9)
--#define	ECC_PARITY_SIZE_BYTES_RS	19
-+#define	ECC_PARITY_SIZE_BYTES_RS	GENMASK(22, 19)
- #define	SPARE_SIZE_BYTES		23
- #define	SPARE_SIZE_BYTES_MASK		GENMASK(26, 23)
- #define	NUM_ADDR_CYCLES			27
--#define	STATUS_BFR_READ			30
--#define	SET_RD_MODE_AFTER_STATUS	31
-+#define	NUM_ADDR_CYCLES_MASK		GENMASK(29, 27)
-+#define	STATUS_BFR_READ			BIT(30)
-+#define	SET_RD_MODE_AFTER_STATUS	BIT(31)
- 
- /* NAND_DEVn_CFG0 bits */
--#define	DEV0_CFG1_ECC_DISABLE		0
--#define	WIDE_FLASH			1
-+#define	DEV0_CFG1_ECC_DISABLE		BIT(0)
-+#define	WIDE_FLASH			BIT(1)
- #define	NAND_RECOVERY_CYCLES		2
--#define	CS_ACTIVE_BSY			5
-+#define	NAND_RECOVERY_CYCLES_MASK	GENMASK(4, 2)
-+#define	CS_ACTIVE_BSY			BIT(5)
- #define	BAD_BLOCK_BYTE_NUM		6
--#define	BAD_BLOCK_IN_SPARE_AREA		16
-+#define	BAD_BLOCK_BYTE_NUM_MASK		GENMASK(15, 6)
-+#define	BAD_BLOCK_IN_SPARE_AREA		BIT(16)
- #define	WR_RD_BSY_GAP			17
--#define	ENABLE_BCH_ECC			27
-+#define	WR_RD_BSY_GAP_MASK		GENMASK(22, 17)
-+#define	ENABLE_BCH_ECC			BIT(27)
- 
- /* NAND_DEV0_ECC_CFG bits */
--#define	ECC_CFG_ECC_DISABLE		0
--#define	ECC_SW_RESET			1
-+#define	ECC_CFG_ECC_DISABLE		BIT(0)
-+#define	ECC_SW_RESET			BIT(1)
- #define	ECC_MODE			4
-+#define	ECC_MODE_MASK			GENMASK(5, 4)
- #define	ECC_PARITY_SIZE_BYTES_BCH	8
-+#define	ECC_PARITY_SIZE_BYTES_BCH_MASK	GENMASK(12, 8)
- #define	ECC_NUM_DATA_BYTES		16
- #define	ECC_NUM_DATA_BYTES_MASK		GENMASK(25, 16)
--#define	ECC_FORCE_CLK_OPEN		30
-+#define	ECC_FORCE_CLK_OPEN		BIT(30)
- 
- /* NAND_DEV_CMD1 bits */
- #define	READ_ADDR			0

+ 0 - 64
target/linux/generic/backport-6.6/414-v6.14-mtd-rawnand-qcom-fix-broken-config-in-qcom_param_pag.patch

@@ -1,64 +0,0 @@
-From 9d4ffbcfde283f2a87ea45128ddf7e6651facdd9 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Fri, 7 Feb 2025 20:42:38 +0100
-Subject: [PATCH] mtd: rawnand: qcom: fix broken config in
- qcom_param_page_type_exec
-
-Fix broken config in qcom_param_page_type_exec caused by copy-paste error
-from commit 0c08080fd71c ("mtd: rawnand: qcom: use FIELD_PREP and GENMASK")
-
-In qcom_param_page_type_exec the value needs to be set to
-nandc->regs->cfg0 instead of host->cfg0. This wrong configuration caused
-the Qcom NANDC driver to malfunction on any device that makes use of it
-(IPQ806x, IPQ40xx, IPQ807x, IPQ60xx) with the following error:
-
-[    0.885369] nand: device found, Manufacturer ID: 0x2c, Chip ID: 0xaa
-[    0.885909] nand: Micron NAND 256MiB 1,8V 8-bit
-[    0.892499] nand: 256 MiB, SLC, erase size: 128 KiB, page size: 2048, OOB size: 64
-[    0.896823] nand: ECC (step, strength) = (512, 8) does not fit in OOB
-[    0.896836] qcom-nandc 79b0000.nand-controller: No valid ECC settings possible
-[    0.910996] bam-dma-engine 7984000.dma-controller: Cannot free busy channel
-[    0.918070] qcom-nandc: probe of 79b0000.nand-controller failed with error -28
-
-Restore original configuration fix the problem and makes the driver work
-again.
-
-Cc: [email protected]
-Fixes: 0c08080fd71c ("mtd: rawnand: qcom: use FIELD_PREP and GENMASK")
-Signed-off-by: Christian Marangi <[email protected]>
----
- drivers/mtd/nand/raw/qcom_nandc.c | 24 ++++++++++++------------
- 1 file changed, 12 insertions(+), 12 deletions(-)
-
---- a/drivers/mtd/nand/raw/qcom_nandc.c
-+++ b/drivers/mtd/nand/raw/qcom_nandc.c
-@@ -1886,18 +1886,18 @@ static int qcom_param_page_type_exec(str
- 	nandc->regs->addr0 = 0;
- 	nandc->regs->addr1 = 0;
- 
--	host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
--		     FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
--		     FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
--		     FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+	nandc->regs->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
-+			    FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
-+			    FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
-+			    FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
- 
--	host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
--		     FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
--		     FIELD_PREP(CS_ACTIVE_BSY, 0) |
--		     FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
--		     FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
--		     FIELD_PREP(WIDE_FLASH, 0) |
--		     FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-+	nandc->regs->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
-+			    FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+			    FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+			    FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+			    FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
-+			    FIELD_PREP(WIDE_FLASH, 0) |
-+			    FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
- 
- 	if (!nandc->props->qpic_version2)
- 		nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);

+ 0 - 77
target/linux/generic/backport-6.6/415-v6.14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch

@@ -1,77 +0,0 @@
-From b9371866799d67a80be0ea9e01bd41987db22f26 Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <[email protected]>
-Date: Mon, 6 Jan 2025 18:45:58 +0530
-Subject: [PATCH] mtd: rawnand: qcom: Fix build issue on x86 architecture
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Fix a buffer overflow issue in qcom_clear_bam_transaction by using
-struct_group to group related fields and avoid FORTIFY_SOURCE warnings.
-
-On x86 architecture, the following error occurs due to warnings being
-treated as errors:
-
-In function ‘fortify_memset_chk’,
-    inlined from ‘qcom_clear_bam_transaction’ at
-drivers/mtd/nand/qpic_common.c:88:2:
-./include/linux/fortify-string.h:480:25: error: call to ‘__write_overflow_field’
-declared with attribute warning: detected write beyond size of field
-(1st parameter); maybe use struct_group()? [-Werror=attribute-warning]
-  480 |                         __write_overflow_field(p_size_field, size);
-      |                         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-  LD [M]  drivers/mtd/nand/nandcore.o
-  CC [M]  drivers/w1/masters/mxc_w1.o
-cc1: all warnings being treated as errors
-
-This patch addresses the issue by grouping the related fields in
-struct bam_transaction using struct_group and updating the memset call
-accordingly.
-
-Fixes: 8c52932da5e6 ("mtd: rawnand: qcom: cleanup qcom_nandc driver")
-Signed-off-by: Md Sadre Alam <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
----
- drivers/mtd/nand/qpic_common.c       |  2 +-
- include/linux/mtd/nand-qpic-common.h | 19 +++++++++++--------
- 2 files changed, 12 insertions(+), 9 deletions(-)
-
---- a/drivers/mtd/nand/qpic_common.c
-+++ b/drivers/mtd/nand/qpic_common.c
-@@ -85,7 +85,7 @@ void qcom_clear_bam_transaction(struct q
- 	if (!nandc->props->supports_bam)
- 		return;
- 
--	memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
-+	memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
- 	bam_txn->last_data_desc = NULL;
- 
- 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
---- a/include/linux/mtd/nand-qpic-common.h
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -254,14 +254,17 @@ struct bam_transaction {
- 	struct dma_async_tx_descriptor *last_data_desc;
- 	struct dma_async_tx_descriptor *last_cmd_desc;
- 	struct completion txn_done;
--	u32 bam_ce_pos;
--	u32 bam_ce_start;
--	u32 cmd_sgl_pos;
--	u32 cmd_sgl_start;
--	u32 tx_sgl_pos;
--	u32 tx_sgl_start;
--	u32 rx_sgl_pos;
--	u32 rx_sgl_start;
-+	struct_group(bam_positions,
-+		u32 bam_ce_pos;
-+		u32 bam_ce_start;
-+		u32 cmd_sgl_pos;
-+		u32 cmd_sgl_start;
-+		u32 tx_sgl_pos;
-+		u32 tx_sgl_start;
-+		u32 rx_sgl_pos;
-+		u32 rx_sgl_start;
-+
-+	);
- };
- 
- /*

+ 0 - 1737
target/linux/generic/backport-6.6/416-v6.15-01-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Inte.patch

@@ -1,1737 +0,0 @@
-From 7304d1909080ef0c9da703500a97f46c98393fcd Mon Sep 17 00:00:00 2001
-From: Md Sadre Alam <[email protected]>
-Date: Mon, 24 Feb 2025 16:44:14 +0530
-Subject: [PATCH] spi: spi-qpic: add driver for QCOM SPI NAND flash Interface
-
-This driver implements support for the SPI-NAND mode of QCOM NAND Flash
-Interface as a SPI-MEM controller with pipelined ECC capability.
-
-Co-developed-by: Sricharan Ramabadhran <[email protected]>
-Signed-off-by: Sricharan Ramabadhran <[email protected]>
-Co-developed-by: Varadarajan Narayanan <[email protected]>
-Signed-off-by: Varadarajan Narayanan <[email protected]>
-Signed-off-by: Md Sadre Alam <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Mark Brown <[email protected]>
----
- drivers/mtd/nand/Makefile            |    4 +
- drivers/spi/Kconfig                  |    9 +
- drivers/spi/Makefile                 |    1 +
- drivers/spi/spi-qpic-snand.c         | 1631 ++++++++++++++++++++++++++
- include/linux/mtd/nand-qpic-common.h |    7 +
- 5 files changed, 1652 insertions(+)
- create mode 100644 drivers/spi/spi-qpic-snand.c
-
---- a/drivers/mtd/nand/Makefile
-+++ b/drivers/mtd/nand/Makefile
-@@ -3,7 +3,11 @@
- nandcore-objs := core.o bbt.o
- obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
- obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
-+ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
-+obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
-+else
- obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
-+endif
- obj-y	+= onenand/
- obj-y	+= raw/
- obj-y	+= spi/
---- a/drivers/spi/Kconfig
-+++ b/drivers/spi/Kconfig
-@@ -870,6 +870,15 @@ config SPI_QCOM_QSPI
- 	help
- 	  QSPI(Quad SPI) driver for Qualcomm QSPI controller.
- 
-+config SPI_QPIC_SNAND
-+	bool "QPIC SNAND controller"
-+	depends on ARCH_QCOM || COMPILE_TEST
-+	select MTD
-+	help
-+	  QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
-+	  QPIC controller supports both parallel nand and serial nand.
-+	  This config will enable serial nand driver for QPIC controller.
-+
- config SPI_QUP
- 	tristate "Qualcomm SPI controller with QUP interface"
- 	depends on ARCH_QCOM || COMPILE_TEST
---- a/drivers/spi/Makefile
-+++ b/drivers/spi/Makefile
-@@ -110,6 +110,7 @@ obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx-
- obj-$(CONFIG_SPI_PXA2XX_PCI)		+= spi-pxa2xx-pci.o
- obj-$(CONFIG_SPI_QCOM_GENI)		+= spi-geni-qcom.o
- obj-$(CONFIG_SPI_QCOM_QSPI)		+= spi-qcom-qspi.o
-+obj-$(CONFIG_SPI_QPIC_SNAND)            += spi-qpic-snand.o
- obj-$(CONFIG_SPI_QUP)			+= spi-qup.o
- obj-$(CONFIG_SPI_ROCKCHIP)		+= spi-rockchip.o
- obj-$(CONFIG_SPI_ROCKCHIP_SFC)		+= spi-rockchip-sfc.o
---- /dev/null
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -0,0 +1,1631 @@
-+/*
-+ * SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
-+ *
-+ * Authors:
-+ *	Md Sadre Alam <[email protected]>
-+ *	Sricharan R <[email protected]>
-+ *	Varadarajan Narayanan <[email protected]>
-+ */
-+#include <linux/bitops.h>
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/dmaengine.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dma/qcom_adm.h>
-+#include <linux/dma/qcom_bam_dma.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/platform_device.h>
-+#include <linux/slab.h>
-+#include <linux/mtd/nand-qpic-common.h>
-+#include <linux/mtd/spinand.h>
-+#include <linux/bitfield.h>
-+
-+#define NAND_FLASH_SPI_CFG		0xc0
-+#define NAND_NUM_ADDR_CYCLES		0xc4
-+#define NAND_BUSY_CHECK_WAIT_CNT	0xc8
-+#define NAND_FLASH_FEATURES		0xf64
-+
-+/* QSPI NAND config reg bits */
-+#define LOAD_CLK_CNTR_INIT_EN		BIT(28)
-+#define CLK_CNTR_INIT_VAL_VEC		0x924
-+#define CLK_CNTR_INIT_VAL_VEC_MASK	GENMASK(27, 16)
-+#define FEA_STATUS_DEV_ADDR		0xc0
-+#define FEA_STATUS_DEV_ADDR_MASK	GENMASK(15, 8)
-+#define SPI_CFG				BIT(0)
-+#define SPI_NUM_ADDR			0xDA4DB
-+#define SPI_WAIT_CNT			0x10
-+#define QPIC_QSPI_NUM_CS		1
-+#define SPI_TRANSFER_MODE_x1		BIT(29)
-+#define SPI_TRANSFER_MODE_x4		(3 << 29)
-+#define SPI_WP				BIT(28)
-+#define SPI_HOLD			BIT(27)
-+#define QPIC_SET_FEATURE		BIT(31)
-+
-+#define SPINAND_RESET			0xff
-+#define SPINAND_READID			0x9f
-+#define SPINAND_GET_FEATURE		0x0f
-+#define SPINAND_SET_FEATURE		0x1f
-+#define SPINAND_READ			0x13
-+#define SPINAND_ERASE			0xd8
-+#define SPINAND_WRITE_EN		0x06
-+#define SPINAND_PROGRAM_EXECUTE		0x10
-+#define SPINAND_PROGRAM_LOAD		0x84
-+
-+#define ACC_FEATURE			0xe
-+#define BAD_BLOCK_MARKER_SIZE		0x2
-+#define OOB_BUF_SIZE			128
-+#define ecceng_to_qspi(eng)		container_of(eng, struct qpic_spi_nand, ecc_eng)
-+
-+struct qpic_snand_op {
-+	u32 cmd_reg;
-+	u32 addr1_reg;
-+	u32 addr2_reg;
-+};
-+
-+struct snandc_read_status {
-+	__le32 snandc_flash;
-+	__le32 snandc_buffer;
-+	__le32 snandc_erased_cw;
-+};
-+
-+/*
-+ * ECC state struct
-+ * @corrected:		ECC corrected
-+ * @bitflips:		Max bit flip
-+ * @failed:		ECC failed
-+ */
-+struct qcom_ecc_stats {
-+	u32 corrected;
-+	u32 bitflips;
-+	u32 failed;
-+};
-+
-+struct qpic_ecc {
-+	struct device *dev;
-+	int ecc_bytes_hw;
-+	int spare_bytes;
-+	int bbm_size;
-+	int ecc_mode;
-+	int bytes;
-+	int steps;
-+	int step_size;
-+	int strength;
-+	int cw_size;
-+	int cw_data;
-+	u32 cfg0;
-+	u32 cfg1;
-+	u32 cfg0_raw;
-+	u32 cfg1_raw;
-+	u32 ecc_buf_cfg;
-+	u32 ecc_bch_cfg;
-+	u32 clrflashstatus;
-+	u32 clrreadstatus;
-+	bool bch_enabled;
-+};
-+
-+struct qpic_spi_nand {
-+	struct qcom_nand_controller *snandc;
-+	struct spi_controller *ctlr;
-+	struct mtd_info *mtd;
-+	struct clk *iomacro_clk;
-+	struct qpic_ecc *ecc;
-+	struct qcom_ecc_stats ecc_stats;
-+	struct nand_ecc_engine ecc_eng;
-+	u8 *data_buf;
-+	u8 *oob_buf;
-+	u32 wlen;
-+	__le32 addr1;
-+	__le32 addr2;
-+	__le32 cmd;
-+	u32 num_cw;
-+	bool oob_rw;
-+	bool page_rw;
-+	bool raw_rw;
-+};
-+
-+static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
-+					int reg, int cw_offset, int read_size,
-+					int is_last_read_loc)
-+{
-+	__le32 locreg_val;
-+	u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+		  ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
-+		  << READ_LOCATION_LAST));
-+
-+	locreg_val = cpu_to_le32(val);
-+
-+	if (reg == NAND_READ_LOCATION_0)
-+		snandc->regs->read_location0 = locreg_val;
-+	else if (reg == NAND_READ_LOCATION_1)
-+		snandc->regs->read_location1 = locreg_val;
-+	else if (reg == NAND_READ_LOCATION_2)
-+		snandc->regs->read_location1 = locreg_val;
-+	else if (reg == NAND_READ_LOCATION_3)
-+		snandc->regs->read_location3 = locreg_val;
-+}
-+
-+static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
-+				       int reg, int cw_offset, int read_size,
-+				       int is_last_read_loc)
-+{
-+	__le32 locreg_val;
-+	u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
-+		  ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
-+		  << READ_LOCATION_LAST));
-+
-+	locreg_val = cpu_to_le32(val);
-+
-+	if (reg == NAND_READ_LOCATION_LAST_CW_0)
-+		snandc->regs->read_location_last0 = locreg_val;
-+	else if (reg == NAND_READ_LOCATION_LAST_CW_1)
-+		snandc->regs->read_location_last1 = locreg_val;
-+	else if (reg == NAND_READ_LOCATION_LAST_CW_2)
-+		snandc->regs->read_location_last2 = locreg_val;
-+	else if (reg == NAND_READ_LOCATION_LAST_CW_3)
-+		snandc->regs->read_location_last3 = locreg_val;
-+}
-+
-+static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
-+{
-+	struct nand_ecc_engine *eng = nand->ecc.engine;
-+	struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);
-+
-+	return qspi->snandc;
-+}
-+
-+static int qcom_spi_init(struct qcom_nand_controller *snandc)
-+{
-+	u32 snand_cfg_val = 0x0;
-+	int ret;
-+
-+	snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
-+			FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
-+			FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
-+			FIELD_PREP(SPI_CFG, 0);
-+
-+	snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
-+	snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
-+	snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
-+
-+	snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
-+	snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
-+			   NAND_BAM_NEXT_SGL);
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
-+		return ret;
-+	}
-+
-+	return ret;
-+}
-+
-+static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
-+				  struct mtd_oob_region *oobregion)
-+{
-+	struct nand_device *nand = mtd_to_nanddev(mtd);
-+	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
-+	struct qpic_ecc *qecc = snandc->qspi->ecc;
-+
-+	if (section > 1)
-+		return -ERANGE;
-+
-+	oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
-+	oobregion->offset = mtd->oobsize - oobregion->length;
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
-+				   struct mtd_oob_region *oobregion)
-+{
-+	struct nand_device *nand = mtd_to_nanddev(mtd);
-+	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
-+	struct qpic_ecc *qecc = snandc->qspi->ecc;
-+
-+	if (section)
-+		return -ERANGE;
-+
-+	oobregion->length = qecc->steps * 4;
-+	oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;
-+
-+	return 0;
-+}
-+
-+static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
-+	.ecc = qcom_spi_ooblayout_ecc,
-+	.free = qcom_spi_ooblayout_free,
-+};
-+
-+static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
-+{
-+	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
-+	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
-+	struct mtd_info *mtd = nanddev_to_mtd(nand);
-+	int cwperpage, bad_block_byte;
-+	struct qpic_ecc *ecc_cfg;
-+
-+	cwperpage = mtd->writesize / NANDC_STEP_SIZE;
-+	snandc->qspi->num_cw = cwperpage;
-+
-+	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
-+	if (!ecc_cfg)
-+		return -ENOMEM;
-+	snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
-+					GFP_KERNEL);
-+	if (!snandc->qspi->oob_buf)
-+		return -ENOMEM;
-+
-+	memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
-+
-+	nand->ecc.ctx.priv = ecc_cfg;
-+	snandc->qspi->mtd = mtd;
-+
-+	ecc_cfg->ecc_bytes_hw = 7;
-+	ecc_cfg->spare_bytes = 4;
-+	ecc_cfg->bbm_size = 1;
-+	ecc_cfg->bch_enabled = true;
-+	ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
-+
-+	ecc_cfg->steps = 4;
-+	ecc_cfg->strength = 4;
-+	ecc_cfg->step_size = 512;
-+	ecc_cfg->cw_data = 516;
-+	ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
-+	bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
-+
-+	mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);
-+
-+	ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+			FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
-+			FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
-+			FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
-+			FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
-+			FIELD_PREP(STATUS_BFR_READ, 0) |
-+			FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
-+			FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);
-+
-+	ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
-+			FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+			FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
-+			FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
-+			FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
-+			FIELD_PREP(WIDE_FLASH, 0) |
-+			FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);
-+
-+	ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
-+			    FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
-+			    FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
-+			    FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-+
-+	ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
-+			    FIELD_PREP(CS_ACTIVE_BSY, 0) |
-+			    FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
-+			    FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
-+			    FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
-+			    FIELD_PREP(WIDE_FLASH, 0) |
-+			    FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
-+
-+	ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
-+			       FIELD_PREP(ECC_SW_RESET, 0) |
-+			       FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
-+			       FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
-+			       FIELD_PREP(ECC_MODE_MASK, 0) |
-+			       FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
-+
-+	ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS;
-+	ecc_cfg->clrflashstatus = FS_READY_BSY_N;
-+	ecc_cfg->clrreadstatus = 0xc0;
-+
-+	conf->step_size = ecc_cfg->step_size;
-+	conf->strength = ecc_cfg->strength;
-+
-+	snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
-+	snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
-+
-+	dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
-+		ecc_cfg->strength, ecc_cfg->step_size);
-+
-+	return 0;
-+}
-+
-+static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
-+{
-+	struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
-+
-+	kfree(ecc_cfg);
-+}
-+
-+static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
-+						 struct nand_page_io_req *req)
-+{
-+	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
-+	struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
-+
-+	snandc->qspi->ecc = ecc_cfg;
-+	snandc->qspi->raw_rw = false;
-+	snandc->qspi->oob_rw = false;
-+	snandc->qspi->page_rw = false;
-+
-+	if (req->datalen)
-+		snandc->qspi->page_rw = true;
-+
-+	if (req->ooblen)
-+		snandc->qspi->oob_rw = true;
-+
-+	if (req->mode == MTD_OPS_RAW)
-+		snandc->qspi->raw_rw = true;
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
-+						struct nand_page_io_req *req)
-+{
-+	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
-+	struct mtd_info *mtd = nanddev_to_mtd(nand);
-+
-+	if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
-+		return 0;
-+
-+	if (snandc->qspi->ecc_stats.failed)
-+		mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
-+	else
-+		mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;
-+
-+	if (snandc->qspi->ecc_stats.failed)
-+		return -EBADMSG;
-+	else
-+		return snandc->qspi->ecc_stats.bitflips;
-+}
-+
-+static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
-+	.init_ctx = qcom_spi_ecc_init_ctx_pipelined,
-+	.cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
-+	.prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
-+	.finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
-+};
-+
-+/* helper to configure location register values */
-+static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
-+				  int cw_offset, int read_size, int is_last_read_loc)
-+{
-+	int reg_base = NAND_READ_LOCATION_0;
-+	int num_cw = snandc->qspi->num_cw;
-+
-+	if (cw == (num_cw - 1))
-+		reg_base = NAND_READ_LOCATION_LAST_CW_0;
-+
-+	reg_base += reg * 4;
-+
-+	if (cw == (num_cw - 1))
-+		return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
-+						  read_size, is_last_read_loc);
-+	else
-+		return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
-+						   read_size, is_last_read_loc);
-+}
-+
-+static void
-+qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
-+{
-+	__le32 *reg = &snandc->regs->read_location0;
-+	int num_cw = snandc->qspi->num_cw;
-+
-+	qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
-+	if (cw == (num_cw - 1)) {
-+		reg = &snandc->regs->read_location_last0;
-+		qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
-+				   NAND_BAM_NEXT_SGL);
-+	}
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+
-+	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
-+	qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
-+			  NAND_BAM_NEXT_SGL);
-+}
-+
-+static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	int ret;
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+	qcom_clear_bam_transaction(snandc);
-+
-+	snandc->regs->cmd = snandc->qspi->cmd;
-+	snandc->regs->addr0 = snandc->qspi->addr1;
-+	snandc->regs->addr1 = snandc->qspi->addr2;
-+	snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE));
-+	snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
-+	snandc->regs->exec = cpu_to_le32(1);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failure to erase block\n");
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
-+						bool use_ecc, int cw)
-+{
-+	__le32 *reg = &snandc->regs->read_location0;
-+	int num_cw = snandc->qspi->num_cw;
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
-+			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
-+			   NAND_ERASED_CW_DETECT_CFG, 1,
-+			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+
-+	if (cw == (num_cw - 1)) {
-+		reg = &snandc->regs->read_location_last0;
-+		qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
-+	}
-+	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+
-+	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
-+}
-+
-+static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
-+				 const struct spi_mem_op *op)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	struct mtd_info *mtd = snandc->qspi->mtd;
-+	int size, ret = 0;
-+	int col,  bbpos;
-+	u32 cfg0, cfg1, ecc_bch_cfg;
-+	u32 num_cw = snandc->qspi->num_cw;
-+
-+	qcom_clear_bam_transaction(snandc);
-+	qcom_clear_read_regs(snandc);
-+
-+	size = ecc_cfg->cw_size;
-+	col = ecc_cfg->cw_size * (num_cw - 1);
-+
-+	memset(snandc->data_buffer, 0xff, size);
-+	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
-+	snandc->regs->addr1 = snandc->qspi->addr2;
-+
-+	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-+		0 << CW_PER_PAGE;
-+	cfg1 = ecc_cfg->cfg1_raw;
-+	ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
-+
-+	snandc->regs->cmd = snandc->qspi->cmd;
-+	snandc->regs->cfg0 = cpu_to_le32(cfg0);
-+	snandc->regs->cfg1 = cpu_to_le32(cfg1);
-+	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
-+	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
-+	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
-+	snandc->regs->exec = cpu_to_le32(1);
-+
-+	qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
-+
-+	qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);
-+
-+	qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failed to read last cw\n");
-+		return ret;
-+	}
-+
-+	qcom_nandc_dev_to_mem(snandc, true);
-+	u32 flash = le32_to_cpu(snandc->reg_read_buf[0]);
-+
-+	if (flash & (FS_OP_ERR | FS_MPU_ERR))
-+		return -EIO;
-+
-+	bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
-+
-+	if (snandc->data_buffer[bbpos] == 0xff)
-+		snandc->data_buffer[bbpos + 1] = 0xff;
-+	if (snandc->data_buffer[bbpos] != 0xff)
-+		snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];
-+
-+	memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);
-+
-+	return ret;
-+}
-+
-+static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
-+{
-+	struct snandc_read_status *buf;
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	int i, num_cw = snandc->qspi->num_cw;
-+	bool flash_op_err = false, erased;
-+	unsigned int max_bitflips = 0;
-+	unsigned int uncorrectable_cws = 0;
-+
-+	snandc->qspi->ecc_stats.failed = 0;
-+	snandc->qspi->ecc_stats.corrected = 0;
-+
-+	qcom_nandc_dev_to_mem(snandc, true);
-+	buf = (struct snandc_read_status *)snandc->reg_read_buf;
-+
-+	for (i = 0; i < num_cw; i++, buf++) {
-+		u32 flash, buffer, erased_cw;
-+		int data_len, oob_len;
-+
-+		if (i == (num_cw - 1)) {
-+			data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
-+			oob_len = num_cw << 2;
-+		} else {
-+			data_len = ecc_cfg->cw_data;
-+			oob_len = 0;
-+		}
-+
-+		flash = le32_to_cpu(buf->snandc_flash);
-+		buffer = le32_to_cpu(buf->snandc_buffer);
-+		erased_cw = le32_to_cpu(buf->snandc_erased_cw);
-+
-+		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
-+			if (ecc_cfg->bch_enabled)
-+				erased = (erased_cw & ERASED_CW) == ERASED_CW;
-+			else
-+				erased = false;
-+
-+			if (!erased)
-+				uncorrectable_cws |= BIT(i);
-+
-+		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
-+			flash_op_err = true;
-+		} else {
-+			unsigned int stat;
-+
-+			stat = buffer & BS_CORRECTABLE_ERR_MSK;
-+			snandc->qspi->ecc_stats.corrected += stat;
-+			max_bitflips = max(max_bitflips, stat);
-+		}
-+
-+		if (data_buf)
-+			data_buf += data_len;
-+		if (oob_buf)
-+			oob_buf += oob_len + ecc_cfg->bytes;
-+	}
-+
-+	if (flash_op_err)
-+		return -EIO;
-+
-+	if (!uncorrectable_cws)
-+		snandc->qspi->ecc_stats.bitflips = max_bitflips;
-+	else
-+		snandc->qspi->ecc_stats.failed++;
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
-+{
-+	int i;
-+
-+	qcom_nandc_dev_to_mem(snandc, true);
-+
-+	for (i = 0; i < cw_cnt; i++) {
-+		u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
-+
-+		if (flash & (FS_OP_ERR | FS_MPU_ERR))
-+			return -EIO;
-+	}
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
-+				u8 *oob_buf, int cw)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	struct mtd_info *mtd = snandc->qspi->mtd;
-+	int data_size1, data_size2, oob_size1, oob_size2;
-+	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
-+	int raw_cw = cw;
-+	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
-+	int col;
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+	qcom_clear_bam_transaction(snandc);
-+	raw_cw = num_cw - 1;
-+
-+	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-+				0 << CW_PER_PAGE;
-+	cfg1 = ecc_cfg->cfg1_raw;
-+	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
-+
-+	col = ecc_cfg->cw_size * cw;
-+
-+	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
-+	snandc->regs->addr1 = snandc->qspi->addr2;
-+	snandc->regs->cmd = snandc->qspi->cmd;
-+	snandc->regs->cfg0 = cpu_to_le32(cfg0);
-+	snandc->regs->cfg1 = cpu_to_le32(cfg1);
-+	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
-+	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
-+	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
-+	snandc->regs->exec = cpu_to_le32(1);
-+
-+	qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
-+			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
-+			   NAND_ERASED_CW_DETECT_CFG, 1,
-+			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+
-+	data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
-+	oob_size1 = ecc_cfg->bbm_size;
-+
-+	if (cw == (num_cw - 1)) {
-+		data_size2 = NANDC_STEP_SIZE - data_size1 -
-+			     ((num_cw - 1) * 4);
-+		oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
-+			    ecc_cfg->spare_bytes;
-+	} else {
-+		data_size2 = ecc_cfg->cw_data - data_size1;
-+		oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
-+	}
-+
-+	qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
-+	read_loc += data_size1;
-+
-+	qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
-+	read_loc += oob_size1;
-+
-+	qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
-+	read_loc += data_size2;
-+
-+	qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);
-+
-+	qcom_spi_config_cw_read(snandc, false, raw_cw);
-+
-+	qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
-+	reg_off += data_size1;
-+
-+	qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
-+	reg_off += oob_size1;
-+
-+	qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
-+	reg_off += data_size2;
-+
-+	qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
-+		return ret;
-+	}
-+
-+	return qcom_spi_check_raw_flash_errors(snandc, 1);
-+}
-+
-+static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
-+				  const struct spi_mem_op *op)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	u8 *data_buf = NULL, *oob_buf = NULL;
-+	int ret, cw;
-+	u32 num_cw = snandc->qspi->num_cw;
-+
-+	if (snandc->qspi->page_rw)
-+		data_buf = op->data.buf.in;
-+
-+	oob_buf = snandc->qspi->oob_buf;
-+	memset(oob_buf, 0xff, OOB_BUF_SIZE);
-+
-+	for (cw = 0; cw < num_cw; cw++) {
-+		ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
-+		if (ret)
-+			return ret;
-+
-+		if (data_buf)
-+			data_buf += ecc_cfg->cw_data;
-+		if (oob_buf)
-+			oob_buf += ecc_cfg->bytes;
-+	}
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
-+				  const struct spi_mem_op *op)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
-+	int ret, i;
-+	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
-+
-+	data_buf = op->data.buf.in;
-+	data_buf_start = data_buf;
-+
-+	oob_buf = snandc->qspi->oob_buf;
-+	oob_buf_start = oob_buf;
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+
-+	cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
-+				(num_cw - 1) << CW_PER_PAGE;
-+	cfg1 = ecc_cfg->cfg1;
-+	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
-+
-+	snandc->regs->addr0 = snandc->qspi->addr1;
-+	snandc->regs->addr1 = snandc->qspi->addr2;
-+	snandc->regs->cmd = snandc->qspi->cmd;
-+	snandc->regs->cfg0 = cpu_to_le32(cfg0);
-+	snandc->regs->cfg1 = cpu_to_le32(cfg1);
-+	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
-+	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
-+	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
-+	snandc->regs->exec = cpu_to_le32(1);
-+
-+	qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
-+
-+	qcom_clear_bam_transaction(snandc);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
-+			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
-+			   NAND_ERASED_CW_DETECT_CFG, 1,
-+			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+
-+	for (i = 0; i < num_cw; i++) {
-+		int data_size, oob_size;
-+
-+		if (i == (num_cw - 1)) {
-+			data_size = 512 - ((num_cw - 1) << 2);
-+			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
-+				    ecc_cfg->spare_bytes;
-+		} else {
-+			data_size = ecc_cfg->cw_data;
-+			oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
-+		}
-+
-+		if (data_buf && oob_buf) {
-+			qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
-+			qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
-+		} else if (data_buf) {
-+			qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
-+		} else {
-+			qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
-+		}
-+
-+		qcom_spi_config_cw_read(snandc, true, i);
-+
-+		if (data_buf)
-+			qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
-+					   data_size, 0);
-+		if (oob_buf) {
-+			int j;
-+
-+			for (j = 0; j < ecc_cfg->bbm_size; j++)
-+				*oob_buf++ = 0xff;
-+
-+			qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
-+					   oob_buf, oob_size, 0);
-+		}
-+
-+		if (data_buf)
-+			data_buf += data_size;
-+		if (oob_buf)
-+			oob_buf += oob_size;
-+	}
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failure to read page\n");
-+		return ret;
-+	}
-+
-+	return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
-+}
-+
-+static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
-+				  const struct spi_mem_op *op)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
-+	int ret, i;
-+	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
-+
-+	oob_buf = op->data.buf.in;
-+	oob_buf_start = oob_buf;
-+
-+	data_buf_start = data_buf;
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+	qcom_clear_bam_transaction(snandc);
-+
-+	cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
-+				(num_cw - 1) << CW_PER_PAGE;
-+	cfg1 = ecc_cfg->cfg1;
-+	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
-+
-+	snandc->regs->addr0 = snandc->qspi->addr1;
-+	snandc->regs->addr1 = snandc->qspi->addr2;
-+	snandc->regs->cmd = snandc->qspi->cmd;
-+	snandc->regs->cfg0 = cpu_to_le32(cfg0);
-+	snandc->regs->cfg1 = cpu_to_le32(cfg1);
-+	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
-+	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
-+	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
-+	snandc->regs->exec = cpu_to_le32(1);
-+
-+	qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
-+			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
-+			   NAND_ERASED_CW_DETECT_CFG, 1,
-+			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
-+
-+	for (i = 0; i < num_cw; i++) {
-+		int data_size, oob_size;
-+
-+		if (i == (num_cw - 1)) {
-+			data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
-+			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
-+				    ecc_cfg->spare_bytes;
-+		} else {
-+			data_size = ecc_cfg->cw_data;
-+			oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
-+		}
-+
-+		qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
-+
-+		qcom_spi_config_cw_read(snandc, true, i);
-+
-+		if (oob_buf) {
-+			int j;
-+
-+			for (j = 0; j < ecc_cfg->bbm_size; j++)
-+				*oob_buf++ = 0xff;
-+
-+			qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
-+					   oob_buf, oob_size, 0);
-+		}
-+
-+		if (oob_buf)
-+			oob_buf += oob_size;
-+	}
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failure to read oob\n");
-+		return ret;
-+	}
-+
-+	return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
-+}
-+
-+static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
-+			      const struct spi_mem_op *op)
-+{
-+	if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
-+		return qcom_spi_read_page_raw(snandc, op);
-+
-+	if (snandc->qspi->page_rw)
-+		return qcom_spi_read_page_ecc(snandc, op);
-+
-+	if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
-+		return qcom_spi_read_last_cw(snandc, op);
-+
-+	if (snandc->qspi->oob_rw)
-+		return qcom_spi_read_page_oob(snandc, op);
-+
-+	return 0;
-+}
-+
-+static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
-+{
-+	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
-+			   1, NAND_BAM_NEXT_SGL);
-+}
-+
-+static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
-+{
-+	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
-+	qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
-+			   NAND_BAM_NEXT_SGL);
-+}
-+
-+static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
-+				const struct spi_mem_op *op)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	struct mtd_info *mtd = snandc->qspi->mtd;
-+	u8 *data_buf = NULL, *oob_buf = NULL;
-+	int i, ret;
-+	int num_cw = snandc->qspi->num_cw;
-+	u32 cfg0, cfg1, ecc_bch_cfg;
-+
-+	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-+			(num_cw - 1) << CW_PER_PAGE;
-+	cfg1 = ecc_cfg->cfg1_raw;
-+	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
-+
-+	data_buf = snandc->qspi->data_buf;
-+
-+	oob_buf = snandc->qspi->oob_buf;
-+	memset(oob_buf, 0xff, OOB_BUF_SIZE);
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+	qcom_clear_bam_transaction(snandc);
-+
-+	snandc->regs->addr0 = snandc->qspi->addr1;
-+	snandc->regs->addr1 = snandc->qspi->addr2;
-+	snandc->regs->cmd = snandc->qspi->cmd;
-+	snandc->regs->cfg0 = cpu_to_le32(cfg0);
-+	snandc->regs->cfg1 = cpu_to_le32(cfg1);
-+	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
-+	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
-+	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
-+	snandc->regs->exec = cpu_to_le32(1);
-+
-+	qcom_spi_config_page_write(snandc);
-+
-+	for (i = 0; i < num_cw; i++) {
-+		int data_size1, data_size2, oob_size1, oob_size2;
-+		int reg_off = FLASH_BUF_ACC;
-+
-+		data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
-+		oob_size1 = ecc_cfg->bbm_size;
-+
-+		if (i == (num_cw - 1)) {
-+			data_size2 = NANDC_STEP_SIZE - data_size1 -
-+				     ((num_cw - 1) << 2);
-+			oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
-+				    ecc_cfg->spare_bytes;
-+		} else {
-+			data_size2 = ecc_cfg->cw_data - data_size1;
-+			oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
-+		}
-+
-+		qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
-+				    NAND_BAM_NO_EOT);
-+		reg_off += data_size1;
-+		data_buf += data_size1;
-+
-+		qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
-+				    NAND_BAM_NO_EOT);
-+		oob_buf += oob_size1;
-+		reg_off += oob_size1;
-+
-+		qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
-+				    NAND_BAM_NO_EOT);
-+		reg_off += data_size2;
-+		data_buf += data_size2;
-+
-+		qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
-+		oob_buf += oob_size2;
-+
-+		qcom_spi_config_cw_write(snandc);
-+	}
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failure to write raw page\n");
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
-+				const struct spi_mem_op *op)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	u8 *data_buf = NULL, *oob_buf = NULL;
-+	int i, ret;
-+	int num_cw = snandc->qspi->num_cw;
-+	u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
-+
-+	cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
-+				(num_cw - 1) << CW_PER_PAGE;
-+	cfg1 = ecc_cfg->cfg1;
-+	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
-+	ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
-+
-+	if (snandc->qspi->data_buf)
-+		data_buf = snandc->qspi->data_buf;
-+
-+	oob_buf = snandc->qspi->oob_buf;
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+	qcom_clear_bam_transaction(snandc);
-+
-+	snandc->regs->addr0 = snandc->qspi->addr1;
-+	snandc->regs->addr1 = snandc->qspi->addr2;
-+	snandc->regs->cmd = snandc->qspi->cmd;
-+	snandc->regs->cfg0 = cpu_to_le32(cfg0);
-+	snandc->regs->cfg1 = cpu_to_le32(cfg1);
-+	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
-+	snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
-+	snandc->regs->exec = cpu_to_le32(1);
-+
-+	qcom_spi_config_page_write(snandc);
-+
-+	for (i = 0; i < num_cw; i++) {
-+		int data_size, oob_size;
-+
-+		if (i == (num_cw - 1)) {
-+			data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
-+			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
-+				    ecc_cfg->spare_bytes;
-+		} else {
-+			data_size = ecc_cfg->cw_data;
-+			oob_size = ecc_cfg->bytes;
-+		}
-+
-+		if (data_buf)
-+			qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
-+					    i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);
-+
-+		if (i == (num_cw - 1)) {
-+			if (oob_buf) {
-+				oob_buf += ecc_cfg->bbm_size;
-+				qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
-+						    oob_buf, oob_size, 0);
-+			}
-+		}
-+
-+		qcom_spi_config_cw_write(snandc);
-+
-+		if (data_buf)
-+			data_buf += data_size;
-+		if (oob_buf)
-+			oob_buf += oob_size;
-+	}
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failure to write page\n");
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
-+				const struct spi_mem_op *op)
-+{
-+	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
-+	u8 *oob_buf = NULL;
-+	int ret, col, data_size, oob_size;
-+	int num_cw = snandc->qspi->num_cw;
-+	u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
-+
-+	cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
-+				(num_cw - 1) << CW_PER_PAGE;
-+	cfg1 = ecc_cfg->cfg1;
-+	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
-+	ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
-+
-+	col = ecc_cfg->cw_size * (num_cw - 1);
-+
-+	oob_buf = snandc->qspi->data_buf;
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+	qcom_clear_bam_transaction(snandc);
-+	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
-+	snandc->regs->addr1 = snandc->qspi->addr2;
-+	snandc->regs->cmd = snandc->qspi->cmd;
-+	snandc->regs->cfg0 = cpu_to_le32(cfg0);
-+	snandc->regs->cfg1 = cpu_to_le32(cfg1);
-+	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
-+	snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
-+	snandc->regs->exec = cpu_to_le32(1);
-+
-+	/* calculate the data and oob size for the last codeword/step */
-+	data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
-+	oob_size = snandc->qspi->mtd->oobavail;
-+
-+	memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
-+	/* override new oob content to last codeword */
-+	mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
-+				    oob_buf, 0, snandc->qspi->mtd->oobavail);
-+	qcom_spi_config_page_write(snandc);
-+	qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
-+	qcom_spi_config_cw_write(snandc);
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret) {
-+		dev_err(snandc->dev, "failure to write oob\n");
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
-+				    const struct spi_mem_op *op)
-+{
-+	if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
-+		return qcom_spi_program_raw(snandc, op);
-+
-+	if (snandc->qspi->page_rw)
-+		return qcom_spi_program_ecc(snandc, op);
-+
-+	if (snandc->qspi->oob_rw)
-+		return qcom_spi_program_oob(snandc, op);
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd)
-+{
-+	switch (opcode) {
-+	case SPINAND_RESET:
-+		*cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
-+		break;
-+	case SPINAND_READID:
-+		*cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
-+		break;
-+	case SPINAND_GET_FEATURE:
-+		*cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
-+		break;
-+	case SPINAND_SET_FEATURE:
-+		*cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
-+			QPIC_SET_FEATURE);
-+		break;
-+	case SPINAND_READ:
-+		if (snandc->qspi->raw_rw) {
-+			*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
-+					SPI_WP | SPI_HOLD | OP_PAGE_READ);
-+		} else {
-+			*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
-+					SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
-+		}
-+
-+		break;
-+	case SPINAND_ERASE:
-+		*cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
-+			SPI_HOLD | SPI_TRANSFER_MODE_x1;
-+		break;
-+	case SPINAND_WRITE_EN:
-+		*cmd = SPINAND_WRITE_EN;
-+		break;
-+	case SPINAND_PROGRAM_EXECUTE:
-+		*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
-+				SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
-+		break;
-+	case SPINAND_PROGRAM_LOAD:
-+		*cmd = SPINAND_PROGRAM_LOAD;
-+		break;
-+	default:
-+		dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
-+		return -EOPNOTSUPP;
-+	}
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
-+			       const struct spi_mem_op *op)
-+{
-+	int ret;
-+	u32 cmd;
-+
-+	ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
-+	if (ret < 0)
-+		return ret;
-+
-+	if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
-+		snandc->qspi->data_buf = (u8 *)op->data.buf.out;
-+
-+	return 0;
-+}
-+
-+static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
-+				 const struct spi_mem_op *op)
-+{
-+	struct qpic_snand_op s_op = {};
-+	u32 cmd;
-+	int ret, opcode;
-+
-+	ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
-+	if (ret < 0)
-+		return ret;
-+
-+	s_op.cmd_reg = cmd;
-+	s_op.addr1_reg = op->addr.val;
-+	s_op.addr2_reg = 0;
-+
-+	opcode = op->cmd.opcode;
-+
-+	switch (opcode) {
-+	case SPINAND_WRITE_EN:
-+		return 0;
-+	case SPINAND_PROGRAM_EXECUTE:
-+		s_op.addr1_reg = op->addr.val << 16;
-+		s_op.addr2_reg = op->addr.val >> 16 & 0xff;
-+		snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
-+		snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
-+		snandc->qspi->cmd = cpu_to_le32(cmd);
-+		return qcom_spi_program_execute(snandc, op);
-+	case SPINAND_READ:
-+		s_op.addr1_reg = (op->addr.val << 16);
-+		s_op.addr2_reg = op->addr.val >> 16 & 0xff;
-+		snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
-+		snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
-+		snandc->qspi->cmd = cpu_to_le32(cmd);
-+		return 0;
-+	case SPINAND_ERASE:
-+		s_op.addr2_reg = (op->addr.val >> 16) & 0xffff;
-+		s_op.addr1_reg = op->addr.val;
-+		snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
-+		snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
-+		snandc->qspi->cmd = cpu_to_le32(cmd);
-+		qcom_spi_block_erase(snandc);
-+		return 0;
-+	default:
-+		break;
-+	}
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+	qcom_clear_bam_transaction(snandc);
-+
-+	snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg);
-+	snandc->regs->exec = cpu_to_le32(1);
-+	snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg);
-+	snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg);
-+
-+	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
-+	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret)
-+		dev_err(snandc->dev, "failure in submitting cmd descriptor\n");
-+
-+	return ret;
-+}
-+
-+static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
-+{
-+	int ret, val, opcode;
-+	bool copy = false, copy_ftr = false;
-+
-+	ret = qcom_spi_send_cmdaddr(snandc, op);
-+	if (ret)
-+		return ret;
-+
-+	snandc->buf_count = 0;
-+	snandc->buf_start = 0;
-+	qcom_clear_read_regs(snandc);
-+	qcom_clear_bam_transaction(snandc);
-+	opcode = op->cmd.opcode;
-+
-+	switch (opcode) {
-+	case SPINAND_READID:
-+		snandc->buf_count = 4;
-+		qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-+		copy = true;
-+		break;
-+	case SPINAND_GET_FEATURE:
-+		snandc->buf_count = 4;
-+		qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
-+		copy_ftr = true;
-+		break;
-+	case SPINAND_SET_FEATURE:
-+		snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
-+		qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
-+				   NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
-+		break;
-+	case SPINAND_PROGRAM_EXECUTE:
-+	case SPINAND_WRITE_EN:
-+	case SPINAND_RESET:
-+	case SPINAND_ERASE:
-+	case SPINAND_READ:
-+		return 0;
-+	default:
-+		return -EOPNOTSUPP;
-+	}
-+
-+	ret = qcom_submit_descs(snandc);
-+	if (ret)
-+		dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode);
-+
-+	if (copy) {
-+		qcom_nandc_dev_to_mem(snandc, true);
-+		memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
-+	}
-+
-+	if (copy_ftr) {
-+		qcom_nandc_dev_to_mem(snandc, true);
-+		val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
-+		val >>= 8;
-+		memcpy(op->data.buf.in, &val, snandc->buf_count);
-+	}
-+
-+	return ret;
-+}
-+
-+static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
-+{
-+	if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
-+		return false;
-+
-+	if (op->data.dir == SPI_MEM_DATA_IN) {
-+		if (op->addr.buswidth == 4 && op->data.buswidth == 4)
-+			return true;
-+
-+		if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
-+			return true;
-+
-+	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
-+		if (op->data.buswidth == 4)
-+			return true;
-+		if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
-+			return true;
-+	}
-+
-+	return false;
-+}
-+
-+static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
-+{
-+	if (!spi_mem_default_supports_op(mem, op))
-+		return false;
-+
-+	if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
-+		return false;
-+
-+	if (qcom_spi_is_page_op(op))
-+		return true;
-+
-+	return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
-+		(!op->dummy.nbytes || op->dummy.buswidth == 1) &&
-+		(!op->data.nbytes || op->data.buswidth == 1));
-+}
-+
-+static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
-+{
-+	struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);
-+
-+	dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
-+		op->addr.val, op->addr.buswidth, op->addr.nbytes,
-+		op->data.buswidth, op->data.nbytes);
-+
-+	if (qcom_spi_is_page_op(op)) {
-+		if (op->data.dir == SPI_MEM_DATA_IN)
-+			return qcom_spi_read_page(snandc, op);
-+		if (op->data.dir == SPI_MEM_DATA_OUT)
-+			return qcom_spi_write_page(snandc, op);
-+	} else {
-+		return qcom_spi_io_op(snandc, op);
-+	}
-+
-+	return 0;
-+}
-+
-+static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
-+	.supports_op = qcom_spi_supports_op,
-+	.exec_op = qcom_spi_exec_op,
-+};
-+
-+static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
-+	.ecc = true,
-+};
-+
-+static int qcom_spi_probe(struct platform_device *pdev)
-+{
-+	struct device *dev = &pdev->dev;
-+	struct spi_controller *ctlr;
-+	struct qcom_nand_controller *snandc;
-+	struct qpic_spi_nand *qspi;
-+	struct qpic_ecc *ecc;
-+	struct resource *res;
-+	const void *dev_data;
-+	int ret;
-+
-+	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
-+	if (!ecc)
-+		return -ENOMEM;
-+
-+	qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
-+	if (!qspi)
-+		return -ENOMEM;
-+
-+	ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
-+	if (!ctlr)
-+		return -ENOMEM;
-+
-+	platform_set_drvdata(pdev, ctlr);
-+
-+	snandc = spi_controller_get_devdata(ctlr);
-+	qspi->snandc = snandc;
-+
-+	snandc->dev = dev;
-+	snandc->qspi = qspi;
-+	snandc->qspi->ctlr = ctlr;
-+	snandc->qspi->ecc = ecc;
-+
-+	dev_data = of_device_get_match_data(dev);
-+	if (!dev_data) {
-+		dev_err(&pdev->dev, "failed to get device data\n");
-+		return -ENODEV;
-+	}
-+
-+	snandc->props = dev_data;
-+	snandc->dev = &pdev->dev;
-+
-+	snandc->core_clk = devm_clk_get(dev, "core");
-+	if (IS_ERR(snandc->core_clk))
-+		return PTR_ERR(snandc->core_clk);
-+
-+	snandc->aon_clk = devm_clk_get(dev, "aon");
-+	if (IS_ERR(snandc->aon_clk))
-+		return PTR_ERR(snandc->aon_clk);
-+
-+	snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
-+	if (IS_ERR(snandc->qspi->iomacro_clk))
-+		return PTR_ERR(snandc->qspi->iomacro_clk);
-+
-+	snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
-+	if (IS_ERR(snandc->base))
-+		return PTR_ERR(snandc->base);
-+
-+	snandc->base_phys = res->start;
-+	snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
-+					    DMA_BIDIRECTIONAL, 0);
-+	if (dma_mapping_error(dev, snandc->base_dma))
-+		return -ENXIO;
-+
-+	ret = clk_prepare_enable(snandc->core_clk);
-+	if (ret)
-+		goto err_dis_core_clk;
-+
-+	ret = clk_prepare_enable(snandc->aon_clk);
-+	if (ret)
-+		goto err_dis_aon_clk;
-+
-+	ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
-+	if (ret)
-+		goto err_dis_iom_clk;
-+
-+	ret = qcom_nandc_alloc(snandc);
-+	if (ret)
-+		goto err_snand_alloc;
-+
-+	ret = qcom_spi_init(snandc);
-+	if (ret)
-+		goto err_spi_init;
-+
-+	/* setup ECC engine */
-+	snandc->qspi->ecc_eng.dev = &pdev->dev;
-+	snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
-+	snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
-+	snandc->qspi->ecc_eng.priv = snandc;
-+
-+	ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
-+	if (ret) {
-+		dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret);
-+		goto err_spi_init;
-+	}
-+
-+	ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
-+	ctlr->mem_ops = &qcom_spi_mem_ops;
-+	ctlr->mem_caps = &qcom_spi_mem_caps;
-+	ctlr->dev.of_node = pdev->dev.of_node;
-+	ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
-+			    SPI_TX_QUAD | SPI_RX_QUAD;
-+
-+	ret = spi_register_controller(ctlr);
-+	if (ret) {
-+		dev_err(&pdev->dev, "spi_register_controller failed.\n");
-+		goto err_spi_init;
-+	}
-+
-+	return 0;
-+
-+err_spi_init:
-+	qcom_nandc_unalloc(snandc);
-+err_snand_alloc:
-+	clk_disable_unprepare(snandc->qspi->iomacro_clk);
-+err_dis_iom_clk:
-+	clk_disable_unprepare(snandc->aon_clk);
-+err_dis_aon_clk:
-+	clk_disable_unprepare(snandc->core_clk);
-+err_dis_core_clk:
-+	dma_unmap_resource(dev, res->start, resource_size(res),
-+			   DMA_BIDIRECTIONAL, 0);
-+	return ret;
-+}
-+
-+static void qcom_spi_remove(struct platform_device *pdev)
-+{
-+	struct spi_controller *ctlr = platform_get_drvdata(pdev);
-+	struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
-+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+
-+	spi_unregister_controller(ctlr);
-+
-+	qcom_nandc_unalloc(snandc);
-+
-+	clk_disable_unprepare(snandc->aon_clk);
-+	clk_disable_unprepare(snandc->core_clk);
-+	clk_disable_unprepare(snandc->qspi->iomacro_clk);
-+
-+	dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
-+			   DMA_BIDIRECTIONAL, 0);
-+}
-+
-+static const struct qcom_nandc_props ipq9574_snandc_props = {
-+	.dev_cmd_reg_start = 0x7000,
-+	.supports_bam = true,
-+};
-+
-+static const struct of_device_id qcom_snandc_of_match[] = {
-+	{
-+		.compatible = "qcom,ipq9574-snand",
-+		.data = &ipq9574_snandc_props,
-+	},
-+	{}
-+}
-+MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
-+
-+static struct platform_driver qcom_spi_driver = {
-+	.driver = {
-+		.name		= "qcom_snand",
-+		.of_match_table = qcom_snandc_of_match,
-+	},
-+	.probe = qcom_spi_probe,
-+	.remove_new = qcom_spi_remove,
-+};
-+module_platform_driver(qcom_spi_driver);
-+
-+MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
-+MODULE_AUTHOR("Md Sadre Alam <[email protected]>");
-+MODULE_LICENSE("GPL");
-+
---- a/include/linux/mtd/nand-qpic-common.h
-+++ b/include/linux/mtd/nand-qpic-common.h
-@@ -325,6 +325,10 @@ struct nandc_regs {
- 	__le32 read_location_last1;
- 	__le32 read_location_last2;
- 	__le32 read_location_last3;
-+	__le32 spi_cfg;
-+	__le32 num_addr_cycle;
-+	__le32 busy_wait_cnt;
-+	__le32 flash_feature;
- 
- 	__le32 erased_cw_detect_cfg_clr;
- 	__le32 erased_cw_detect_cfg_set;
-@@ -339,6 +343,7 @@ struct nandc_regs {
-  *
-  * @core_clk:			controller clock
-  * @aon_clk:			another controller clock
-+ * @iomacro_clk:		io macro clock
-  *
-  * @regs:			a contiguous chunk of memory for DMA register
-  *				writes. contains the register values to be
-@@ -348,6 +353,7 @@ struct nandc_regs {
-  *				initialized via DT match data
-  *
-  * @controller:			base controller structure
-+ * @qspi:			qpic spi structure
-  * @host_list:			list containing all the chips attached to the
-  *				controller
-  *
-@@ -392,6 +398,7 @@ struct qcom_nand_controller {
- 	const struct qcom_nandc_props *props;
- 
- 	struct nand_controller *controller;
-+	struct qpic_spi_nand *qspi;
- 	struct list_head host_list;
- 
- 	union {

+ 0 - 28
target/linux/generic/backport-6.6/416-v6.15-02-spi-spi-qpic-snand-Fix-ECC_CFG_ECC_DISABLE-shift-in-.patch

@@ -1,28 +0,0 @@
-From cf1ba3cb245020459f2ca446b7a7b199839f5d83 Mon Sep 17 00:00:00 2001
-From: Dan Carpenter <[email protected]>
-Date: Thu, 6 Mar 2025 12:40:01 +0300
-Subject: [PATCH] spi: spi-qpic-snand: Fix ECC_CFG_ECC_DISABLE shift in
- qcom_spi_read_last_cw()
-
-The ECC_CFG_ECC_DISABLE define is BIT(0).  It's supposed to be used
-directly instead of used as a shifter.
-
-Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
-Signed-off-by: Dan Carpenter <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Mark Brown <[email protected]>
----
- drivers/spi/spi-qpic-snand.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/spi/spi-qpic-snand.c
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -514,7 +514,7 @@ static int qcom_spi_read_last_cw(struct
- 	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- 		0 << CW_PER_PAGE;
- 	cfg1 = ecc_cfg->cfg1_raw;
--	ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
-+	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
- 
- 	snandc->regs->cmd = snandc->qspi->cmd;
- 	snandc->regs->cfg0 = cpu_to_le32(cfg0);

+ 0 - 35
target/linux/generic/backport-6.6/416-v6.15-03-spi-spi-qpic-snand-avoid-memleak-in-qcom_spi_ecc_ini.patch

@@ -1,35 +0,0 @@
-From d450cdd9c4398add1f2aa7200f2c95f1e3b9f9fa Mon Sep 17 00:00:00 2001
-From: Gabor Juhos <[email protected]>
-Date: Thu, 13 Mar 2025 19:31:21 +0100
-Subject: [PATCH] spi: spi-qpic-snand: avoid memleak in
- qcom_spi_ecc_init_ctx_pipelined()
-
-When the allocation of the OOB buffer fails, the
-qcom_spi_ecc_init_ctx_pipelined() function returns without freeing
-the memory allocated for 'ecc_cfg' thus it can cause a memory leak.
-
-Call kfree() to free 'ecc_cfg' before returning from the function
-to avoid that.
-
-Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
-Signed-off-by: Gabor Juhos <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Mark Brown <[email protected]>
----
- drivers/spi/spi-qpic-snand.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/drivers/spi/spi-qpic-snand.c
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -263,8 +263,10 @@ static int qcom_spi_ecc_init_ctx_pipelin
- 		return -ENOMEM;
- 	snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
- 					GFP_KERNEL);
--	if (!snandc->qspi->oob_buf)
-+	if (!snandc->qspi->oob_buf) {
-+		kfree(ecc_cfg);
- 		return -ENOMEM;
-+	}
- 
- 	memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
- 

+ 0 - 49
target/linux/generic/backport-6.6/416-v6.15-04-spi-SPI_QPIC_SNAND-should-be-tristate-and-depend-on-.patch

@@ -1,49 +0,0 @@
-From d32c4e58545f17caaa854415f854691e32d42075 Mon Sep 17 00:00:00 2001
-From: Geert Uytterhoeven <[email protected]>
-Date: Wed, 26 Mar 2025 15:22:19 +0100
-Subject: [PATCH] spi: SPI_QPIC_SNAND should be tristate and depend on MTD
-
-SPI_QPIC_SNAND is the only driver that selects MTD instead of depending
-on it, which could lead to circular dependencies.  Moreover, as
-SPI_QPIC_SNAND is bool, this forces MTD (and various related symbols) to
-be built-in, as can be seen in an allmodconfig kernel.
-
-Except for a missing semicolon, there is no reason why SPI_QPIC_SNAND
-cannot be tristate; all MODULE_*() boilerplate is already present.
-Hence make SPI_QPIC_SNAND tristate, let it depend on MTD, and add the
-missing semicolon.
-
-Fixes: 7304d1909080ef0c ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
-Signed-off-by: Geert Uytterhoeven <[email protected]>
-Link: https://patch.msgid.link/b63db431cbf35223a4400e44c296293d32c4543c.1742998909.git.geert+renesas@glider.be
-Signed-off-by: Mark Brown <[email protected]>
----
- drivers/spi/Kconfig          | 4 ++--
- drivers/spi/spi-qpic-snand.c | 2 +-
- 2 files changed, 3 insertions(+), 3 deletions(-)
-
---- a/drivers/spi/Kconfig
-+++ b/drivers/spi/Kconfig
-@@ -871,9 +871,9 @@ config SPI_QCOM_QSPI
- 	  QSPI(Quad SPI) driver for Qualcomm QSPI controller.
- 
- config SPI_QPIC_SNAND
--	bool "QPIC SNAND controller"
-+	tristate "QPIC SNAND controller"
- 	depends on ARCH_QCOM || COMPILE_TEST
--	select MTD
-+	depends on MTD
- 	help
- 	  QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
- 	  QPIC controller supports both parallel nand and serial nand.
---- a/drivers/spi/spi-qpic-snand.c
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -1614,7 +1614,7 @@ static const struct of_device_id qcom_sn
- 		.data = &ipq9574_snandc_props,
- 	},
- 	{}
--}
-+};
- MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
- 
- static struct platform_driver qcom_spi_driver = {

+ 0 - 36
target/linux/generic/backport-6.6/416-v6.15-05-spi-spi-qpic-snand-propagate-errors-from-qcom_spi_block_erase.patch

@@ -1,36 +0,0 @@
-From: Gabor Juhos <[email protected]>
-Date: Wed, 23 Apr 2025 21:31:57 +0200
-Subject: [PATCH] spi: spi-qpic-snand: propagate errors from
- qcom_spi_block_erase()
-
-The qcom_spi_block_erase() function returns with error in case of
-failure. Change the qcom_spi_send_cmdaddr() function to propagate
-these errors to the callers instead of returning with success.
-
-Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
-Signed-off-by: Gabor Juhos <[email protected]>
-Reviewed-by: Abel Vesa <[email protected]>
-Reviewed-by: Md Sadre Alam <[email protected]>
----
- drivers/spi/spi-qpic-snand.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-
----
-base-commit: 9c32cda43eb78f78c73aee4aa344b777714e259b
-change-id: 20250422-qpic-snand-propagate-error-9c95811ab811
-
-Best regards,
-
---- a/drivers/spi/spi-qpic-snand.c
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -1307,8 +1307,7 @@ static int qcom_spi_send_cmdaddr(struct
- 		snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
- 		snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
- 		snandc->qspi->cmd = cpu_to_le32(cmd);
--		qcom_spi_block_erase(snandc);
--		return 0;
-+		return qcom_spi_block_erase(snandc);
- 	default:
- 		break;
- 	}

+ 0 - 35
target/linux/generic/backport-6.6/416-v6.15-06-spi-spi-qpic-snand-fix-NAND_READ_LOCATION_2-register-handling.patch

@@ -1,35 +0,0 @@
-From: Gabor Juhos <[email protected]>
-Date: Mon, 28 Apr 2025 09:30:55 +0200
-Subject: [PATCH] spi: spi-qpic-snand: fix NAND_READ_LOCATION_2 register
- handling
-
-The precomputed value for the NAND_READ_LOCATION_2 register should be
-stored in 'snandc->regs->read_location2'.
-
-Fix the qcom_spi_set_read_loc_first() function accordingly.
-
-Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
-Signed-off-by: Gabor Juhos <[email protected]>
-Reviewed-by: Md Sadre Alam <[email protected]>
----
- drivers/spi/spi-qpic-snand.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-
----
-base-commit: 15cfe55ec58ace931a73e19e5367598734ceb074
-change-id: 20250428-qpic-snand-readloc2-fix-bccd07cf26d3
-
-Best regards,
-
---- a/drivers/spi/spi-qpic-snand.c
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -142,7 +142,7 @@ static void qcom_spi_set_read_loc_first(
- 	else if (reg == NAND_READ_LOCATION_1)
- 		snandc->regs->read_location1 = locreg_val;
- 	else if (reg == NAND_READ_LOCATION_2)
--		snandc->regs->read_location1 = locreg_val;
-+		snandc->regs->read_location2 = locreg_val;
- 	else if (reg == NAND_READ_LOCATION_3)
- 		snandc->regs->read_location3 = locreg_val;
- }

+ 0 - 29
target/linux/generic/backport-6.6/416-v6.15-07-spi-spi-qpic-snand-use-kmalloc-for-OOB-buffer-alloca.patch

@@ -1,29 +0,0 @@
-From f48d80503504257682e493dc17408f2f0b47bcfa Mon Sep 17 00:00:00 2001
-From: Gabor Juhos <[email protected]>
-Date: Thu, 20 Mar 2025 19:11:59 +0100
-Subject: [PATCH] spi: spi-qpic-snand: use kmalloc() for OOB buffer allocation
-
-The qcom_spi_ecc_init_ctx_pipelined() function allocates zeroed
-memory for the OOB buffer, then it fills the buffer with '0xff'
-bytes right after the allocation. In this case zeroing the memory
-during allocation is superfluous, so use kmalloc() instead of
-kzalloc() to avoid that.
-
-Signed-off-by: Gabor Juhos <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Mark Brown <[email protected]>
----
- drivers/spi/spi-qpic-snand.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/spi/spi-qpic-snand.c
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -261,7 +261,7 @@ static int qcom_spi_ecc_init_ctx_pipelin
- 	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
- 	if (!ecc_cfg)
- 		return -ENOMEM;
--	snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
-+	snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize,
- 					GFP_KERNEL);
- 	if (!snandc->qspi->oob_buf) {
- 		kfree(ecc_cfg);

+ 0 - 30
target/linux/generic/backport-6.6/416-v6.16-08-spi-spi-qpic-snand-remove-unused-wlen-member-of-struct-qpic_spi_nand.patch

@@ -1,30 +0,0 @@
-From: Gabor Juhos <[email protected]>
-Date: Thu, 24 Apr 2025 20:10:59 +0200
-Subject: [PATCH] spi: spi-qpic-snand: remove unused 'wlen' member of
- 'struct qpic_spi_nand'
-
-The 'wlen' member of the qpic_spi_nand structure is never used in the
-code so remove that.
-
-Signed-off-by: Gabor Juhos <[email protected]>
----
- drivers/spi/spi-qpic-snand.c | 1 -
- 1 file changed, 1 deletion(-)
-
-
----
-base-commit: 9c32cda43eb78f78c73aee4aa344b777714e259b
-change-id: 20250424-qpic-snand-remove-wlen-c0cef3801a7f
-
-Best regards,
-
---- a/drivers/spi/spi-qpic-snand.c
-+++ b/drivers/spi/spi-qpic-snand.c
-@@ -116,7 +116,6 @@ struct qpic_spi_nand {
- 	struct nand_ecc_engine ecc_eng;
- 	u8 *data_buf;
- 	u8 *oob_buf;
--	u32 wlen;
- 	__le32 addr1;
- 	__le32 addr2;
- 	__le32 cmd;

+ 0 - 31
target/linux/generic/backport-6.6/417-v6.12-mtd-spinand-set-bitflip_threshold-to-75-of-ECC-stren.patch

@@ -1,31 +0,0 @@
-From 1824520e7477bedf76bd08c32261c755e6405cd9 Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Mon, 12 Aug 2024 02:56:41 +0100
-Subject: [PATCH] mtd: spinand: set bitflip_threshold to 75% of ECC strength
-
-Reporting an unclean read from SPI-NAND only when the maximum number
-of correctable bitflip errors has been hit seems a bit late.
-UBI LEB scrubbing, which depends on the lower MTD device reporting
-correctable bitflips, then only kicks in when it's almost too late.
-
-Set bitflip_threshold to 75% of the ECC strength, which is also the
-default for raw NAND.
-
-Signed-off-by: Daniel Golle <[email protected]>
-Reviewed-by: Frieder Schrempf <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
-Link: https://lore.kernel.org/linux-mtd/2117e387260b0a96f95b8e1652ff79e0e2d71d53.1723427450.git.daniel@makrotopia.org
----
- drivers/mtd/nand/spi/core.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/mtd/nand/spi/core.c
-+++ b/drivers/mtd/nand/spi/core.c
-@@ -1290,6 +1290,7 @@ static int spinand_init(struct spinand_d
- 	/* Propagate ECC information to mtd_info */
- 	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
- 	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
-+	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
- 
- 	ret = spinand_create_dirmaps(spinand);
- 	if (ret) {

+ 0 - 65
target/linux/generic/backport-6.6/418-v6.12-mtd-spinand-winbond-add-support-for-W25N01KV.patch

@@ -1,65 +0,0 @@
-From e2a9fcb36e851adb5b25c4acea53a290fd48a636 Mon Sep 17 00:00:00 2001
-From: Robert Marko <[email protected]>
-Date: Mon, 5 Aug 2024 19:51:02 +0200
-Subject: [PATCH] mtd: spinand: winbond: add support for W25N01KV
-
-Add support for Winbond W25N01KV 1Gbit SPI-NAND.
-
-It has 4-bit on-die ECC.
-
-Signed-off-by: Robert Marko <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
-Link: https://lore.kernel.org/linux-mtd/[email protected]
----
- drivers/mtd/nand/spi/winbond.c | 26 ++++++++++++++++++++++++++
- 1 file changed, 26 insertions(+)
-
---- a/drivers/mtd/nand/spi/winbond.c
-+++ b/drivers/mtd/nand/spi/winbond.c
-@@ -74,6 +74,18 @@ static int w25m02gv_select_target(struct
- 	return spi_mem_exec_op(spinand->spimem, &op);
- }
- 
-+static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section,
-+				  struct mtd_oob_region *region)
-+{
-+	if (section > 3)
-+		return -ERANGE;
-+
-+	region->offset = 64 + (8 * section);
-+	region->length = 7;
-+
-+	return 0;
-+}
-+
- static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
- 				  struct mtd_oob_region *region)
- {
-@@ -98,6 +110,11 @@ static int w25n02kv_ooblayout_free(struc
- 	return 0;
- }
- 
-+static const struct mtd_ooblayout_ops w25n01kv_ooblayout = {
-+	.ecc = w25n01kv_ooblayout_ecc,
-+	.free = w25n02kv_ooblayout_free,
-+};
-+
- static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
- 	.ecc = w25n02kv_ooblayout_ecc,
- 	.free = w25n02kv_ooblayout_free,
-@@ -160,6 +177,15 @@ static const struct spinand_info winbond
- 					      &update_cache_variants),
- 		     0,
- 		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
-+	SPINAND_INFO("W25N01KV",
-+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
-+		     NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
-+		     NAND_ECCREQ(4, 512),
-+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
-+					      &write_cache_variants,
-+					      &update_cache_variants),
-+		     0,
-+		     SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
- 	SPINAND_INFO("W25N02KV",
- 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
- 		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),

+ 0 - 124
target/linux/generic/backport-6.6/419-v6.17-mtd-spinand-add-support-for-FudanMicro-FM25S01A.patch

@@ -1,124 +0,0 @@
-From 5f284dc15ca8695d0394414045ac64616a3b0e69 Mon Sep 17 00:00:00 2001
-From: Tianling Shen <[email protected]>
-Date: Mon, 25 Aug 2025 01:00:13 +0800
-Subject: [PATCH] mtd: spinand: add support for FudanMicro FM25S01A
-
-Add support for FudanMicro FM25S01A SPI NAND.
-Datasheet: http://eng.fmsh.com/nvm/FM25S01A_ds_eng.pdf
-
-Signed-off-by: Tianling Shen <[email protected]>
-Signed-off-by: Miquel Raynal <[email protected]>
-Link: https://lore.kernel.org/linux-mtd/[email protected]
----
- drivers/mtd/nand/spi/Makefile |  2 +-
- drivers/mtd/nand/spi/core.c   |  1 +
- drivers/mtd/nand/spi/fmsh.c   | 74 +++++++++++++++++++++++++++++++++++
- include/linux/mtd/spinand.h   |  1 +
- 4 files changed, 77 insertions(+), 1 deletion(-)
- create mode 100644 drivers/mtd/nand/spi/fmsh.c
-
---- a/drivers/mtd/nand/spi/Makefile
-+++ b/drivers/mtd/nand/spi/Makefile
-@@ -1,4 +1,4 @@
- # SPDX-License-Identifier: GPL-2.0
--spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
-+spinand-objs := core.o alliancememory.o ato.o esmt.o fmsh.o foresee.o gigadevice.o macronix.o
- spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
- obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
---- a/drivers/mtd/nand/spi/core.c
-+++ b/drivers/mtd/nand/spi/core.c
-@@ -943,6 +943,7 @@ static const struct spinand_manufacturer
- 	&alliancememory_spinand_manufacturer,
- 	&ato_spinand_manufacturer,
- 	&esmt_c8_spinand_manufacturer,
-+	&fmsh_spinand_manufacturer,
- 	&foresee_spinand_manufacturer,
- 	&gigadevice_spinand_manufacturer,
- 	&macronix_spinand_manufacturer,
---- /dev/null
-+++ b/drivers/mtd/nand/spi/fmsh.c
-@@ -0,0 +1,74 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (c) 2020-2021 Rockchip Electronics Co., Ltd.
-+ *
-+ * Author: Dingqiang Lin <[email protected]>
-+ */
-+
-+#include <linux/device.h>
-+#include <linux/kernel.h>
-+#include <linux/mtd/spinand.h>
-+
-+#define SPINAND_MFR_FMSH		0xA1
-+
-+static SPINAND_OP_VARIANTS(read_cache_variants,
-+		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
-+		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-+		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-+		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-+		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
-+		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
-+
-+static SPINAND_OP_VARIANTS(write_cache_variants,
-+		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-+		SPINAND_PROG_LOAD(true, 0, NULL, 0));
-+
-+static SPINAND_OP_VARIANTS(update_cache_variants,
-+		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-+		SPINAND_PROG_LOAD(false, 0, NULL, 0));
-+
-+static int fm25s01a_ooblayout_ecc(struct mtd_info *mtd, int section,
-+				  struct mtd_oob_region *region)
-+{
-+	return -ERANGE;
-+}
-+
-+static int fm25s01a_ooblayout_free(struct mtd_info *mtd, int section,
-+				   struct mtd_oob_region *region)
-+{
-+	if (section)
-+		return -ERANGE;
-+
-+	region->offset = 2;
-+	region->length = 62;
-+
-+	return 0;
-+}
-+
-+static const struct mtd_ooblayout_ops fm25s01a_ooblayout = {
-+	.ecc = fm25s01a_ooblayout_ecc,
-+	.free = fm25s01a_ooblayout_free,
-+};
-+
-+static const struct spinand_info fmsh_spinand_table[] = {
-+	SPINAND_INFO("FM25S01A",
-+		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xE4),
-+		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
-+		     NAND_ECCREQ(1, 512),
-+		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
-+					      &write_cache_variants,
-+					      &update_cache_variants),
-+		     SPINAND_HAS_QE_BIT,
-+		     SPINAND_ECCINFO(&fm25s01a_ooblayout, NULL)),
-+};
-+
-+static const struct spinand_manufacturer_ops fmsh_spinand_manuf_ops = {
-+};
-+
-+const struct spinand_manufacturer fmsh_spinand_manufacturer = {
-+	.id = SPINAND_MFR_FMSH,
-+	.name = "Fudan Micro",
-+	.chips = fmsh_spinand_table,
-+	.nchips = ARRAY_SIZE(fmsh_spinand_table),
-+	.ops = &fmsh_spinand_manuf_ops,
-+};
---- a/include/linux/mtd/spinand.h
-+++ b/include/linux/mtd/spinand.h
-@@ -263,6 +263,7 @@ struct spinand_manufacturer {
- extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
- extern const struct spinand_manufacturer ato_spinand_manufacturer;
- extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
-+extern const struct spinand_manufacturer fmsh_spinand_manufacturer;
- extern const struct spinand_manufacturer foresee_spinand_manufacturer;
- extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
- extern const struct spinand_manufacturer macronix_spinand_manufacturer;

+ 0 - 75
target/linux/generic/backport-6.6/600-v6.10-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch

@@ -1,75 +0,0 @@
-From 56364c910691f6d10ba88c964c9041b9ab777bd6 Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <[email protected]>
-Date: Mon, 25 Mar 2024 08:40:28 +0100
-Subject: [PATCH 1/4] net: Remove conditional threaded-NAPI wakeup based on
- task state.
-
-A NAPI thread is scheduled by first setting NAPI_STATE_SCHED bit. If
-successful (the bit was not yet set) then the NAPI_STATE_SCHED_THREADED
-is set but only if thread's state is not TASK_INTERRUPTIBLE (is
-TASK_RUNNING) followed by task wakeup.
-
-If the task is idle (TASK_INTERRUPTIBLE) then the
-NAPI_STATE_SCHED_THREADED bit is not set. The thread is no relying on
-the bit but always leaving the wait-loop after returning from schedule()
-because there must have been a wakeup.
-
-The smpboot-threads implementation for per-CPU threads requires an
-explicit condition and does not support "if we get out of schedule()
-then there must be something to do".
-
-Removing this optimisation simplifies the following integration.
-
-Set NAPI_STATE_SCHED_THREADED unconditionally on wakeup and rely on it
-in the wait path by removing the `woken' condition.
-
-Acked-by: Jakub Kicinski <[email protected]>
-Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- net/core/dev.c | 14 ++------------
- 1 file changed, 2 insertions(+), 12 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -4526,13 +4526,7 @@ static inline void ____napi_schedule(str
- 		 */
- 		thread = READ_ONCE(napi->thread);
- 		if (thread) {
--			/* Avoid doing set_bit() if the thread is in
--			 * INTERRUPTIBLE state, cause napi_thread_wait()
--			 * makes sure to proceed with napi polling
--			 * if the thread is explicitly woken from here.
--			 */
--			if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
--				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
-+			set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
- 			wake_up_process(thread);
- 			return;
- 		}
-@@ -6688,8 +6682,6 @@ static int napi_poll(struct napi_struct
- 
- static int napi_thread_wait(struct napi_struct *napi)
- {
--	bool woken = false;
--
- 	set_current_state(TASK_INTERRUPTIBLE);
- 
- 	while (!kthread_should_stop()) {
-@@ -6698,15 +6690,13 @@ static int napi_thread_wait(struct napi_
- 		 * Testing SCHED bit is not enough because SCHED bit might be
- 		 * set by some other busy poll thread or by napi_disable().
- 		 */
--		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
-+		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
- 			WARN_ON(!list_empty(&napi->poll_list));
- 			__set_current_state(TASK_RUNNING);
- 			return 0;
- 		}
- 
- 		schedule();
--		/* woken being true indicates this thread owns this napi. */
--		woken = true;
- 		set_current_state(TASK_INTERRUPTIBLE);
- 	}
- 	__set_current_state(TASK_RUNNING);

+ 0 - 330
target/linux/generic/backport-6.6/601-v6.10-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch

@@ -1,330 +0,0 @@
-From dad6b97702639fba27a2bd3e986982ad6f0db3a7 Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <[email protected]>
-Date: Mon, 25 Mar 2024 08:40:29 +0100
-Subject: [PATCH 2/4] net: Allow to use SMP threads for backlog NAPI.
-
-Backlog NAPI is a per-CPU NAPI struct only (with no device behind it)
-used by drivers which don't do NAPI them self, RPS and parts of the
-stack which need to avoid recursive deadlocks while processing a packet.
-
-The non-NAPI driver use the CPU local backlog NAPI. If RPS is enabled
-then a flow for the skb is computed and based on the flow the skb can be
-enqueued on a remote CPU. Scheduling/ raising the softirq (for backlog's
-NAPI) on the remote CPU isn't trivial because the softirq is only
-scheduled on the local CPU and performed after the hardirq is done.
-In order to schedule a softirq on the remote CPU, an IPI is sent to the
-remote CPU which schedules the backlog-NAPI on the then local CPU.
-
-On PREEMPT_RT interrupts are force-threaded. The soft interrupts are
-raised within the interrupt thread and processed after the interrupt
-handler completed still within the context of the interrupt thread. The
-softirq is handled in the context where it originated.
-
-With force-threaded interrupts enabled, ksoftirqd is woken up if a
-softirq is raised from hardirq context. This is the case if it is raised
-from an IPI. Additionally there is a warning on PREEMPT_RT if the
-softirq is raised from the idle thread.
-This was done for two reasons:
-- With threaded interrupts the processing should happen in thread
-  context (where it originated) and ksoftirqd is the only thread for
-  this context if raised from hardirq. Using the currently running task
-  instead would "punish" a random task.
-- Once ksoftirqd is active it consumes all further softirqs until it
-  stops running. This changed recently and is no longer the case.
-
-Instead of keeping the backlog NAPI in ksoftirqd (in force-threaded/
-PREEMPT_RT setups) I am proposing NAPI-threads for backlog.
-The "proper" setup with threaded-NAPI is not doable because the threads
-are not pinned to an individual CPU and can be modified by the user.
-Additionally a dummy network device would have to be assigned. Also
-CPU-hotplug has to be considered if additional CPUs show up.
-All this can be probably done/ solved but the smpboot-threads already
-provide this infrastructure.
-
-Sending UDP packets over loopback expects that the packet is processed
-within the call. Delaying it by handing it over to the thread hurts
-performance. It is not beneficial to the outcome if the context switch
-happens immediately after enqueue or after a while to process a few
-packets in a batch.
-There is no need to always use the thread if the backlog NAPI is
-requested on the local CPU. This restores the loopback throuput. The
-performance drops mostly to the same value after enabling RPS on the
-loopback comparing the IPI and the tread result.
-
-Create NAPI-threads for backlog if request during boot. The thread runs
-the inner loop from napi_threaded_poll(), the wait part is different. It
-checks for NAPI_STATE_SCHED (the backlog NAPI can not be disabled).
-
-The NAPI threads for backlog are optional, it has to be enabled via the boot
-argument "thread_backlog_napi". It is mandatory for PREEMPT_RT to avoid the
-wakeup of ksoftirqd from the IPI.
-
-Acked-by: Jakub Kicinski <[email protected]>
-Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- net/core/dev.c | 148 +++++++++++++++++++++++++++++++++++++------------
- 1 file changed, 113 insertions(+), 35 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -78,6 +78,7 @@
- #include <linux/slab.h>
- #include <linux/sched.h>
- #include <linux/sched/mm.h>
-+#include <linux/smpboot.h>
- #include <linux/mutex.h>
- #include <linux/rwsem.h>
- #include <linux/string.h>
-@@ -217,6 +218,31 @@ static inline struct hlist_head *dev_ind
- 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
- }
- 
-+#ifndef CONFIG_PREEMPT_RT
-+
-+static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
-+
-+static int __init setup_backlog_napi_threads(char *arg)
-+{
-+	static_branch_enable(&use_backlog_threads_key);
-+	return 0;
-+}
-+early_param("thread_backlog_napi", setup_backlog_napi_threads);
-+
-+static bool use_backlog_threads(void)
-+{
-+	return static_branch_unlikely(&use_backlog_threads_key);
-+}
-+
-+#else
-+
-+static bool use_backlog_threads(void)
-+{
-+	return true;
-+}
-+
-+#endif
-+
- static inline void rps_lock_irqsave(struct softnet_data *sd,
- 				    unsigned long *flags)
- {
-@@ -4494,6 +4520,7 @@ EXPORT_SYMBOL(__dev_direct_xmit);
- /*************************************************************************
-  *			Receiver routines
-  *************************************************************************/
-+static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
- 
- int netdev_max_backlog __read_mostly = 1000;
- EXPORT_SYMBOL(netdev_max_backlog);
-@@ -4526,12 +4553,16 @@ static inline void ____napi_schedule(str
- 		 */
- 		thread = READ_ONCE(napi->thread);
- 		if (thread) {
-+			if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
-+				goto use_local_napi;
-+
- 			set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
- 			wake_up_process(thread);
- 			return;
- 		}
- 	}
- 
-+use_local_napi:
- 	list_add_tail(&napi->poll_list, &sd->poll_list);
- 	WRITE_ONCE(napi->list_owner, smp_processor_id());
- 	/* If not called from net_rx_action()
-@@ -4777,6 +4808,11 @@ static void napi_schedule_rps(struct sof
- 
- #ifdef CONFIG_RPS
- 	if (sd != mysd) {
-+		if (use_backlog_threads()) {
-+			__napi_schedule_irqoff(&sd->backlog);
-+			return;
-+		}
-+
- 		sd->rps_ipi_next = mysd->rps_ipi_list;
- 		mysd->rps_ipi_list = sd;
- 
-@@ -6000,7 +6036,7 @@ static void net_rps_action_and_irq_enabl
- #ifdef CONFIG_RPS
- 	struct softnet_data *remsd = sd->rps_ipi_list;
- 
--	if (remsd) {
-+	if (!use_backlog_threads() && remsd) {
- 		sd->rps_ipi_list = NULL;
- 
- 		local_irq_enable();
-@@ -6015,7 +6051,7 @@ static void net_rps_action_and_irq_enabl
- static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
- {
- #ifdef CONFIG_RPS
--	return sd->rps_ipi_list != NULL;
-+	return !use_backlog_threads() && sd->rps_ipi_list;
- #else
- 	return false;
- #endif
-@@ -6059,7 +6095,7 @@ static int process_backlog(struct napi_s
- 			 * We can use a plain write instead of clear_bit(),
- 			 * and we dont need an smp_mb() memory barrier.
- 			 */
--			napi->state = 0;
-+			napi->state &= NAPIF_STATE_THREADED;
- 			again = false;
- 		} else {
- 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
-@@ -6725,43 +6761,48 @@ static void skb_defer_free_flush(struct
- 	}
- }
- 
--static int napi_threaded_poll(void *data)
-+static void napi_threaded_poll_loop(struct napi_struct *napi)
- {
--	struct napi_struct *napi = data;
- 	struct softnet_data *sd;
--	void *have;
-+	unsigned long last_qs = jiffies;
- 
--	while (!napi_thread_wait(napi)) {
--		unsigned long last_qs = jiffies;
-+	for (;;) {
-+		bool repoll = false;
-+		void *have;
- 
--		for (;;) {
--			bool repoll = false;
-+		local_bh_disable();
-+		sd = this_cpu_ptr(&softnet_data);
-+		sd->in_napi_threaded_poll = true;
- 
--			local_bh_disable();
--			sd = this_cpu_ptr(&softnet_data);
--			sd->in_napi_threaded_poll = true;
--
--			have = netpoll_poll_lock(napi);
--			__napi_poll(napi, &repoll);
--			netpoll_poll_unlock(have);
--
--			sd->in_napi_threaded_poll = false;
--			barrier();
--
--			if (sd_has_rps_ipi_waiting(sd)) {
--				local_irq_disable();
--				net_rps_action_and_irq_enable(sd);
--			}
--			skb_defer_free_flush(sd);
--			local_bh_enable();
-+		have = netpoll_poll_lock(napi);
-+		__napi_poll(napi, &repoll);
-+		netpoll_poll_unlock(have);
-+
-+		sd->in_napi_threaded_poll = false;
-+		barrier();
-+
-+		if (sd_has_rps_ipi_waiting(sd)) {
-+			local_irq_disable();
-+			net_rps_action_and_irq_enable(sd);
-+		}
-+		skb_defer_free_flush(sd);
-+		local_bh_enable();
- 
--			if (!repoll)
--				break;
-+		if (!repoll)
-+			break;
- 
--			rcu_softirq_qs_periodic(last_qs);
--			cond_resched();
--		}
-+		rcu_softirq_qs_periodic(last_qs);
-+		cond_resched();
- 	}
-+}
-+
-+static int napi_threaded_poll(void *data)
-+{
-+	struct napi_struct *napi = data;
-+
-+	while (!napi_thread_wait(napi))
-+		napi_threaded_poll_loop(napi);
-+
- 	return 0;
- }
- 
-@@ -11346,7 +11387,7 @@ static int dev_cpu_dead(unsigned int old
- 
- 		list_del_init(&napi->poll_list);
- 		if (napi->poll == process_backlog)
--			napi->state = 0;
-+			napi->state &= NAPIF_STATE_THREADED;
- 		else
- 			____napi_schedule(sd, napi);
- 	}
-@@ -11354,12 +11395,14 @@ static int dev_cpu_dead(unsigned int old
- 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
- 	local_irq_enable();
- 
-+	if (!use_backlog_threads()) {
- #ifdef CONFIG_RPS
--	remsd = oldsd->rps_ipi_list;
--	oldsd->rps_ipi_list = NULL;
-+		remsd = oldsd->rps_ipi_list;
-+		oldsd->rps_ipi_list = NULL;
- #endif
--	/* send out pending IPI's on offline CPU */
--	net_rps_send_ipi(remsd);
-+		/* send out pending IPI's on offline CPU */
-+		net_rps_send_ipi(remsd);
-+	}
- 
- 	/* Process offline CPU's input_pkt_queue */
- 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
-@@ -11622,6 +11665,38 @@ static struct pernet_operations __net_in
-  *
-  */
- 
-+static int backlog_napi_should_run(unsigned int cpu)
-+{
-+	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+	struct napi_struct *napi = &sd->backlog;
-+
-+	return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
-+}
-+
-+static void run_backlog_napi(unsigned int cpu)
-+{
-+	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+
-+	napi_threaded_poll_loop(&sd->backlog);
-+}
-+
-+static void backlog_napi_setup(unsigned int cpu)
-+{
-+	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-+	struct napi_struct *napi = &sd->backlog;
-+
-+	napi->thread = this_cpu_read(backlog_napi);
-+	set_bit(NAPI_STATE_THREADED, &napi->state);
-+}
-+
-+static struct smp_hotplug_thread backlog_threads = {
-+	.store			= &backlog_napi,
-+	.thread_should_run	= backlog_napi_should_run,
-+	.thread_fn		= run_backlog_napi,
-+	.thread_comm		= "backlog_napi/%u",
-+	.setup			= backlog_napi_setup,
-+};
-+
- /*
-  *       This is called single threaded during boot, so no need
-  *       to take the rtnl semaphore.
-@@ -11672,7 +11747,10 @@ static int __init net_dev_init(void)
- 		init_gro_hash(&sd->backlog);
- 		sd->backlog.poll = process_backlog;
- 		sd->backlog.weight = weight_p;
-+		INIT_LIST_HEAD(&sd->backlog.poll_list);
- 	}
-+	if (use_backlog_threads())
-+		smpboot_register_percpu_thread(&backlog_threads);
- 
- 	dev_boot_phase = 0;
- 

+ 0 - 121
target/linux/generic/backport-6.6/602-v6.10-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch

@@ -1,121 +0,0 @@
-From 80d2eefcb4c84aa9018b2a997ab3a4c567bc821a Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <[email protected]>
-Date: Mon, 25 Mar 2024 08:40:30 +0100
-Subject: [PATCH 3/4] net: Use backlog-NAPI to clean up the defer_list.
-
-The defer_list is a per-CPU list which is used to free skbs outside of
-the socket lock and on the CPU on which they have been allocated.
-The list is processed during NAPI callbacks so ideally the list is
-cleaned up.
-Should the amount of skbs on the list exceed a certain water mark then
-the softirq is triggered remotely on the target CPU by invoking a remote
-function call. The raise of the softirqs via a remote function call
-leads to waking the ksoftirqd on PREEMPT_RT which is undesired.
-The backlog-NAPI threads already provide the infrastructure which can be
-utilized to perform the cleanup of the defer_list.
-
-The NAPI state is updated with the input_pkt_queue.lock acquired. It
-order not to break the state, it is needed to also wake the backlog-NAPI
-thread with the lock held. This requires to acquire the use the lock in
-rps_lock_irq*() if the backlog-NAPI threads are used even with RPS
-disabled.
-
-Move the logic of remotely starting softirqs to clean up the defer_list
-into kick_defer_list_purge(). Make sure a lock is held in
-rps_lock_irq*() if backlog-NAPI threads are used. Schedule backlog-NAPI
-for defer_list cleanup if backlog-NAPI is available.
-
-Acked-by: Jakub Kicinski <[email protected]>
-Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- include/linux/netdevice.h |  1 +
- net/core/dev.c            | 25 +++++++++++++++++++++----
- net/core/skbuff.c         |  4 ++--
- 3 files changed, 24 insertions(+), 6 deletions(-)
-
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -3309,6 +3309,7 @@ static inline void dev_xmit_recursion_de
- 	__this_cpu_dec(softnet_data.xmit.recursion);
- }
- 
-+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
- void __netif_schedule(struct Qdisc *q);
- void netif_schedule_queue(struct netdev_queue *txq);
- 
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -246,7 +246,7 @@ static bool use_backlog_threads(void)
- static inline void rps_lock_irqsave(struct softnet_data *sd,
- 				    unsigned long *flags)
- {
--	if (IS_ENABLED(CONFIG_RPS))
-+	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
- 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- 		local_irq_save(*flags);
-@@ -254,7 +254,7 @@ static inline void rps_lock_irqsave(stru
- 
- static inline void rps_lock_irq_disable(struct softnet_data *sd)
- {
--	if (IS_ENABLED(CONFIG_RPS))
-+	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- 		spin_lock_irq(&sd->input_pkt_queue.lock);
- 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- 		local_irq_disable();
-@@ -263,7 +263,7 @@ static inline void rps_lock_irq_disable(
- static inline void rps_unlock_irq_restore(struct softnet_data *sd,
- 					  unsigned long *flags)
- {
--	if (IS_ENABLED(CONFIG_RPS))
-+	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
- 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- 		local_irq_restore(*flags);
-@@ -271,7 +271,7 @@ static inline void rps_unlock_irq_restor
- 
- static inline void rps_unlock_irq_enable(struct softnet_data *sd)
- {
--	if (IS_ENABLED(CONFIG_RPS))
-+	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- 		spin_unlock_irq(&sd->input_pkt_queue.lock);
- 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
- 		local_irq_enable();
-@@ -4827,6 +4827,23 @@ static void napi_schedule_rps(struct sof
- 	__napi_schedule_irqoff(&mysd->backlog);
- }
- 
-+void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
-+{
-+	unsigned long flags;
-+
-+	if (use_backlog_threads()) {
-+		rps_lock_irqsave(sd, &flags);
-+
-+		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
-+			__napi_schedule_irqoff(&sd->backlog);
-+
-+		rps_unlock_irq_restore(sd, &flags);
-+
-+	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
-+		smp_call_function_single_async(cpu, &sd->defer_csd);
-+	}
-+}
-+
- #ifdef CONFIG_NET_FLOW_LIMIT
- int netdev_flow_limit_table_len __read_mostly = (1 << 12);
- #endif
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -6867,8 +6867,8 @@ nodefer:	__kfree_skb(skb);
- 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
- 	 * if we are unlucky enough (this seems very unlikely).
- 	 */
--	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
--		smp_call_function_single_async(cpu, &sd->defer_csd);
-+	if (unlikely(kick))
-+		kick_defer_list_purge(sd, cpu);
- }
- 
- static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,

+ 0 - 164
target/linux/generic/backport-6.6/603-v6.10-net-Rename-rps_lock-to-backlog_lock.patch

@@ -1,164 +0,0 @@
-From 765b11f8f4e20b7433e4ba4a3e9106a0d59501ed Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <[email protected]>
-Date: Mon, 25 Mar 2024 08:40:31 +0100
-Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.
-
-The rps_lock.*() functions use the inner lock of a sk_buff_head for
-locking. This lock is used if RPS is enabled, otherwise the list is
-accessed lockless and disabling interrupts is enough for the
-synchronisation because it is only accessed CPU local. Not only the list
-is protected but also the NAPI state protected.
-With the addition of backlog threads, the lock is also needed because of
-the cross CPU access even without RPS. The clean up of the defer_list
-list is also done via backlog threads (if enabled).
-
-It has been suggested to rename the locking function since it is no
-longer just RPS.
-
-Rename the rps_lock*() functions to backlog_lock*().
-
-Suggested-by: Jakub Kicinski <[email protected]>
-Acked-by: Jakub Kicinski <[email protected]>
-Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- net/core/dev.c | 34 +++++++++++++++++-----------------
- 1 file changed, 17 insertions(+), 17 deletions(-)
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -243,8 +243,8 @@ static bool use_backlog_threads(void)
- 
- #endif
- 
--static inline void rps_lock_irqsave(struct softnet_data *sd,
--				    unsigned long *flags)
-+static inline void backlog_lock_irq_save(struct softnet_data *sd,
-+					 unsigned long *flags)
- {
- 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
-@@ -252,7 +252,7 @@ static inline void rps_lock_irqsave(stru
- 		local_irq_save(*flags);
- }
- 
--static inline void rps_lock_irq_disable(struct softnet_data *sd)
-+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
- {
- 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- 		spin_lock_irq(&sd->input_pkt_queue.lock);
-@@ -260,8 +260,8 @@ static inline void rps_lock_irq_disable(
- 		local_irq_disable();
- }
- 
--static inline void rps_unlock_irq_restore(struct softnet_data *sd,
--					  unsigned long *flags)
-+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
-+					      unsigned long *flags)
- {
- 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
-@@ -269,7 +269,7 @@ static inline void rps_unlock_irq_restor
- 		local_irq_restore(*flags);
- }
- 
--static inline void rps_unlock_irq_enable(struct softnet_data *sd)
-+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
- {
- 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
- 		spin_unlock_irq(&sd->input_pkt_queue.lock);
-@@ -4832,12 +4832,12 @@ void kick_defer_list_purge(struct softne
- 	unsigned long flags;
- 
- 	if (use_backlog_threads()) {
--		rps_lock_irqsave(sd, &flags);
-+		backlog_lock_irq_save(sd, &flags);
- 
- 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
- 			__napi_schedule_irqoff(&sd->backlog);
- 
--		rps_unlock_irq_restore(sd, &flags);
-+		backlog_unlock_irq_restore(sd, &flags);
- 
- 	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
- 		smp_call_function_single_async(cpu, &sd->defer_csd);
-@@ -4899,7 +4899,7 @@ static int enqueue_to_backlog(struct sk_
- 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
- 	sd = &per_cpu(softnet_data, cpu);
- 
--	rps_lock_irqsave(sd, &flags);
-+	backlog_lock_irq_save(sd, &flags);
- 	if (!netif_running(skb->dev))
- 		goto drop;
- 	qlen = skb_queue_len(&sd->input_pkt_queue);
-@@ -4908,7 +4908,7 @@ static int enqueue_to_backlog(struct sk_
- enqueue:
- 			__skb_queue_tail(&sd->input_pkt_queue, skb);
- 			input_queue_tail_incr_save(sd, qtail);
--			rps_unlock_irq_restore(sd, &flags);
-+			backlog_unlock_irq_restore(sd, &flags);
- 			return NET_RX_SUCCESS;
- 		}
- 
-@@ -4923,7 +4923,7 @@ enqueue:
- 
- drop:
- 	sd->dropped++;
--	rps_unlock_irq_restore(sd, &flags);
-+	backlog_unlock_irq_restore(sd, &flags);
- 
- 	dev_core_stats_rx_dropped_inc(skb->dev);
- 	kfree_skb_reason(skb, reason);
-@@ -5954,7 +5954,7 @@ static void flush_backlog(struct work_st
- 	local_bh_disable();
- 	sd = this_cpu_ptr(&softnet_data);
- 
--	rps_lock_irq_disable(sd);
-+	backlog_lock_irq_disable(sd);
- 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
- 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
- 			__skb_unlink(skb, &sd->input_pkt_queue);
-@@ -5962,7 +5962,7 @@ static void flush_backlog(struct work_st
- 			input_queue_head_incr(sd);
- 		}
- 	}
--	rps_unlock_irq_enable(sd);
-+	backlog_unlock_irq_enable(sd);
- 
- 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
- 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
-@@ -5980,14 +5980,14 @@ static bool flush_required(int cpu)
- 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
- 	bool do_flush;
- 
--	rps_lock_irq_disable(sd);
-+	backlog_lock_irq_disable(sd);
- 
- 	/* as insertion into process_queue happens with the rps lock held,
- 	 * process_queue access may race only with dequeue
- 	 */
- 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
- 		   !skb_queue_empty_lockless(&sd->process_queue);
--	rps_unlock_irq_enable(sd);
-+	backlog_unlock_irq_enable(sd);
- 
- 	return do_flush;
- #endif
-@@ -6102,7 +6102,7 @@ static int process_backlog(struct napi_s
- 
- 		}
- 
--		rps_lock_irq_disable(sd);
-+		backlog_lock_irq_disable(sd);
- 		if (skb_queue_empty(&sd->input_pkt_queue)) {
- 			/*
- 			 * Inline a custom version of __napi_complete().
-@@ -6118,7 +6118,7 @@ static int process_backlog(struct napi_s
- 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
- 						   &sd->process_queue);
- 		}
--		rps_unlock_irq_enable(sd);
-+		backlog_unlock_irq_enable(sd);
- 	}
- 
- 	return work;

+ 0 - 41
target/linux/generic/backport-6.6/604-01-v6.13-net-phylink-move-manual-flow-control-setting.patch

@@ -1,41 +0,0 @@
-From 8cc5f4cb94c0b1c7c1ba8013c14fd02ffb1a25f3 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Fri, 8 Nov 2024 16:01:44 +0000
-Subject: [PATCH 1/5] net: phylink: move manual flow control setting
-
-Move the handling of manual flow control configuration to a common
-location during resolve. We currently evaluate this for all but
-fixed links.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1433,7 +1433,6 @@ static void phylink_resolve(struct work_
- 		switch (pl->cur_link_an_mode) {
- 		case MLO_AN_PHY:
- 			link_state = pl->phy_state;
--			phylink_apply_manual_flow(pl, &link_state);
- 			mac_config = link_state.link;
- 			break;
- 
-@@ -1494,11 +1493,13 @@ static void phylink_resolve(struct work_
- 				link_state.pause = pl->phy_state.pause;
- 				mac_config = true;
- 			}
--			phylink_apply_manual_flow(pl, &link_state);
- 			break;
- 		}
- 	}
- 
-+	if (pl->cur_link_an_mode != MLO_AN_FIXED)
-+		phylink_apply_manual_flow(pl, &link_state);
-+
- 	if (mac_config) {
- 		if (link_state.interface != pl->link_config.interface) {
- 			/* The interface has changed, force the link down and

+ 0 - 42
target/linux/generic/backport-6.6/604-02-v6.13-net-phylink-move-MLO_AN_FIXED-resolve-handling-to-if.patch

@@ -1,42 +0,0 @@
-From 92abfcb4ced482afbe65d18980e6734fe1e62a34 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Fri, 8 Nov 2024 16:01:50 +0000
-Subject: [PATCH 2/5] net: phylink: move MLO_AN_FIXED resolve handling to if()
- statement
-
-The switch() statement doesn't sit very well with the preceeding if()
-statements, and results in excessive indentation that spoils code
-readability. Begin cleaning this up by converting the MLO_AN_FIXED case
-to an if() statement.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 8 +++-----
- 1 file changed, 3 insertions(+), 5 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1429,6 +1429,9 @@ static void phylink_resolve(struct work_
- 	} else if (pl->mac_link_dropped) {
- 		link_state.link = false;
- 		retrigger = true;
-+	} else if (pl->cur_link_an_mode == MLO_AN_FIXED) {
-+		phylink_get_fixed_state(pl, &link_state);
-+		mac_config = link_state.link;
- 	} else {
- 		switch (pl->cur_link_an_mode) {
- 		case MLO_AN_PHY:
-@@ -1436,11 +1439,6 @@ static void phylink_resolve(struct work_
- 			mac_config = link_state.link;
- 			break;
- 
--		case MLO_AN_FIXED:
--			phylink_get_fixed_state(pl, &link_state);
--			mac_config = link_state.link;
--			break;
--
- 		case MLO_AN_INBAND:
- 			phylink_mac_pcs_get_state(pl, &link_state);
- 

+ 0 - 37
target/linux/generic/backport-6.6/604-03-v6.13-net-phylink-move-MLO_AN_PHY-resolve-handling-to-if-s.patch

@@ -1,37 +0,0 @@
-From f0f46c2a3d8ea9d1427298c8103a777d9e616c29 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Fri, 8 Nov 2024 16:01:55 +0000
-Subject: [PATCH 3/5] net: phylink: move MLO_AN_PHY resolve handling to if()
- statement
-
-The switch() statement doesn't sit very well with the preceeding if()
-statements, and results in excessive indentation that spoils code
-readability. Continue cleaning this up by converting the MLO_AN_PHY
-case to use an if() statmeent.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 8 +++-----
- 1 file changed, 3 insertions(+), 5 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1432,13 +1432,11 @@ static void phylink_resolve(struct work_
- 	} else if (pl->cur_link_an_mode == MLO_AN_FIXED) {
- 		phylink_get_fixed_state(pl, &link_state);
- 		mac_config = link_state.link;
-+	} else if (pl->cur_link_an_mode == MLO_AN_PHY) {
-+		link_state = pl->phy_state;
-+		mac_config = link_state.link;
- 	} else {
- 		switch (pl->cur_link_an_mode) {
--		case MLO_AN_PHY:
--			link_state = pl->phy_state;
--			mac_config = link_state.link;
--			break;
--
- 		case MLO_AN_INBAND:
- 			phylink_mac_pcs_get_state(pl, &link_state);
- 

+ 0 - 127
target/linux/generic/backport-6.6/604-04-v6.13-net-phylink-remove-switch-statement-in-resolve-handl.patch

@@ -1,127 +0,0 @@
-From d1a16dbbd84e02d2a6dcfcb8d5c4b8b2c0289f00 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Fri, 8 Nov 2024 16:02:00 +0000
-Subject: [PATCH 4/5] net: phylink: remove switch() statement in resolve
- handling
-
-The switch() statement doesn't sit very well with the preceeding if()
-statements, so let's just convert everything to if()s. As a result of
-the two preceding commits, there is now only one case in the switch()
-statement. Remove the switch statement and reduce the code indentation.
-Code reformatting will be in the following commit.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 94 +++++++++++++++++++--------------------
- 1 file changed, 45 insertions(+), 49 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1436,60 +1436,56 @@ static void phylink_resolve(struct work_
- 		link_state = pl->phy_state;
- 		mac_config = link_state.link;
- 	} else {
--		switch (pl->cur_link_an_mode) {
--		case MLO_AN_INBAND:
--			phylink_mac_pcs_get_state(pl, &link_state);
--
--			/* The PCS may have a latching link-fail indicator.
--			 * If the link was up, bring the link down and
--			 * re-trigger the resolve. Otherwise, re-read the
--			 * PCS state to get the current status of the link.
-+		phylink_mac_pcs_get_state(pl, &link_state);
-+
-+		/* The PCS may have a latching link-fail indicator.
-+		 * If the link was up, bring the link down and
-+		 * re-trigger the resolve. Otherwise, re-read the
-+		 * PCS state to get the current status of the link.
-+		 */
-+		if (!link_state.link) {
-+			if (cur_link_state)
-+				retrigger = true;
-+			else
-+				phylink_mac_pcs_get_state(pl,
-+							  &link_state);
-+		}
-+
-+		/* If we have a phy, the "up" state is the union of
-+		 * both the PHY and the MAC
-+		 */
-+		if (pl->phydev)
-+			link_state.link &= pl->phy_state.link;
-+
-+		/* Only update if the PHY link is up */
-+		if (pl->phydev && pl->phy_state.link) {
-+			/* If the interface has changed, force a
-+			 * link down event if the link isn't already
-+			 * down, and re-resolve.
- 			 */
--			if (!link_state.link) {
--				if (cur_link_state)
--					retrigger = true;
--				else
--					phylink_mac_pcs_get_state(pl,
--								  &link_state);
-+			if (link_state.interface !=
-+			    pl->phy_state.interface) {
-+				retrigger = true;
-+				link_state.link = false;
- 			}
-+			link_state.interface = pl->phy_state.interface;
- 
--			/* If we have a phy, the "up" state is the union of
--			 * both the PHY and the MAC
-+			/* If we are doing rate matching, then the
-+			 * link speed/duplex comes from the PHY
- 			 */
--			if (pl->phydev)
--				link_state.link &= pl->phy_state.link;
--
--			/* Only update if the PHY link is up */
--			if (pl->phydev && pl->phy_state.link) {
--				/* If the interface has changed, force a
--				 * link down event if the link isn't already
--				 * down, and re-resolve.
--				 */
--				if (link_state.interface !=
--				    pl->phy_state.interface) {
--					retrigger = true;
--					link_state.link = false;
--				}
--				link_state.interface = pl->phy_state.interface;
--
--				/* If we are doing rate matching, then the
--				 * link speed/duplex comes from the PHY
--				 */
--				if (pl->phy_state.rate_matching) {
--					link_state.rate_matching =
--						pl->phy_state.rate_matching;
--					link_state.speed = pl->phy_state.speed;
--					link_state.duplex =
--						pl->phy_state.duplex;
--				}
--
--				/* If we have a PHY, we need to update with
--				 * the PHY flow control bits.
--				 */
--				link_state.pause = pl->phy_state.pause;
--				mac_config = true;
-+			if (pl->phy_state.rate_matching) {
-+				link_state.rate_matching =
-+					pl->phy_state.rate_matching;
-+				link_state.speed = pl->phy_state.speed;
-+				link_state.duplex =
-+					pl->phy_state.duplex;
- 			}
--			break;
-+
-+			/* If we have a PHY, we need to update with
-+			 * the PHY flow control bits.
-+			 */
-+			link_state.pause = pl->phy_state.pause;
-+			mac_config = true;
- 		}
- 	}
- 

+ 0 - 85
target/linux/generic/backport-6.6/604-05-v6.13-net-phylink-clean-up-phylink_resolve.patch

@@ -1,85 +0,0 @@
-From bc08ce37d99a3992e975a0f397503cb23404f25a Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Fri, 8 Nov 2024 16:02:05 +0000
-Subject: [PATCH 5/5] net: phylink: clean up phylink_resolve()
-
-Now that we have reduced the indentation level, clean up the code
-formatting.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 35 ++++++++++++++++-------------------
- 1 file changed, 16 insertions(+), 19 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1438,51 +1438,48 @@ static void phylink_resolve(struct work_
- 	} else {
- 		phylink_mac_pcs_get_state(pl, &link_state);
- 
--		/* The PCS may have a latching link-fail indicator.
--		 * If the link was up, bring the link down and
--		 * re-trigger the resolve. Otherwise, re-read the
--		 * PCS state to get the current status of the link.
-+		/* The PCS may have a latching link-fail indicator. If the link
-+		 * was up, bring the link down and re-trigger the resolve.
-+		 * Otherwise, re-read the PCS state to get the current status
-+		 * of the link.
- 		 */
- 		if (!link_state.link) {
- 			if (cur_link_state)
- 				retrigger = true;
- 			else
--				phylink_mac_pcs_get_state(pl,
--							  &link_state);
-+				phylink_mac_pcs_get_state(pl, &link_state);
- 		}
- 
--		/* If we have a phy, the "up" state is the union of
--		 * both the PHY and the MAC
-+		/* If we have a phy, the "up" state is the union of both the
-+		 * PHY and the MAC
- 		 */
- 		if (pl->phydev)
- 			link_state.link &= pl->phy_state.link;
- 
- 		/* Only update if the PHY link is up */
- 		if (pl->phydev && pl->phy_state.link) {
--			/* If the interface has changed, force a
--			 * link down event if the link isn't already
--			 * down, and re-resolve.
-+			/* If the interface has changed, force a link down
-+			 * event if the link isn't already down, and re-resolve.
- 			 */
--			if (link_state.interface !=
--			    pl->phy_state.interface) {
-+			if (link_state.interface != pl->phy_state.interface) {
- 				retrigger = true;
- 				link_state.link = false;
- 			}
-+
- 			link_state.interface = pl->phy_state.interface;
- 
--			/* If we are doing rate matching, then the
--			 * link speed/duplex comes from the PHY
-+			/* If we are doing rate matching, then the link
-+			 * speed/duplex comes from the PHY
- 			 */
- 			if (pl->phy_state.rate_matching) {
- 				link_state.rate_matching =
- 					pl->phy_state.rate_matching;
- 				link_state.speed = pl->phy_state.speed;
--				link_state.duplex =
--					pl->phy_state.duplex;
-+				link_state.duplex = pl->phy_state.duplex;
- 			}
- 
--			/* If we have a PHY, we need to update with
--			 * the PHY flow control bits.
-+			/* If we have a PHY, we need to update with the PHY
-+			 * flow control bits.
- 			 */
- 			link_state.pause = pl->phy_state.pause;
- 			mac_config = true;

+ 0 - 166
target/linux/generic/backport-6.6/605-v6.8-net-phylink-move-phylink_pcs_neg_mode-into-phylink.c.patch

@@ -1,166 +0,0 @@
-From 5e5401d6612ef599ad45785b941eebda7effc90f Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Thu, 4 Jan 2024 09:47:36 +0000
-Subject: [PATCH] net: phylink: move phylink_pcs_neg_mode() into phylink.c
-
-Move phylink_pcs_neg_mode() from the header file into the .c file since
-nothing should be using it.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- drivers/net/phy/phylink.c | 66 +++++++++++++++++++++++++++++++++++++++
- include/linux/phylink.h   | 66 ---------------------------------------
- 2 files changed, 66 insertions(+), 66 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1116,6 +1116,72 @@ static void phylink_pcs_an_restart(struc
- 		pl->pcs->ops->pcs_an_restart(pl->pcs);
- }
- 
-+/**
-+ * phylink_pcs_neg_mode() - helper to determine PCS inband mode
-+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
-+ * @interface: interface mode to be used
-+ * @advertising: adertisement ethtool link mode mask
-+ *
-+ * Determines the negotiation mode to be used by the PCS, and returns
-+ * one of:
-+ *
-+ * - %PHYLINK_PCS_NEG_NONE: interface mode does not support inband
-+ * - %PHYLINK_PCS_NEG_OUTBAND: an out of band mode (e.g. reading the PHY)
-+ *   will be used.
-+ * - %PHYLINK_PCS_NEG_INBAND_DISABLED: inband mode selected but autoneg
-+ *   disabled
-+ * - %PHYLINK_PCS_NEG_INBAND_ENABLED: inband mode selected and autoneg enabled
-+ *
-+ * Note: this is for cases where the PCS itself is involved in negotiation
-+ * (e.g. Clause 37, SGMII and similar) not Clause 73.
-+ */
-+static unsigned int phylink_pcs_neg_mode(unsigned int mode,
-+					 phy_interface_t interface,
-+					 const unsigned long *advertising)
-+{
-+	unsigned int neg_mode;
-+
-+	switch (interface) {
-+	case PHY_INTERFACE_MODE_SGMII:
-+	case PHY_INTERFACE_MODE_QSGMII:
-+	case PHY_INTERFACE_MODE_QUSGMII:
-+	case PHY_INTERFACE_MODE_USXGMII:
-+		/* These protocols are designed for use with a PHY which
-+		 * communicates its negotiation result back to the MAC via
-+		 * inband communication. Note: there exist PHYs that run
-+		 * with SGMII but do not send the inband data.
-+		 */
-+		if (!phylink_autoneg_inband(mode))
-+			neg_mode = PHYLINK_PCS_NEG_OUTBAND;
-+		else
-+			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
-+		break;
-+
-+	case PHY_INTERFACE_MODE_1000BASEX:
-+	case PHY_INTERFACE_MODE_2500BASEX:
-+		/* 1000base-X is designed for use media-side for Fibre
-+		 * connections, and thus the Autoneg bit needs to be
-+		 * taken into account. We also do this for 2500base-X
-+		 * as well, but drivers may not support this, so may
-+		 * need to override this.
-+		 */
-+		if (!phylink_autoneg_inband(mode))
-+			neg_mode = PHYLINK_PCS_NEG_OUTBAND;
-+		else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
-+					   advertising))
-+			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
-+		else
-+			neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
-+		break;
-+
-+	default:
-+		neg_mode = PHYLINK_PCS_NEG_NONE;
-+		break;
-+	}
-+
-+	return neg_mode;
-+}
-+
- static void phylink_major_config(struct phylink *pl, bool restart,
- 				  const struct phylink_link_state *state)
- {
---- a/include/linux/phylink.h
-+++ b/include/linux/phylink.h
-@@ -99,72 +99,6 @@ static inline bool phylink_autoneg_inban
- }
- 
- /**
-- * phylink_pcs_neg_mode() - helper to determine PCS inband mode
-- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
-- * @interface: interface mode to be used
-- * @advertising: adertisement ethtool link mode mask
-- *
-- * Determines the negotiation mode to be used by the PCS, and returns
-- * one of:
-- *
-- * - %PHYLINK_PCS_NEG_NONE: interface mode does not support inband
-- * - %PHYLINK_PCS_NEG_OUTBAND: an out of band mode (e.g. reading the PHY)
-- *   will be used.
-- * - %PHYLINK_PCS_NEG_INBAND_DISABLED: inband mode selected but autoneg
-- *   disabled
-- * - %PHYLINK_PCS_NEG_INBAND_ENABLED: inband mode selected and autoneg enabled
-- *
-- * Note: this is for cases where the PCS itself is involved in negotiation
-- * (e.g. Clause 37, SGMII and similar) not Clause 73.
-- */
--static inline unsigned int phylink_pcs_neg_mode(unsigned int mode,
--						phy_interface_t interface,
--						const unsigned long *advertising)
--{
--	unsigned int neg_mode;
--
--	switch (interface) {
--	case PHY_INTERFACE_MODE_SGMII:
--	case PHY_INTERFACE_MODE_QSGMII:
--	case PHY_INTERFACE_MODE_QUSGMII:
--	case PHY_INTERFACE_MODE_USXGMII:
--		/* These protocols are designed for use with a PHY which
--		 * communicates its negotiation result back to the MAC via
--		 * inband communication. Note: there exist PHYs that run
--		 * with SGMII but do not send the inband data.
--		 */
--		if (!phylink_autoneg_inband(mode))
--			neg_mode = PHYLINK_PCS_NEG_OUTBAND;
--		else
--			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
--		break;
--
--	case PHY_INTERFACE_MODE_1000BASEX:
--	case PHY_INTERFACE_MODE_2500BASEX:
--		/* 1000base-X is designed for use media-side for Fibre
--		 * connections, and thus the Autoneg bit needs to be
--		 * taken into account. We also do this for 2500base-X
--		 * as well, but drivers may not support this, so may
--		 * need to override this.
--		 */
--		if (!phylink_autoneg_inband(mode))
--			neg_mode = PHYLINK_PCS_NEG_OUTBAND;
--		else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
--					   advertising))
--			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
--		else
--			neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
--		break;
--
--	default:
--		neg_mode = PHYLINK_PCS_NEG_NONE;
--		break;
--	}
--
--	return neg_mode;
--}
--
--/**
-  * struct phylink_link_state - link state structure
-  * @advertising: ethtool bitmask containing advertised link modes
-  * @lp_advertising: ethtool bitmask containing link partner advertised link

+ 0 - 95
target/linux/generic/backport-6.6/606-01-v6.14-net-phylink-pass-phylink-and-pcs-into-phylink_pcs_ne.patch

@@ -1,95 +0,0 @@
-From 17ed1911f9c8d4f9af8e13b2c95103ee06dadc0f Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:30:47 +0000
-Subject: [PATCH 01/13] net: phylink: pass phylink and pcs into
- phylink_pcs_neg_mode()
-
-Move the call to phylink_pcs_neg_mode() in phylink_major_config() after
-we have selected the appropriate PCS to allow the PCS to be passed in.
-
-Add struct phylink and struct phylink_pcs pointers to
-phylink_pcs_neg_mode() and pass in the appropriate structures. Set
-pl->pcs_neg_mode before returning, and remove the return value.
-
-This will allow the capabilities of the PCS and any PHY to be used when
-deciding which pcs_neg_mode should be used.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 26 +++++++++++++-------------
- 1 file changed, 13 insertions(+), 13 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1118,7 +1118,8 @@ static void phylink_pcs_an_restart(struc
- 
- /**
-  * phylink_pcs_neg_mode() - helper to determine PCS inband mode
-- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
-+ * @pl: a pointer to a &struct phylink returned from phylink_create()
-+ * @pcs: a pointer to &struct phylink_pcs
-  * @interface: interface mode to be used
-  * @advertising: adertisement ethtool link mode mask
-  *
-@@ -1135,11 +1136,13 @@ static void phylink_pcs_an_restart(struc
-  * Note: this is for cases where the PCS itself is involved in negotiation
-  * (e.g. Clause 37, SGMII and similar) not Clause 73.
-  */
--static unsigned int phylink_pcs_neg_mode(unsigned int mode,
--					 phy_interface_t interface,
--					 const unsigned long *advertising)
-+static void phylink_pcs_neg_mode(struct phylink *pl, struct phylink_pcs *pcs,
-+				 phy_interface_t interface,
-+				 const unsigned long *advertising)
- {
--	unsigned int neg_mode;
-+	unsigned int neg_mode, mode;
-+
-+	mode = pl->cur_link_an_mode;
- 
- 	switch (interface) {
- 	case PHY_INTERFACE_MODE_SGMII:
-@@ -1179,7 +1182,7 @@ static unsigned int phylink_pcs_neg_mode
- 		break;
- 	}
- 
--	return neg_mode;
-+	pl->pcs_neg_mode = neg_mode;
- }
- 
- static void phylink_major_config(struct phylink *pl, bool restart,
-@@ -1193,10 +1196,6 @@ static void phylink_major_config(struct
- 
- 	phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
- 
--	pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
--						state->interface,
--						state->advertising);
--
- 	if (pl->using_mac_select_pcs) {
- 		pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
- 		if (IS_ERR(pcs)) {
-@@ -1209,6 +1208,8 @@ static void phylink_major_config(struct
- 		pcs_changed = pcs && pl->pcs != pcs;
- 	}
- 
-+	phylink_pcs_neg_mode(pl, pcs, state->interface, state->advertising);
-+
- 	phylink_pcs_poll_stop(pl);
- 
- 	if (pl->mac_ops->mac_prepare) {
-@@ -1299,9 +1300,8 @@ static int phylink_change_inband_advert(
- 		    pl->link_config.pause);
- 
- 	/* Recompute the PCS neg mode */
--	pl->pcs_neg_mode = phylink_pcs_neg_mode(pl->cur_link_an_mode,
--					pl->link_config.interface,
--					pl->link_config.advertising);
-+	phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface,
-+			     pl->link_config.advertising);
- 
- 	neg_mode = pl->cur_link_an_mode;
- 	if (pl->pcs->neg_mode)

+ 0 - 281
target/linux/generic/backport-6.6/606-02-v6.14-net-phylink-split-cur_link_an_mode-into-requested-an.patch

@@ -1,281 +0,0 @@
-From 1f92ead7e15003f632b5f138e8138095e0997d3d Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:30:52 +0000
-Subject: [PATCH 02/13] net: phylink: split cur_link_an_mode into requested and
- active
-
-There is an interdependence between the current link_an_mode and
-pcs_neg_mode that some drivers rely upon to know whether inband or PHY
-mode will be used.
-
-In order to support detection of PCS and PHY inband capabilities
-resulting in automatic selection of inband or PHY mode, we need to
-cater for this, and support changing the MAC link_an_mode. However, we
-end up with an inter-dependency between the current link_an_mode and
-pcs_neg_mode.
-
-To solve this, split the current link_an_mode into the requested
-link_an_mode and active link_an_mode. The requested link_an_mode will
-always be passed to phylink_pcs_neg_mode(), and the active link_an_mode
-will be used for everything else, and only updated during
-phylink_major_config(). This will ensure that phylink_pcs_neg_mode()'s
-link_an_mode will not depend on the active link_an_mode that will,
-in a future patch, depend on pcs_neg_mode.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 60 ++++++++++++++++++++-------------------
- 1 file changed, 31 insertions(+), 29 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -56,7 +56,8 @@ struct phylink {
- 	struct phy_device *phydev;
- 	phy_interface_t link_interface;	/* PHY_INTERFACE_xxx */
- 	u8 cfg_link_an_mode;		/* MLO_AN_xxx */
--	u8 cur_link_an_mode;
-+	u8 req_link_an_mode;		/* Requested MLO_AN_xxx mode */
-+	u8 act_link_an_mode;		/* Active MLO_AN_xxx mode */
- 	u8 link_port;			/* The current non-phy ethtool port */
- 	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- 
-@@ -1098,13 +1099,13 @@ static void phylink_mac_config(struct ph
- 
- 	phylink_dbg(pl,
- 		    "%s: mode=%s/%s/%s adv=%*pb pause=%02x\n",
--		    __func__, phylink_an_mode_str(pl->cur_link_an_mode),
-+		    __func__, phylink_an_mode_str(pl->act_link_an_mode),
- 		    phy_modes(st.interface),
- 		    phy_rate_matching_to_str(st.rate_matching),
- 		    __ETHTOOL_LINK_MODE_MASK_NBITS, st.advertising,
- 		    st.pause);
- 
--	pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, &st);
-+	pl->mac_ops->mac_config(pl->config, pl->act_link_an_mode, &st);
- }
- 
- static void phylink_pcs_an_restart(struct phylink *pl)
-@@ -1112,7 +1113,7 @@ static void phylink_pcs_an_restart(struc
- 	if (pl->pcs && linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- 					 pl->link_config.advertising) &&
- 	    phy_interface_mode_is_8023z(pl->link_config.interface) &&
--	    phylink_autoneg_inband(pl->cur_link_an_mode))
-+	    phylink_autoneg_inband(pl->act_link_an_mode))
- 		pl->pcs->ops->pcs_an_restart(pl->pcs);
- }
- 
-@@ -1142,7 +1143,7 @@ static void phylink_pcs_neg_mode(struct
- {
- 	unsigned int neg_mode, mode;
- 
--	mode = pl->cur_link_an_mode;
-+	mode = pl->req_link_an_mode;
- 
- 	switch (interface) {
- 	case PHY_INTERFACE_MODE_SGMII:
-@@ -1183,6 +1184,7 @@ static void phylink_pcs_neg_mode(struct
- 	}
- 
- 	pl->pcs_neg_mode = neg_mode;
-+	pl->act_link_an_mode = mode;
- }
- 
- static void phylink_major_config(struct phylink *pl, bool restart,
-@@ -1213,7 +1215,7 @@ static void phylink_major_config(struct
- 	phylink_pcs_poll_stop(pl);
- 
- 	if (pl->mac_ops->mac_prepare) {
--		err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
-+		err = pl->mac_ops->mac_prepare(pl->config, pl->act_link_an_mode,
- 					       state->interface);
- 		if (err < 0) {
- 			phylink_err(pl, "mac_prepare failed: %pe\n",
-@@ -1247,7 +1249,7 @@ static void phylink_major_config(struct
- 	if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed)
- 		phylink_pcs_enable(pl->pcs);
- 
--	neg_mode = pl->cur_link_an_mode;
-+	neg_mode = pl->act_link_an_mode;
- 	if (pl->pcs && pl->pcs->neg_mode)
- 		neg_mode = pl->pcs_neg_mode;
- 
-@@ -1263,7 +1265,7 @@ static void phylink_major_config(struct
- 		phylink_pcs_an_restart(pl);
- 
- 	if (pl->mac_ops->mac_finish) {
--		err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
-+		err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode,
- 					      state->interface);
- 		if (err < 0)
- 			phylink_err(pl, "mac_finish failed: %pe\n",
-@@ -1294,7 +1296,7 @@ static int phylink_change_inband_advert(
- 		return 0;
- 
- 	phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__,
--		    phylink_an_mode_str(pl->cur_link_an_mode),
-+		    phylink_an_mode_str(pl->req_link_an_mode),
- 		    phy_modes(pl->link_config.interface),
- 		    __ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising,
- 		    pl->link_config.pause);
-@@ -1303,7 +1305,7 @@ static int phylink_change_inband_advert(
- 	phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface,
- 			     pl->link_config.advertising);
- 
--	neg_mode = pl->cur_link_an_mode;
-+	neg_mode = pl->act_link_an_mode;
- 	if (pl->pcs->neg_mode)
- 		neg_mode = pl->pcs_neg_mode;
- 
-@@ -1368,7 +1370,7 @@ static void phylink_mac_initial_config(s
- {
- 	struct phylink_link_state link_state;
- 
--	switch (pl->cur_link_an_mode) {
-+	switch (pl->req_link_an_mode) {
- 	case MLO_AN_PHY:
- 		link_state = pl->phy_state;
- 		break;
-@@ -1442,14 +1444,14 @@ static void phylink_link_up(struct phyli
- 
- 	pl->cur_interface = link_state.interface;
- 
--	neg_mode = pl->cur_link_an_mode;
-+	neg_mode = pl->act_link_an_mode;
- 	if (pl->pcs && pl->pcs->neg_mode)
- 		neg_mode = pl->pcs_neg_mode;
- 
- 	phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed,
- 			    duplex);
- 
--	pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->cur_link_an_mode,
-+	pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode,
- 				 pl->cur_interface, speed, duplex,
- 				 !!(link_state.pause & MLO_PAUSE_TX), rx_pause);
- 
-@@ -1469,7 +1471,7 @@ static void phylink_link_down(struct phy
- 
- 	if (ndev)
- 		netif_carrier_off(ndev);
--	pl->mac_ops->mac_link_down(pl->config, pl->cur_link_an_mode,
-+	pl->mac_ops->mac_link_down(pl->config, pl->act_link_an_mode,
- 				   pl->cur_interface);
- 	phylink_info(pl, "Link is Down\n");
- }
-@@ -1495,10 +1497,10 @@ static void phylink_resolve(struct work_
- 	} else if (pl->mac_link_dropped) {
- 		link_state.link = false;
- 		retrigger = true;
--	} else if (pl->cur_link_an_mode == MLO_AN_FIXED) {
-+	} else if (pl->act_link_an_mode == MLO_AN_FIXED) {
- 		phylink_get_fixed_state(pl, &link_state);
- 		mac_config = link_state.link;
--	} else if (pl->cur_link_an_mode == MLO_AN_PHY) {
-+	} else if (pl->act_link_an_mode == MLO_AN_PHY) {
- 		link_state = pl->phy_state;
- 		mac_config = link_state.link;
- 	} else {
-@@ -1552,7 +1554,7 @@ static void phylink_resolve(struct work_
- 		}
- 	}
- 
--	if (pl->cur_link_an_mode != MLO_AN_FIXED)
-+	if (pl->act_link_an_mode != MLO_AN_FIXED)
- 		phylink_apply_manual_flow(pl, &link_state);
- 
- 	if (mac_config) {
-@@ -1729,7 +1731,7 @@ struct phylink *phylink_create(struct ph
- 		}
- 	}
- 
--	pl->cur_link_an_mode = pl->cfg_link_an_mode;
-+	pl->req_link_an_mode = pl->cfg_link_an_mode;
- 
- 	ret = phylink_register_sfp(pl, fwnode);
- 	if (ret < 0) {
-@@ -2126,7 +2128,7 @@ void phylink_start(struct phylink *pl)
- 	ASSERT_RTNL();
- 
- 	phylink_info(pl, "configuring for %s/%s link mode\n",
--		     phylink_an_mode_str(pl->cur_link_an_mode),
-+		     phylink_an_mode_str(pl->req_link_an_mode),
- 		     phy_modes(pl->link_config.interface));
- 
- 	/* Always set the carrier off */
-@@ -2385,7 +2387,7 @@ int phylink_ethtool_ksettings_get(struct
- 
- 	linkmode_copy(kset->link_modes.supported, pl->supported);
- 
--	switch (pl->cur_link_an_mode) {
-+	switch (pl->act_link_an_mode) {
- 	case MLO_AN_FIXED:
- 		/* We are using fixed settings. Report these as the
- 		 * current link settings - and note that these also
-@@ -2477,7 +2479,7 @@ int phylink_ethtool_ksettings_set(struct
- 		/* If we have a fixed link, refuse to change link parameters.
- 		 * If the link parameters match, accept them but do nothing.
- 		 */
--		if (pl->cur_link_an_mode == MLO_AN_FIXED) {
-+		if (pl->req_link_an_mode == MLO_AN_FIXED) {
- 			if (s->speed != pl->link_config.speed ||
- 			    s->duplex != pl->link_config.duplex)
- 				return -EINVAL;
-@@ -2493,7 +2495,7 @@ int phylink_ethtool_ksettings_set(struct
- 		 * is our default case) but do not allow the advertisement to
- 		 * be changed. If the advertisement matches, simply return.
- 		 */
--		if (pl->cur_link_an_mode == MLO_AN_FIXED) {
-+		if (pl->req_link_an_mode == MLO_AN_FIXED) {
- 			if (!linkmode_equal(config.advertising,
- 					    pl->link_config.advertising))
- 				return -EINVAL;
-@@ -2533,7 +2535,7 @@ int phylink_ethtool_ksettings_set(struct
- 		linkmode_copy(support, pl->supported);
- 		if (phylink_validate(pl, support, &config)) {
- 			phylink_err(pl, "validation of %s/%s with support %*pb failed\n",
--				    phylink_an_mode_str(pl->cur_link_an_mode),
-+				    phylink_an_mode_str(pl->req_link_an_mode),
- 				    phy_modes(config.interface),
- 				    __ETHTOOL_LINK_MODE_MASK_NBITS, support);
- 			return -EINVAL;
-@@ -2633,7 +2635,7 @@ int phylink_ethtool_set_pauseparam(struc
- 
- 	ASSERT_RTNL();
- 
--	if (pl->cur_link_an_mode == MLO_AN_FIXED)
-+	if (pl->req_link_an_mode == MLO_AN_FIXED)
- 		return -EOPNOTSUPP;
- 
- 	if (!phylink_test(pl->supported, Pause) &&
-@@ -2897,7 +2899,7 @@ static int phylink_mii_read(struct phyli
- 	struct phylink_link_state state;
- 	int val = 0xffff;
- 
--	switch (pl->cur_link_an_mode) {
-+	switch (pl->act_link_an_mode) {
- 	case MLO_AN_FIXED:
- 		if (phy_id == 0) {
- 			phylink_get_fixed_state(pl, &state);
-@@ -2922,7 +2924,7 @@ static int phylink_mii_read(struct phyli
- static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
- 			     unsigned int reg, unsigned int val)
- {
--	switch (pl->cur_link_an_mode) {
-+	switch (pl->act_link_an_mode) {
- 	case MLO_AN_FIXED:
- 		break;
- 
-@@ -3125,9 +3127,9 @@ static void phylink_sfp_set_config(struc
- 		changed = true;
- 	}
- 
--	if (pl->cur_link_an_mode != mode ||
-+	if (pl->req_link_an_mode != mode ||
- 	    pl->link_config.interface != state->interface) {
--		pl->cur_link_an_mode = mode;
-+		pl->req_link_an_mode = mode;
- 		pl->link_config.interface = state->interface;
- 
- 		changed = true;

+ 0 - 66
target/linux/generic/backport-6.6/606-03-v6.14-net-phylink-add-debug-for-phylink_major_config.patch

@@ -1,66 +0,0 @@
-From 4e7d000286fe8e12f2d88032711ffab3ab658b12 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:30:57 +0000
-Subject: [PATCH 03/13] net: phylink: add debug for phylink_major_config()
-
-Now that we have a more complexity in phylink_major_config(), augment
-the debugging so we can see what's going on there.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 27 ++++++++++++++++++++++++++-
- 1 file changed, 26 insertions(+), 1 deletion(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -163,6 +163,24 @@ static const char *phylink_an_mode_str(u
- 	return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
- }
- 
-+static const char *phylink_pcs_mode_str(unsigned int mode)
-+{
-+	if (!mode)
-+		return "none";
-+
-+	if (mode & PHYLINK_PCS_NEG_OUTBAND)
-+		return "outband";
-+
-+	if (mode & PHYLINK_PCS_NEG_INBAND) {
-+		if (mode & PHYLINK_PCS_NEG_ENABLED)
-+			return "inband,an-enabled";
-+		else
-+			return "inband,an-disabled";
-+	}
-+
-+	return "unknown";
-+}
-+
- static unsigned int phylink_interface_signal_rate(phy_interface_t interface)
- {
- 	switch (interface) {
-@@ -1196,7 +1214,9 @@ static void phylink_major_config(struct
- 	unsigned int neg_mode;
- 	int err;
- 
--	phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
-+	phylink_dbg(pl, "major config, requested %s/%s\n",
-+		    phylink_an_mode_str(pl->req_link_an_mode),
-+		    phy_modes(state->interface));
- 
- 	if (pl->using_mac_select_pcs) {
- 		pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface);
-@@ -1212,6 +1232,11 @@ static void phylink_major_config(struct
- 
- 	phylink_pcs_neg_mode(pl, pcs, state->interface, state->advertising);
- 
-+	phylink_dbg(pl, "major config, active %s/%s/%s\n",
-+		    phylink_an_mode_str(pl->act_link_an_mode),
-+		    phylink_pcs_mode_str(pl->pcs_neg_mode),
-+		    phy_modes(state->interface));
-+
- 	phylink_pcs_poll_stop(pl);
- 
- 	if (pl->mac_ops->mac_prepare) {

+ 0 - 118
target/linux/generic/backport-6.6/606-04-v6.14-net-phy-add-phy_inband_caps.patch

@@ -1,118 +0,0 @@
-From b4c7698dd95f253c6958d8c6ac219098009bf28a Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:02 +0000
-Subject: [PATCH 04/13] net: phy: add phy_inband_caps()
-
-Add a method to query the PHY's in-band capabilities for a PHY
-interface mode.
-
-Where the interface mode does not have in-band capability, or the PHY
-driver has not been updated to return this information, then
-phy_inband_caps() should return zero. Otherwise, PHY drivers will
-return a value consisting of the following flags:
-
-LINK_INBAND_DISABLE indicates that the hardware does not support
-in-band signalling, or can have in-band signalling configured via
-software to be disabled.
-
-LINK_INBAND_ENABLE indicates that the hardware will use in-band
-signalling, or can have in-band signalling configured via software
-to be enabled.
-
-LINK_INBAND_BYPASS indicates that the hardware has the ability to
-bypass in-band signalling when enabled after a timeout if the link
-partner does not respond to its in-band signalling.
-
-This reports the PHY capabilities for the particular interface mode,
-not the current configuration.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phy.c | 21 +++++++++++++++++++++
- include/linux/phy.h   | 28 ++++++++++++++++++++++++++++
- 2 files changed, 49 insertions(+)
-
---- a/drivers/net/phy/phy.c
-+++ b/drivers/net/phy/phy.c
-@@ -973,6 +973,27 @@ static int phy_check_link_status(struct
- }
- 
- /**
-+ * phy_inband_caps - query which in-band signalling modes are supported
-+ * @phydev: a pointer to a &struct phy_device
-+ * @interface: the interface mode for the PHY
-+ *
-+ * Returns zero if it is unknown what in-band signalling is supported by the
-+ * PHY (e.g. because the PHY driver doesn't implement the method.) Otherwise,
-+ * returns a bit mask of the LINK_INBAND_* values from
-+ * &enum link_inband_signalling to describe which inband modes are supported
-+ * by the PHY for this interface mode.
-+ */
-+unsigned int phy_inband_caps(struct phy_device *phydev,
-+			     phy_interface_t interface)
-+{
-+	if (phydev->drv && phydev->drv->inband_caps)
-+		return phydev->drv->inband_caps(phydev, interface);
-+
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(phy_inband_caps);
-+
-+/**
-  * _phy_start_aneg - start auto-negotiation for this PHY device
-  * @phydev: the phy_device struct
-  *
---- a/include/linux/phy.h
-+++ b/include/linux/phy.h
-@@ -790,6 +790,24 @@ struct phy_tdr_config {
- #define PHY_PAIR_ALL -1
- 
- /**
-+ * enum link_inband_signalling - in-band signalling modes that are supported
-+ *
-+ * @LINK_INBAND_DISABLE: in-band signalling can be disabled
-+ * @LINK_INBAND_ENABLE: in-band signalling can be enabled without bypass
-+ * @LINK_INBAND_BYPASS: in-band signalling can be enabled with bypass
-+ *
-+ * The possible and required bits can only be used if the valid bit is set.
-+ * If possible is clear, that means inband signalling can not be used.
-+ * Required is only valid when possible is set, and means that inband
-+ * signalling must be used.
-+ */
-+enum link_inband_signalling {
-+	LINK_INBAND_DISABLE		= BIT(0),
-+	LINK_INBAND_ENABLE		= BIT(1),
-+	LINK_INBAND_BYPASS		= BIT(2),
-+};
-+
-+/**
-  * struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision
-  * Avoidance) Reconciliation Sublayer.
-  *
-@@ -919,6 +937,14 @@ struct phy_driver {
- 	int (*get_features)(struct phy_device *phydev);
- 
- 	/**
-+	 * @inband_caps: query whether in-band is supported for the given PHY
-+	 * interface mode. Returns a bitmask of bits defined by enum
-+	 * link_inband_signalling.
-+	 */
-+	unsigned int (*inband_caps)(struct phy_device *phydev,
-+				    phy_interface_t interface);
-+
-+	/**
- 	 * @get_rate_matching: Get the supported type of rate matching for a
- 	 * particular phy interface. This is used by phy consumers to determine
- 	 * whether to advertise lower-speed modes for that interface. It is
-@@ -1735,6 +1761,8 @@ void phy_stop(struct phy_device *phydev)
- int phy_config_aneg(struct phy_device *phydev);
- int phy_start_aneg(struct phy_device *phydev);
- int phy_aneg_done(struct phy_device *phydev);
-+unsigned int phy_inband_caps(struct phy_device *phydev,
-+			     phy_interface_t interface);
- int phy_speed_down(struct phy_device *phydev, bool sync);
- int phy_speed_up(struct phy_device *phydev);
- bool phy_check_valid(int speed, int duplex, unsigned long *features);

+ 0 - 41
target/linux/generic/backport-6.6/606-05-v6.14-net-phy-bcm84881-implement-phy_inband_caps-method.patch

@@ -1,41 +0,0 @@
-From c64c7fa0a774d9da72071a8517e359992baac982 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:07 +0000
-Subject: [PATCH 05/13] net: phy: bcm84881: implement phy_inband_caps() method
-
-BCM84881 has no support for inband signalling, so this is a trivial
-implementation that returns no support for inband.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Acked-by: Florian Fainelli <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/bcm84881.c | 10 ++++++++++
- 1 file changed, 10 insertions(+)
-
---- a/drivers/net/phy/bcm84881.c
-+++ b/drivers/net/phy/bcm84881.c
-@@ -223,11 +223,21 @@ static int bcm84881_read_status(struct p
- 	return genphy_c45_read_mdix(phydev);
- }
- 
-+/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
-+ * or 802.3z control word, so inband will not work.
-+ */
-+static unsigned int bcm84881_inband_caps(struct phy_device *phydev,
-+					 phy_interface_t interface)
-+{
-+	return LINK_INBAND_DISABLE;
-+}
-+
- static struct phy_driver bcm84881_drivers[] = {
- 	{
- 		.phy_id		= 0xae025150,
- 		.phy_id_mask	= 0xfffffff0,
- 		.name		= "Broadcom BCM84881",
-+		.inband_caps	= bcm84881_inband_caps,
- 		.config_init	= bcm84881_config_init,
- 		.probe		= bcm84881_probe,
- 		.get_features	= bcm84881_get_features,

+ 0 - 63
target/linux/generic/backport-6.6/606-06-v6.14-net-phy-marvell-implement-phy_inband_caps-method.patch

@@ -1,63 +0,0 @@
-From 1c86828dff88e28b8ade6bddeee0163a023faf91 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:12 +0000
-Subject: [PATCH 06/13] net: phy: marvell: implement phy_inband_caps() method
-
-Provide an implementation for phy_inband_caps() for Marvell PHYs used
-on SFP modules, so that phylink knows the PHYs capabilities.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/marvell.c | 17 +++++++++++++++++
- 1 file changed, 17 insertions(+)
-
---- a/drivers/net/phy/marvell.c
-+++ b/drivers/net/phy/marvell.c
-@@ -673,6 +673,20 @@ static int marvell_config_aneg_fiber(str
- 	return genphy_check_and_restart_aneg(phydev, changed);
- }
- 
-+static unsigned int m88e1111_inband_caps(struct phy_device *phydev,
-+					 phy_interface_t interface)
-+{
-+	/* In 1000base-X and SGMII modes, the inband mode can be changed
-+	 * through the Fibre page BMCR ANENABLE bit.
-+	 */
-+	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
-+	    interface == PHY_INTERFACE_MODE_SGMII)
-+		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE |
-+		       LINK_INBAND_BYPASS;
-+
-+	return 0;
-+}
-+
- static int m88e1111_config_aneg(struct phy_device *phydev)
- {
- 	int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
-@@ -3329,6 +3343,7 @@ static struct phy_driver marvell_drivers
- 		.name = "Marvell 88E1112",
- 		/* PHY_GBIT_FEATURES */
- 		.probe = marvell_probe,
-+		.inband_caps = m88e1111_inband_caps,
- 		.config_init = m88e1112_config_init,
- 		.config_aneg = marvell_config_aneg,
- 		.config_intr = marvell_config_intr,
-@@ -3349,6 +3364,7 @@ static struct phy_driver marvell_drivers
- 		.name = "Marvell 88E1111",
- 		/* PHY_GBIT_FEATURES */
- 		.probe = marvell_probe,
-+		.inband_caps = m88e1111_inband_caps,
- 		.config_init = m88e1111gbe_config_init,
- 		.config_aneg = m88e1111_config_aneg,
- 		.read_status = marvell_read_status,
-@@ -3370,6 +3386,7 @@ static struct phy_driver marvell_drivers
- 		.name = "Marvell 88E1111 (Finisar)",
- 		/* PHY_GBIT_FEATURES */
- 		.probe = marvell_probe,
-+		.inband_caps = m88e1111_inband_caps,
- 		.config_init = m88e1111gbe_config_init,
- 		.config_aneg = m88e1111_config_aneg,
- 		.read_status = marvell_read_status,

+ 0 - 79
target/linux/generic/backport-6.6/606-07-v6.14-net-phy-add-phy_config_inband.patch

@@ -1,79 +0,0 @@
-From 5d58a890c02770ba8d790b1f3c6e8c0e20514dc2 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:18 +0000
-Subject: [PATCH 07/13] net: phy: add phy_config_inband()
-
-Add a method to configure the PHY's in-band mode.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phy.c | 32 ++++++++++++++++++++++++++++++++
- include/linux/phy.h   |  6 ++++++
- 2 files changed, 38 insertions(+)
-
---- a/drivers/net/phy/phy.c
-+++ b/drivers/net/phy/phy.c
-@@ -994,6 +994,38 @@ unsigned int phy_inband_caps(struct phy_
- EXPORT_SYMBOL_GPL(phy_inband_caps);
- 
- /**
-+ * phy_config_inband - configure the desired PHY in-band mode
-+ * @phydev: the phy_device struct
-+ * @modes: in-band modes to configure
-+ *
-+ * Description: disables, enables or enables-with-bypass in-band signalling
-+ *   between the PHY and host system.
-+ *
-+ * Returns: zero on success, or negative errno value.
-+ */
-+int phy_config_inband(struct phy_device *phydev, unsigned int modes)
-+{
-+	int err;
-+
-+	if (!!(modes & LINK_INBAND_DISABLE) +
-+	    !!(modes & LINK_INBAND_ENABLE) +
-+	    !!(modes & LINK_INBAND_BYPASS) != 1)
-+		return -EINVAL;
-+
-+	mutex_lock(&phydev->lock);
-+	if (!phydev->drv)
-+		err = -EIO;
-+	else if (!phydev->drv->config_inband)
-+		err = -EOPNOTSUPP;
-+	else
-+		err = phydev->drv->config_inband(phydev, modes);
-+	mutex_unlock(&phydev->lock);
-+
-+	return err;
-+}
-+EXPORT_SYMBOL(phy_config_inband);
-+
-+/**
-  * _phy_start_aneg - start auto-negotiation for this PHY device
-  * @phydev: the phy_device struct
-  *
---- a/include/linux/phy.h
-+++ b/include/linux/phy.h
-@@ -945,6 +945,11 @@ struct phy_driver {
- 				    phy_interface_t interface);
- 
- 	/**
-+	 * @config_inband: configure in-band mode for the PHY
-+	 */
-+	int (*config_inband)(struct phy_device *phydev, unsigned int modes);
-+
-+	/**
- 	 * @get_rate_matching: Get the supported type of rate matching for a
- 	 * particular phy interface. This is used by phy consumers to determine
- 	 * whether to advertise lower-speed modes for that interface. It is
-@@ -1763,6 +1768,7 @@ int phy_start_aneg(struct phy_device *ph
- int phy_aneg_done(struct phy_device *phydev);
- unsigned int phy_inband_caps(struct phy_device *phydev,
- 			     phy_interface_t interface);
-+int phy_config_inband(struct phy_device *phydev, unsigned int modes);
- int phy_speed_down(struct phy_device *phydev, bool sync);
- int phy_speed_up(struct phy_device *phydev);
- bool phy_check_valid(int speed, int duplex, unsigned long *features);

+ 0 - 77
target/linux/generic/backport-6.6/606-08-v6.14-net-phy-marvell-implement-config_inband-method.patch

@@ -1,77 +0,0 @@
-From a219912e0fec73c346e64ef47013cb2e152f88fc Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:23 +0000
-Subject: [PATCH 08/13] net: phy: marvell: implement config_inband() method
-
-Implement the config_inband() method for Marvell 88E1112, 88E1111,
-and Finisar's 88E1111 variant.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/marvell.c | 31 +++++++++++++++++++++++++++++++
- 1 file changed, 31 insertions(+)
-
---- a/drivers/net/phy/marvell.c
-+++ b/drivers/net/phy/marvell.c
-@@ -687,6 +687,34 @@ static unsigned int m88e1111_inband_caps
- 	return 0;
- }
- 
-+static int m88e1111_config_inband(struct phy_device *phydev, unsigned int modes)
-+{
-+	u16 extsr, bmcr;
-+	int err;
-+
-+	if (phydev->interface != PHY_INTERFACE_MODE_1000BASEX &&
-+	    phydev->interface != PHY_INTERFACE_MODE_SGMII)
-+		return -EINVAL;
-+
-+	if (modes == LINK_INBAND_BYPASS)
-+		extsr = MII_M1111_HWCFG_SERIAL_AN_BYPASS;
-+	else
-+		extsr = 0;
-+
-+	if (modes == LINK_INBAND_DISABLE)
-+		bmcr = 0;
-+	else
-+		bmcr = BMCR_ANENABLE;
-+
-+	err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
-+			 MII_M1111_HWCFG_SERIAL_AN_BYPASS, extsr);
-+	if (err < 0)
-+		return extsr;
-+
-+	return phy_modify_paged(phydev, MII_MARVELL_FIBER_PAGE, MII_BMCR,
-+				BMCR_ANENABLE, bmcr);
-+}
-+
- static int m88e1111_config_aneg(struct phy_device *phydev)
- {
- 	int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
-@@ -3344,6 +3372,7 @@ static struct phy_driver marvell_drivers
- 		/* PHY_GBIT_FEATURES */
- 		.probe = marvell_probe,
- 		.inband_caps = m88e1111_inband_caps,
-+		.config_inband = m88e1111_config_inband,
- 		.config_init = m88e1112_config_init,
- 		.config_aneg = marvell_config_aneg,
- 		.config_intr = marvell_config_intr,
-@@ -3365,6 +3394,7 @@ static struct phy_driver marvell_drivers
- 		/* PHY_GBIT_FEATURES */
- 		.probe = marvell_probe,
- 		.inband_caps = m88e1111_inband_caps,
-+		.config_inband = m88e1111_config_inband,
- 		.config_init = m88e1111gbe_config_init,
- 		.config_aneg = m88e1111_config_aneg,
- 		.read_status = marvell_read_status,
-@@ -3387,6 +3417,7 @@ static struct phy_driver marvell_drivers
- 		/* PHY_GBIT_FEATURES */
- 		.probe = marvell_probe,
- 		.inband_caps = m88e1111_inband_caps,
-+		.config_inband = m88e1111_config_inband,
- 		.config_init = m88e1111gbe_config_init,
- 		.config_aneg = m88e1111_config_aneg,
- 		.read_status = marvell_read_status,

+ 0 - 159
target/linux/generic/backport-6.6/606-09-v6.14-net-phylink-add-pcs_inband_caps-method.patch

@@ -1,159 +0,0 @@
-From df874f9e52c340cc6f0a0014a97b778f67d46849 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:28 +0000
-Subject: [PATCH 09/13] net: phylink: add pcs_inband_caps() method
-
-Add a pcs_inband_caps() method to query the PCS for its inband link
-capabilities, and use this to determine whether link modes used with
-optical SFPs can be supported.
-
-When a PCS does not provide a method, we allow inband negotiation to
-be either on or off, making this a no-op until the pcs_inband_caps()
-method is implemented by a PCS driver.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 60 +++++++++++++++++++++++++++++++++++++++
- include/linux/phylink.h   | 17 +++++++++++
- 2 files changed, 77 insertions(+)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -1038,6 +1038,15 @@ static void phylink_resolve_an_pause(str
- 	}
- }
- 
-+static unsigned int phylink_pcs_inband_caps(struct phylink_pcs *pcs,
-+				    phy_interface_t interface)
-+{
-+	if (pcs && pcs->ops->pcs_inband_caps)
-+		return pcs->ops->pcs_inband_caps(pcs, interface);
-+
-+	return 0;
-+}
-+
- static void phylink_pcs_pre_config(struct phylink_pcs *pcs,
- 				   phy_interface_t interface)
- {
-@@ -1091,6 +1100,24 @@ static void phylink_pcs_link_up(struct p
- 		pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex);
- }
- 
-+/* Query inband for a specific interface mode, asking the MAC for the
-+ * PCS which will be used to handle the interface mode.
-+ */
-+static unsigned int phylink_inband_caps(struct phylink *pl,
-+					 phy_interface_t interface)
-+{
-+	struct phylink_pcs *pcs;
-+
-+	if (!pl->mac_ops->mac_select_pcs)
-+		return 0;
-+
-+	pcs = pl->mac_ops->mac_select_pcs(pl->config, interface);
-+	if (!pcs)
-+		return 0;
-+
-+	return phylink_pcs_inband_caps(pcs, interface);
-+}
-+
- static void phylink_pcs_poll_stop(struct phylink *pl)
- {
- 	if (pl->cfg_link_an_mode == MLO_AN_INBAND)
-@@ -2443,6 +2470,26 @@ int phylink_ethtool_ksettings_get(struct
- }
- EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
- 
-+static bool phylink_validate_pcs_inband_autoneg(struct phylink *pl,
-+					        phy_interface_t interface,
-+						unsigned long *adv)
-+{
-+	unsigned int inband = phylink_inband_caps(pl, interface);
-+	unsigned int mask;
-+
-+	/* If the PCS doesn't implement inband support, be permissive. */
-+	if (!inband)
-+		return true;
-+
-+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv))
-+		mask = LINK_INBAND_ENABLE;
-+	else
-+		mask = LINK_INBAND_DISABLE;
-+
-+	/* Check whether the PCS implements the required mode */
-+	return !!(inband & mask);
-+}
-+
- /**
-  * phylink_ethtool_ksettings_set() - set the link settings
-  * @pl: a pointer to a &struct phylink returned from phylink_create()
-@@ -2578,6 +2625,13 @@ int phylink_ethtool_ksettings_set(struct
- 	    phylink_is_empty_linkmode(config.advertising))
- 		return -EINVAL;
- 
-+	/* Validate the autonegotiation state. We don't have a PHY in this
-+	 * situation, so the PCS is the media-facing entity.
-+	 */
-+	if (!phylink_validate_pcs_inband_autoneg(pl, config.interface,
-+						 config.advertising))
-+		return -EINVAL;
-+
- 	mutex_lock(&pl->state_mutex);
- 	pl->link_config.speed = config.speed;
- 	pl->link_config.duplex = config.duplex;
-@@ -3274,6 +3328,12 @@ static int phylink_sfp_config_optical(st
- 	phylink_dbg(pl, "optical SFP: chosen %s interface\n",
- 		    phy_modes(interface));
- 
-+	if (!phylink_validate_pcs_inband_autoneg(pl, interface,
-+						 config.advertising)) {
-+		phylink_err(pl, "autoneg setting not compatible with PCS");
-+		return -EINVAL;
-+	}
-+
- 	config.interface = interface;
- 
- 	/* Ignore errors if we're expecting a PHY to attach later */
---- a/include/linux/phylink.h
-+++ b/include/linux/phylink.h
-@@ -432,6 +432,7 @@ struct phylink_pcs {
- /**
-  * struct phylink_pcs_ops - MAC PCS operations structure.
-  * @pcs_validate: validate the link configuration.
-+ * @pcs_inband_caps: query inband support for interface mode.
-  * @pcs_enable: enable the PCS.
-  * @pcs_disable: disable the PCS.
-  * @pcs_pre_config: pre-mac_config method (for errata)
-@@ -445,6 +446,8 @@ struct phylink_pcs {
- struct phylink_pcs_ops {
- 	int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
- 			    const struct phylink_link_state *state);
-+	unsigned int (*pcs_inband_caps)(struct phylink_pcs *pcs,
-+					phy_interface_t interface);
- 	int (*pcs_enable)(struct phylink_pcs *pcs);
- 	void (*pcs_disable)(struct phylink_pcs *pcs);
- 	void (*pcs_pre_config)(struct phylink_pcs *pcs,
-@@ -481,6 +484,20 @@ int pcs_validate(struct phylink_pcs *pcs
- 		 const struct phylink_link_state *state);
- 
- /**
-+ * pcs_inband_caps - query PCS in-band capabilities for interface mode.
-+ * @pcs: a pointer to a &struct phylink_pcs.
-+ * @interface: interface mode to be queried
-+ *
-+ * Returns zero if it is unknown what in-band signalling is supported by the
-+ * PHY (e.g. because the PHY driver doesn't implement the method.) Otherwise,
-+ * returns a bit mask of the LINK_INBAND_* values from
-+ * &enum link_inband_signalling to describe which inband modes are supported
-+ * for this interface mode.
-+ */
-+unsigned int pcs_inband_caps(struct phylink_pcs *pcs,
-+			     phy_interface_t interface);
-+
-+/**
-  * pcs_enable() - enable the PCS.
-  * @pcs: a pointer to a &struct phylink_pcs.
-  */

+ 0 - 64
target/linux/generic/backport-6.6/606-10-v6.14-net-mvneta-implement-pcs_inband_caps-method.patch

@@ -1,64 +0,0 @@
-From 513e8fb8fa32035b3325e2e14fb9598f8cb545e9 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:33 +0000
-Subject: [PATCH 10/13] net: mvneta: implement pcs_inband_caps() method
-
-Report the PCS in-band capabilities to phylink for Marvell NETA
-interfaces.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/ethernet/marvell/mvneta.c | 27 +++++++++++++++++----------
- 1 file changed, 17 insertions(+), 10 deletions(-)
-
---- a/drivers/net/ethernet/marvell/mvneta.c
-+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -3959,20 +3959,27 @@ static struct mvneta_port *mvneta_pcs_to
- 	return container_of(pcs, struct mvneta_port, phylink_pcs);
- }
- 
--static int mvneta_pcs_validate(struct phylink_pcs *pcs,
--			       unsigned long *supported,
--			       const struct phylink_link_state *state)
-+static unsigned int mvneta_pcs_inband_caps(struct phylink_pcs *pcs,
-+					   phy_interface_t interface)
- {
--	/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
--	 * When in 802.3z mode, we must have AN enabled:
-+	/* When operating in an 802.3z mode, we must have AN enabled:
- 	 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
- 	 * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
-+	 * Therefore, inband is "required".
- 	 */
--	if (phy_interface_mode_is_8023z(state->interface) &&
--	    !phylink_test(state->advertising, Autoneg))
--		return -EINVAL;
-+	if (phy_interface_mode_is_8023z(interface))
-+		return LINK_INBAND_ENABLE;
- 
--	return 0;
-+	/* QSGMII, SGMII and RGMII can be configured to use inband
-+	 * signalling of the AN result. Indicate these as "possible".
-+	 */
-+	if (interface == PHY_INTERFACE_MODE_SGMII ||
-+	    interface == PHY_INTERFACE_MODE_QSGMII ||
-+	    phy_interface_mode_is_rgmii(interface))
-+		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
-+
-+	/* For any other modes, indicate that inband is not supported. */
-+	return LINK_INBAND_DISABLE;
- }
- 
- static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
-@@ -4070,7 +4077,7 @@ static void mvneta_pcs_an_restart(struct
- }
- 
- static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
--	.pcs_validate = mvneta_pcs_validate,
-+	.pcs_inband_caps = mvneta_pcs_inband_caps,
- 	.pcs_get_state = mvneta_pcs_get_state,
- 	.pcs_config = mvneta_pcs_config,
- 	.pcs_an_restart = mvneta_pcs_an_restart,

+ 0 - 62
target/linux/generic/backport-6.6/606-11-v6.14-net-mvpp2-implement-pcs_inband_caps-method.patch

@@ -1,62 +0,0 @@
-From d4169f0c7665afb8d8adb5e1b1df3db88517d0ad Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:38 +0000
-Subject: [PATCH 11/13] net: mvpp2: implement pcs_inband_caps() method
-
-Report the PCS in-band capabilities to phylink for Marvell PP2
-interfaces.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- .../net/ethernet/marvell/mvpp2/mvpp2_main.c   | 25 ++++++++++++-------
- 1 file changed, 16 insertions(+), 9 deletions(-)
-
---- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
-+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
-@@ -6214,19 +6214,26 @@ static const struct phylink_pcs_ops mvpp
- 	.pcs_config = mvpp2_xlg_pcs_config,
- };
- 
--static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
--				   unsigned long *supported,
--				   const struct phylink_link_state *state)
-+static unsigned int mvpp2_gmac_pcs_inband_caps(struct phylink_pcs *pcs,
-+					       phy_interface_t interface)
- {
--	/* When in 802.3z mode, we must have AN enabled:
-+	/* When operating in an 802.3z mode, we must have AN enabled:
- 	 * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
- 	 * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
-+	 * Therefore, inband is "required".
- 	 */
--	if (phy_interface_mode_is_8023z(state->interface) &&
--	    !phylink_test(state->advertising, Autoneg))
--		return -EINVAL;
-+	if (phy_interface_mode_is_8023z(interface))
-+		return LINK_INBAND_ENABLE;
- 
--	return 0;
-+	/* SGMII and RGMII can be configured to use inband signalling of the
-+	 * AN result. Indicate these as "possible".
-+	 */
-+	if (interface == PHY_INTERFACE_MODE_SGMII ||
-+	    phy_interface_mode_is_rgmii(interface))
-+		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
-+
-+	/* For any other modes, indicate that inband is not supported. */
-+	return LINK_INBAND_DISABLE;
- }
- 
- static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
-@@ -6333,7 +6340,7 @@ static void mvpp2_gmac_pcs_an_restart(st
- }
- 
- static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
--	.pcs_validate = mvpp2_gmac_pcs_validate,
-+	.pcs_inband_caps = mvpp2_gmac_pcs_inband_caps,
- 	.pcs_get_state = mvpp2_gmac_pcs_get_state,
- 	.pcs_config = mvpp2_gmac_pcs_config,
- 	.pcs_an_restart = mvpp2_gmac_pcs_an_restart,

+ 0 - 228
target/linux/generic/backport-6.6/606-12-v6.14-net-phylink-add-negotiation-of-in-band-capabilities.patch

@@ -1,228 +0,0 @@
-From 5fd0f1a02e750e2db4038dee60edea669ce5aab1 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:43 +0000
-Subject: [PATCH 12/13] net: phylink: add negotiation of in-band capabilities
-
-Support for in-band signalling with Serdes links is uncertain. Some
-PHYs do not support in-band for e.g. SGMII. Some PCS do not support
-in-band for 2500Base-X. Some PCS require in-band for Base-X protocols.
-
-Simply using what is in DT is insufficient when we have hot-pluggable
-PHYs e.g. in the form of SFP modules, which may not provide the
-in-band signalling.
-
-In order to address this, we have introduced phy_inband_caps() and
-pcs_inband_caps() functions to allow phylink to retrieve the
-capabilities from each end of the PCS/PHY link. This commit adds code
-to resolve whether in-band will be used in the various scenarios that
-we have: In-band not being used, PHY present using SGMII or Base-X,
-PHY not present. We also deal with no capabilties provided.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 154 +++++++++++++++++++++++++++++++++++---
- 1 file changed, 144 insertions(+), 10 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -75,6 +75,7 @@ struct phylink {
- 
- 	struct mutex state_mutex;
- 	struct phylink_link_state phy_state;
-+	unsigned int phy_ib_mode;
- 	struct work_struct resolve;
- 	unsigned int pcs_neg_mode;
- 	unsigned int pcs_state;
-@@ -1186,10 +1187,18 @@ static void phylink_pcs_neg_mode(struct
- 				 phy_interface_t interface,
- 				 const unsigned long *advertising)
- {
-+	unsigned int pcs_ib_caps = 0;
-+	unsigned int phy_ib_caps = 0;
- 	unsigned int neg_mode, mode;
-+	enum {
-+		INBAND_CISCO_SGMII,
-+		INBAND_BASEX,
-+	} type;
- 
- 	mode = pl->req_link_an_mode;
- 
-+	pl->phy_ib_mode = 0;
-+
- 	switch (interface) {
- 	case PHY_INTERFACE_MODE_SGMII:
- 	case PHY_INTERFACE_MODE_QSGMII:
-@@ -1200,10 +1209,7 @@ static void phylink_pcs_neg_mode(struct
- 		 * inband communication. Note: there exist PHYs that run
- 		 * with SGMII but do not send the inband data.
- 		 */
--		if (!phylink_autoneg_inband(mode))
--			neg_mode = PHYLINK_PCS_NEG_OUTBAND;
--		else
--			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
-+		type = INBAND_CISCO_SGMII;
- 		break;
- 
- 	case PHY_INTERFACE_MODE_1000BASEX:
-@@ -1214,18 +1220,139 @@ static void phylink_pcs_neg_mode(struct
- 		 * as well, but drivers may not support this, so may
- 		 * need to override this.
- 		 */
--		if (!phylink_autoneg_inband(mode))
-+		type = INBAND_BASEX;
-+		break;
-+
-+	default:
-+		pl->pcs_neg_mode = PHYLINK_PCS_NEG_NONE;
-+		pl->act_link_an_mode = mode;
-+		return;
-+	}
-+
-+	if (pcs)
-+		pcs_ib_caps = phylink_pcs_inband_caps(pcs, interface);
-+
-+	if (pl->phydev)
-+		phy_ib_caps = phy_inband_caps(pl->phydev, interface);
-+
-+	phylink_dbg(pl, "interface %s inband modes: pcs=%02x phy=%02x\n",
-+		    phy_modes(interface), pcs_ib_caps, phy_ib_caps);
-+
-+	if (!phylink_autoneg_inband(mode)) {
-+		bool pcs_ib_only = false;
-+		bool phy_ib_only = false;
-+
-+		if (pcs_ib_caps && pcs_ib_caps != LINK_INBAND_DISABLE) {
-+			/* PCS supports reporting in-band capabilities, and
-+			 * supports more than disable mode.
-+			 */
-+			if (pcs_ib_caps & LINK_INBAND_DISABLE)
-+				neg_mode = PHYLINK_PCS_NEG_OUTBAND;
-+			else if (pcs_ib_caps & LINK_INBAND_ENABLE)
-+				pcs_ib_only = true;
-+		}
-+
-+		if (phy_ib_caps && phy_ib_caps != LINK_INBAND_DISABLE) {
-+			/* PHY supports in-band capabilities, and supports
-+			 * more than disable mode.
-+			 */
-+			if (phy_ib_caps & LINK_INBAND_DISABLE)
-+				pl->phy_ib_mode = LINK_INBAND_DISABLE;
-+			else if (phy_ib_caps & LINK_INBAND_BYPASS)
-+				pl->phy_ib_mode = LINK_INBAND_BYPASS;
-+			else if (phy_ib_caps & LINK_INBAND_ENABLE)
-+				phy_ib_only = true;
-+		}
-+
-+		/* If either the PCS or PHY requires inband to be enabled,
-+		 * this is an invalid configuration. Provide a diagnostic
-+		 * message for this case, but don't try to force the issue.
-+		 */
-+		if (pcs_ib_only || phy_ib_only)
-+			phylink_warn(pl,
-+				     "firmware wants %s mode, but %s%s%s requires inband\n",
-+				     phylink_an_mode_str(mode),
-+				     pcs_ib_only ? "PCS" : "",
-+				     pcs_ib_only && phy_ib_only ? " and " : "",
-+				     phy_ib_only ? "PHY" : "");
-+
-+		neg_mode = PHYLINK_PCS_NEG_OUTBAND;
-+	} else if (type == INBAND_CISCO_SGMII || pl->phydev) {
-+		/* For SGMII modes which are designed to be used with PHYs, or
-+		 * Base-X with a PHY, we try to use in-band mode where-ever
-+		 * possible. However, there are some PHYs e.g. BCM84881 which
-+		 * do not support in-band.
-+		 */
-+		const unsigned int inband_ok = LINK_INBAND_ENABLE |
-+					       LINK_INBAND_BYPASS;
-+		const unsigned int outband_ok = LINK_INBAND_DISABLE |
-+						LINK_INBAND_BYPASS;
-+		/* PCS	PHY
-+		 * D E	D E
-+		 * 0 0  0 0	no information			inband enabled
-+		 * 1 0  0 0	pcs doesn't support		outband
-+		 * 0 1  0 0	pcs required			inband enabled
-+		 * 1 1  0 0	pcs optional			inband enabled
-+		 * 0 0  1 0	phy doesn't support		outband
-+		 * 1 0  1 0	pcs+phy doesn't support		outband
-+		 * 0 1  1 0	pcs required, phy doesn't support, invalid
-+		 * 1 1  1 0	pcs optional, phy doesn't support, outband
-+		 * 0 0  0 1	phy required			inband enabled
-+		 * 1 0  0 1	pcs doesn't support, phy required, invalid
-+		 * 0 1  0 1	pcs+phy required		inband enabled
-+		 * 1 1  0 1	pcs optional, phy required	inband enabled
-+		 * 0 0  1 1	phy optional			inband enabled
-+		 * 1 0  1 1	pcs doesn't support, phy optional, outband
-+		 * 0 1  1 1	pcs required, phy optional	inband enabled
-+		 * 1 1  1 1	pcs+phy optional		inband enabled
-+		 */
-+		if ((!pcs_ib_caps || pcs_ib_caps & inband_ok) &&
-+		    (!phy_ib_caps || phy_ib_caps & inband_ok)) {
-+			/* In-band supported or unknown at both ends. Enable
-+			 * in-band mode with or without bypass at the PHY.
-+			 */
-+			if (phy_ib_caps & LINK_INBAND_ENABLE)
-+				pl->phy_ib_mode = LINK_INBAND_ENABLE;
-+			else if (phy_ib_caps & LINK_INBAND_BYPASS)
-+				pl->phy_ib_mode = LINK_INBAND_BYPASS;
-+
-+			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
-+		} else if ((!pcs_ib_caps || pcs_ib_caps & outband_ok) &&
-+			   (!phy_ib_caps || phy_ib_caps & outband_ok)) {
-+			/* Either in-band not supported at at least one end.
-+			 * In-band bypass at the other end is possible.
-+			 */
-+			if (phy_ib_caps & LINK_INBAND_DISABLE)
-+				pl->phy_ib_mode = LINK_INBAND_DISABLE;
-+			else if (phy_ib_caps & LINK_INBAND_BYPASS)
-+				pl->phy_ib_mode = LINK_INBAND_BYPASS;
-+
- 			neg_mode = PHYLINK_PCS_NEG_OUTBAND;
-+			if (pl->phydev)
-+				mode = MLO_AN_PHY;
-+		} else {
-+			/* invalid */
-+			phylink_warn(pl, "%s: incompatible in-band capabilities, trying in-band",
-+				     phy_modes(interface));
-+			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
-+		}
-+	} else {
-+		/* For Base-X without a PHY */
-+		if (pcs_ib_caps == LINK_INBAND_DISABLE)
-+			/* If the PCS doesn't support inband, then inband must
-+			 * be disabled.
-+			 */
-+			neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
-+		else if (pcs_ib_caps == LINK_INBAND_ENABLE)
-+			/* If the PCS requires inband, then inband must always
-+			 * be enabled.
-+			 */
-+			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
- 		else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- 					   advertising))
- 			neg_mode = PHYLINK_PCS_NEG_INBAND_ENABLED;
- 		else
- 			neg_mode = PHYLINK_PCS_NEG_INBAND_DISABLED;
--		break;
--
--	default:
--		neg_mode = PHYLINK_PCS_NEG_NONE;
--		break;
- 	}
- 
- 	pl->pcs_neg_mode = neg_mode;
-@@ -1324,6 +1451,13 @@ static void phylink_major_config(struct
- 				    ERR_PTR(err));
- 	}
- 
-+	if (pl->phydev && pl->phy_ib_mode) {
-+		err = phy_config_inband(pl->phydev, pl->phy_ib_mode);
-+		if (err < 0)
-+			phylink_err(pl, "phy_config_inband: %pe\n",
-+				    ERR_PTR(err));
-+	}
-+
- 	if (pl->sfp_bus) {
- 		rate_kbd = phylink_interface_signal_rate(state->interface);
- 		if (rate_kbd)

+ 0 - 110
target/linux/generic/backport-6.6/606-13-v6.14-net-phylink-remove-phylink_phy_no_inband.patch

@@ -1,110 +0,0 @@
-From 77ac9a8b2536e0eaca6c6f21070068458bf55981 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Tue, 3 Dec 2024 15:31:48 +0000
-Subject: [PATCH 13/13] net: phylink: remove phylink_phy_no_inband()
-
-Remove phylink_phy_no_inband() now that we are handling the lack of
-inband negotiation by querying the capabilities of the PHY and PCS,
-and the BCM84881 PHY driver provides us the information necessary to
-make the decision.
-
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/phy/phylink.c | 27 ++++++---------------------
- 1 file changed, 6 insertions(+), 21 deletions(-)
-
---- a/drivers/net/phy/phylink.c
-+++ b/drivers/net/phy/phylink.c
-@@ -3320,10 +3320,11 @@ static phy_interface_t phylink_choose_sf
- 	return interface;
- }
- 
--static void phylink_sfp_set_config(struct phylink *pl, u8 mode,
-+static void phylink_sfp_set_config(struct phylink *pl,
- 				   unsigned long *supported,
- 				   struct phylink_link_state *state)
- {
-+	u8 mode = MLO_AN_INBAND;
- 	bool changed = false;
- 
- 	phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n",
-@@ -3357,8 +3358,7 @@ static void phylink_sfp_set_config(struc
- 		phylink_mac_initial_config(pl, false);
- }
- 
--static int phylink_sfp_config_phy(struct phylink *pl, u8 mode,
--				  struct phy_device *phy)
-+static int phylink_sfp_config_phy(struct phylink *pl, struct phy_device *phy)
- {
- 	__ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
- 	__ETHTOOL_DECLARE_LINK_MODE_MASK(support);
-@@ -3398,7 +3398,7 @@ static int phylink_sfp_config_phy(struct
- 	if (ret) {
- 		phylink_err(pl,
- 			    "validation of %s/%s with support %*pb failed: %pe\n",
--			    phylink_an_mode_str(mode),
-+			    phylink_an_mode_str(pl->req_link_an_mode),
- 			    phy_modes(config.interface),
- 			    __ETHTOOL_LINK_MODE_MASK_NBITS, support,
- 			    ERR_PTR(ret));
-@@ -3407,7 +3407,7 @@ static int phylink_sfp_config_phy(struct
- 
- 	pl->link_port = pl->sfp_port;
- 
--	phylink_sfp_set_config(pl, mode, support, &config);
-+	phylink_sfp_set_config(pl, support, &config);
- 
- 	return 0;
- }
-@@ -3481,7 +3481,7 @@ static int phylink_sfp_config_optical(st
- 
- 	pl->link_port = pl->sfp_port;
- 
--	phylink_sfp_set_config(pl, MLO_AN_INBAND, pl->sfp_support, &config);
-+	phylink_sfp_set_config(pl, pl->sfp_support, &config);
- 
- 	return 0;
- }
-@@ -3552,20 +3552,10 @@ static void phylink_sfp_link_up(void *up
- 	phylink_enable_and_run_resolve(pl, PHYLINK_DISABLE_LINK);
- }
- 
--/* The Broadcom BCM84881 in the Methode DM7052 is unable to provide a SGMII
-- * or 802.3z control word, so inband will not work.
-- */
--static bool phylink_phy_no_inband(struct phy_device *phy)
--{
--	return phy->is_c45 && phy_id_compare(phy->c45_ids.device_ids[1],
--					     0xae025150, 0xfffffff0);
--}
--
- static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
- {
- 	struct phylink *pl = upstream;
- 	phy_interface_t interface;
--	u8 mode;
- 	int ret;
- 
- 	/*
-@@ -3577,17 +3567,12 @@ static int phylink_sfp_connect_phy(void
- 	 */
- 	phy_support_asym_pause(phy);
- 
--	if (phylink_phy_no_inband(phy))
--		mode = MLO_AN_PHY;
--	else
--		mode = MLO_AN_INBAND;
--
- 	/* Set the PHY's host supported interfaces */
- 	phy_interface_and(phy->host_interfaces, phylink_sfp_interfaces,
- 			  pl->config->supported_interfaces);
- 
- 	/* Do the initial configuration */
--	ret = phylink_sfp_config_phy(pl, mode, phy);
-+	ret = phylink_sfp_config_phy(pl, phy);
- 	if (ret < 0)
- 		return ret;
- 

+ 0 - 53
target/linux/generic/backport-6.6/607-01-v6.14-net-pcs-pcs-lynx-implement-pcs_inband_caps-method.patch

@@ -1,53 +0,0 @@
-From 6561f0e547be221f411fda5eddfcc5bd8bb058a5 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Thu, 5 Dec 2024 09:42:24 +0000
-Subject: [PATCH 1/3] net: pcs: pcs-lynx: implement pcs_inband_caps() method
-
-Report the PCS in-band capabilities to phylink for the Lynx PCS.
-
-Reviewed-by: Maxime Chevallier <[email protected]>
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/pcs/pcs-lynx.c | 22 ++++++++++++++++++++++
- 1 file changed, 22 insertions(+)
-
---- a/drivers/net/pcs/pcs-lynx.c
-+++ b/drivers/net/pcs/pcs-lynx.c
-@@ -35,6 +35,27 @@ enum sgmii_speed {
- #define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
- #define lynx_to_phylink_pcs(lynx) (&(lynx)->pcs)
- 
-+static unsigned int lynx_pcs_inband_caps(struct phylink_pcs *pcs,
-+					 phy_interface_t interface)
-+{
-+	switch (interface) {
-+	case PHY_INTERFACE_MODE_1000BASEX:
-+	case PHY_INTERFACE_MODE_SGMII:
-+	case PHY_INTERFACE_MODE_QSGMII:
-+		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
-+
-+	case PHY_INTERFACE_MODE_10GBASER:
-+	case PHY_INTERFACE_MODE_2500BASEX:
-+		return LINK_INBAND_DISABLE;
-+
-+	case PHY_INTERFACE_MODE_USXGMII:
-+		return LINK_INBAND_ENABLE;
-+
-+	default:
-+		return 0;
-+	}
-+}
-+
- static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
- 				       struct phylink_link_state *state)
- {
-@@ -307,6 +328,7 @@ static void lynx_pcs_link_up(struct phyl
- }
- 
- static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
-+	.pcs_inband_caps = lynx_pcs_inband_caps,
- 	.pcs_get_state = lynx_pcs_get_state,
- 	.pcs_config = lynx_pcs_config,
- 	.pcs_an_restart = lynx_pcs_an_restart,

+ 0 - 47
target/linux/generic/backport-6.6/607-02-v6.14-net-pcs-pcs-mtk-lynxi-implement-pcs_inband_caps-meth.patch

@@ -1,47 +0,0 @@
-From 520d29bdda86915b3caf8c72825a574bff212553 Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Thu, 5 Dec 2024 09:42:29 +0000
-Subject: [PATCH 2/3] net: pcs: pcs-mtk-lynxi: implement pcs_inband_caps()
- method
-
-Report the PCS in-band capabilities to phylink for the LynxI PCS.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/pcs/pcs-mtk-lynxi.c | 16 ++++++++++++++++
- 1 file changed, 16 insertions(+)
-
---- a/drivers/net/pcs/pcs-mtk-lynxi.c
-+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
-@@ -88,6 +88,21 @@ static struct mtk_pcs_lynxi *pcs_to_mtk_
- 	return container_of(pcs, struct mtk_pcs_lynxi, pcs);
- }
- 
-+static unsigned int mtk_pcs_lynxi_inband_caps(struct phylink_pcs *pcs,
-+					      phy_interface_t interface)
-+{
-+	switch (interface) {
-+	case PHY_INTERFACE_MODE_1000BASEX:
-+	case PHY_INTERFACE_MODE_2500BASEX:
-+	case PHY_INTERFACE_MODE_SGMII:
-+	case PHY_INTERFACE_MODE_QSGMII:
-+		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
-+
-+	default:
-+		return 0;
-+	}
-+}
-+
- static void mtk_pcs_lynxi_get_state(struct phylink_pcs *pcs,
- 				    struct phylink_link_state *state)
- {
-@@ -241,6 +256,7 @@ static void mtk_pcs_lynxi_disable(struct
- }
- 
- static const struct phylink_pcs_ops mtk_pcs_lynxi_ops = {
-+	.pcs_inband_caps = mtk_pcs_lynxi_inband_caps,
- 	.pcs_get_state = mtk_pcs_lynxi_get_state,
- 	.pcs_config = mtk_pcs_lynxi_config,
- 	.pcs_an_restart = mtk_pcs_lynxi_restart_an,

+ 0 - 58
target/linux/generic/backport-6.6/607-03-v6.14-net-pcs-xpcs-implement-pcs_inband_caps-method.patch

@@ -1,58 +0,0 @@
-From 484d0170d6c6bbb5213d037664e9a551f793bacd Mon Sep 17 00:00:00 2001
-From: "Russell King (Oracle)" <[email protected]>
-Date: Thu, 5 Dec 2024 09:42:34 +0000
-Subject: [PATCH 3/3] net: pcs: xpcs: implement pcs_inband_caps() method
-
-Report the PCS inband capabilities to phylink for XPCS.
-
-Signed-off-by: Russell King (Oracle) <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/pcs/pcs-xpcs.c | 28 ++++++++++++++++++++++++++++
- 1 file changed, 28 insertions(+)
-
---- a/drivers/net/pcs/pcs-xpcs.c
-+++ b/drivers/net/pcs/pcs-xpcs.c
-@@ -628,6 +628,33 @@ static int xpcs_validate(struct phylink_
- 	return 0;
- }
- 
-+static unsigned int xpcs_inband_caps(struct phylink_pcs *pcs,
-+				     phy_interface_t interface)
-+{
-+	struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
-+	const struct xpcs_compat *compat;
-+
-+	compat = xpcs_find_compat(xpcs->id, interface);
-+	if (!compat)
-+		return 0;
-+
-+	switch (compat->an_mode) {
-+	case DW_AN_C73:
-+		return LINK_INBAND_ENABLE;
-+
-+	case DW_AN_C37_SGMII:
-+	case DW_AN_C37_1000BASEX:
-+		return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
-+
-+	case DW_10GBASER:
-+	case DW_2500BASEX:
-+		return LINK_INBAND_DISABLE;
-+
-+	default:
-+		return 0;
-+	}
-+}
-+
- void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
- {
- 	int i, j;
-@@ -1331,6 +1358,7 @@ static const struct xpcs_id xpcs_id_list
- 
- static const struct phylink_pcs_ops xpcs_phylink_ops = {
- 	.pcs_validate = xpcs_validate,
-+	.pcs_inband_caps = xpcs_inband_caps,
- 	.pcs_config = xpcs_config,
- 	.pcs_get_state = xpcs_get_state,
- 	.pcs_an_restart = xpcs_an_restart,

+ 0 - 30
target/linux/generic/backport-6.6/610-v6.9-net-mdio-add-2.5g-and-5g-related-PMA-speed-constants.patch

@@ -1,30 +0,0 @@
-From 6c06c88fa838fcc1b7e5380facd086f57fd9d1c4 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <[email protected]>
-Date: Sun, 4 Feb 2024 15:16:46 +0100
-Subject: [PATCH] net: mdio: add 2.5g and 5g related PMA speed constants
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Add constants indicating 2.5g and 5g ability in the MMD PMA speed
-register.
-
-Signed-off-by: Marek Behún <[email protected]>
-Signed-off-by: Heiner Kallweit <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- include/uapi/linux/mdio.h | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/include/uapi/linux/mdio.h
-+++ b/include/uapi/linux/mdio.h
-@@ -138,6 +138,8 @@
- #define MDIO_PMA_SPEED_1000		0x0010	/* 1000M capable */
- #define MDIO_PMA_SPEED_100		0x0020	/* 100M capable */
- #define MDIO_PMA_SPEED_10		0x0040	/* 10M capable */
-+#define MDIO_PMA_SPEED_2_5G		0x2000	/* 2.5G capable */
-+#define MDIO_PMA_SPEED_5G		0x4000	/* 5G capable */
- #define MDIO_PCS_SPEED_10P2B		0x0002	/* 10PASS-TS/2BASE-TL capable */
- #define MDIO_PCS_SPEED_2_5G		0x0040	/* 2.5G capable */
- #define MDIO_PCS_SPEED_5G		0x0080	/* 5G capable */

+ 0 - 94
target/linux/generic/backport-6.6/611-01-v6.11-udp-Allow-GSO-transmit-from-devices-with-no-checksum.patch

@@ -1,94 +0,0 @@
-From: Jakub Sitnicki <[email protected]>
-Date: Wed, 26 Jun 2024 19:51:26 +0200
-Subject: [PATCH] udp: Allow GSO transmit from devices with no checksum offload
-
-Today sending a UDP GSO packet from a TUN device results in an EIO error:
-
-  import fcntl, os, struct
-  from socket import *
-
-  TUNSETIFF = 0x400454CA
-  IFF_TUN = 0x0001
-  IFF_NO_PI = 0x1000
-  UDP_SEGMENT = 103
-
-  tun_fd = os.open("/dev/net/tun", os.O_RDWR)
-  ifr = struct.pack("16sH", b"tun0", IFF_TUN | IFF_NO_PI)
-  fcntl.ioctl(tun_fd, TUNSETIFF, ifr)
-
-  os.system("ip addr add 192.0.2.1/24 dev tun0")
-  os.system("ip link set dev tun0 up")
-
-  s = socket(AF_INET, SOCK_DGRAM)
-  s.setsockopt(SOL_UDP, UDP_SEGMENT, 1200)
-  s.sendto(b"x" * 3000, ("192.0.2.2", 9)) # EIO
-
-This is due to a check in the udp stack if the egress device offers
-checksum offload. While TUN/TAP devices, by default, don't advertise this
-capability because it requires support from the TUN/TAP reader.
-
-However, the GSO stack has a software fallback for checksum calculation,
-which we can use. This way we don't force UDP_SEGMENT users to handle the
-EIO error and implement a segmentation fallback.
-
-Lift the restriction so that UDP_SEGMENT can be used with any egress
-device. We also need to adjust the UDP GSO code to match the GSO stack
-expectation about ip_summed field, as set in commit 8d63bee643f1 ("net:
-avoid skb_warn_bad_offload false positives on UFO"). Otherwise we will hit
-the bad offload check.
-
-Users should, however, expect a potential performance impact when
-batch-sending packets with UDP_SEGMENT without checksum offload on the
-egress device. In such case the packet payload is read twice: first during
-the sendmsg syscall when copying data from user memory, and then in the GSO
-stack for checksum computation. This double memory read can be less
-efficient than a regular sendmsg where the checksum is calculated during
-the initial data copy from user memory.
-
-Signed-off-by: Jakub Sitnicki <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
-
---- a/net/ipv4/udp.c
-+++ b/net/ipv4/udp.c
-@@ -942,8 +942,7 @@ static int udp_send_skb(struct sk_buff *
- 			kfree_skb(skb);
- 			return -EINVAL;
- 		}
--		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
--		    dst_xfrm(skb_dst(skb))) {
-+		if (is_udplite || dst_xfrm(skb_dst(skb))) {
- 			kfree_skb(skb);
- 			return -EIO;
- 		}
---- a/net/ipv4/udp_offload.c
-+++ b/net/ipv4/udp_offload.c
-@@ -448,6 +448,14 @@ struct sk_buff *__udp_gso_segment(struct
- 	else
- 		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
- 
-+	/* On the TX path, CHECKSUM_NONE and CHECKSUM_UNNECESSARY have the same
-+	 * meaning. However, check for bad offloads in the GSO stack expects the
-+	 * latter, if the checksum was calculated in software. To vouch for the
-+	 * segment skbs we actually need to set it on the gso_skb.
-+	 */
-+	if (gso_skb->ip_summed == CHECKSUM_NONE)
-+		gso_skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
- 	/* update refcount for the packet */
- 	if (copy_dtor) {
- 		int delta = sum_truesize - gso_skb->truesize;
---- a/net/ipv6/udp.c
-+++ b/net/ipv6/udp.c
-@@ -1258,8 +1258,7 @@ static int udp_v6_send_skb(struct sk_buf
- 			kfree_skb(skb);
- 			return -EINVAL;
- 		}
--		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
--		    dst_xfrm(skb_dst(skb))) {
-+		if (is_udplite || dst_xfrm(skb_dst(skb))) {
- 			kfree_skb(skb);
- 			return -EIO;
- 		}

+ 0 - 69
target/linux/generic/backport-6.6/611-02-v6.11-net-Make-USO-depend-on-CSUM-offload.patch

@@ -1,69 +0,0 @@
-From: Jakub Sitnicki <[email protected]>
-Date: Thu, 8 Aug 2024 11:56:21 +0200
-Subject: [PATCH] net: Make USO depend on CSUM offload
-
-UDP segmentation offload inherently depends on checksum offload. It should
-not be possible to disable checksum offload while leaving USO enabled.
-Enforce this dependency in code.
-
-There is a single tx-udp-segmentation feature flag to indicate support for
-both IPv4/6, hence the devices wishing to support USO must offer checksum
-offload for both IP versions.
-
-Fixes: 10154dbded6d ("udp: Allow GSO transmit from devices with no checksum offload")
-Suggested-by: Willem de Bruijn <[email protected]>
-Signed-off-by: Jakub Sitnicki <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Link: https://patch.msgid.link/20240808-udp-gso-egress-from-tunnel-v4-1-f5c5b4149ab9@cloudflare.com
-Signed-off-by: Jakub Kicinski <[email protected]>
----
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -9808,6 +9808,15 @@ static void netdev_sync_lower_features(s
- 	}
- }
- 
-+static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
-+{
-+	netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-+	bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
-+	bool hw_csum = features & NETIF_F_HW_CSUM;
-+
-+	return ip_csum || hw_csum;
-+}
-+
- static netdev_features_t netdev_fix_features(struct net_device *dev,
- 	netdev_features_t features)
- {
-@@ -9889,15 +9898,9 @@ static netdev_features_t netdev_fix_feat
- 		features &= ~NETIF_F_LRO;
- 	}
- 
--	if (features & NETIF_F_HW_TLS_TX) {
--		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
--			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
--		bool hw_csum = features & NETIF_F_HW_CSUM;
--
--		if (!ip_csum && !hw_csum) {
--			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
--			features &= ~NETIF_F_HW_TLS_TX;
--		}
-+	if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
-+		netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
-+		features &= ~NETIF_F_HW_TLS_TX;
- 	}
- 
- 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
-@@ -9905,6 +9908,11 @@ static netdev_features_t netdev_fix_feat
- 		features &= ~NETIF_F_HW_TLS_RX;
- 	}
- 
-+	if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
-+		netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
-+		features &= ~NETIF_F_GSO_UDP_L4;
-+	}
-+
- 	return features;
- }
- 

+ 0 - 86
target/linux/generic/backport-6.6/611-03-v6.11-udp-Fall-back-to-software-USO-if-IPv6-extension-head.patch

@@ -1,86 +0,0 @@
-From: Jakub Sitnicki <[email protected]>
-Date: Thu, 8 Aug 2024 11:56:22 +0200
-Subject: [PATCH] udp: Fall back to software USO if IPv6 extension headers are
- present
-
-In commit 10154dbded6d ("udp: Allow GSO transmit from devices with no
-checksum offload") we have intentionally allowed UDP GSO packets marked
-CHECKSUM_NONE to pass to the GSO stack, so that they can be segmented and
-checksummed by a software fallback when the egress device lacks these
-features.
-
-What was not taken into consideration is that a CHECKSUM_NONE skb can be
-handed over to the GSO stack also when the egress device advertises the
-tx-udp-segmentation / NETIF_F_GSO_UDP_L4 feature.
-
-This will happen when there are IPv6 extension headers present, which we
-check for in __ip6_append_data(). Syzbot has discovered this scenario,
-producing a warning as below:
-
-  ip6tnl0: caps=(0x00000006401d7869, 0x00000006401d7869)
-  WARNING: CPU: 0 PID: 5112 at net/core/dev.c:3293 skb_warn_bad_offload+0x166/0x1a0 net/core/dev.c:3291
-  Modules linked in:
-  CPU: 0 PID: 5112 Comm: syz-executor391 Not tainted 6.10.0-rc7-syzkaller-01603-g80ab5445da62 #0
-  Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 06/07/2024
-  RIP: 0010:skb_warn_bad_offload+0x166/0x1a0 net/core/dev.c:3291
-  [...]
-  Call Trace:
-   <TASK>
-   __skb_gso_segment+0x3be/0x4c0 net/core/gso.c:127
-   skb_gso_segment include/net/gso.h:83 [inline]
-   validate_xmit_skb+0x585/0x1120 net/core/dev.c:3661
-   __dev_queue_xmit+0x17a4/0x3e90 net/core/dev.c:4415
-   neigh_output include/net/neighbour.h:542 [inline]
-   ip6_finish_output2+0xffa/0x1680 net/ipv6/ip6_output.c:137
-   ip6_finish_output+0x41e/0x810 net/ipv6/ip6_output.c:222
-   ip6_send_skb+0x112/0x230 net/ipv6/ip6_output.c:1958
-   udp_v6_send_skb+0xbf5/0x1870 net/ipv6/udp.c:1292
-   udpv6_sendmsg+0x23b3/0x3270 net/ipv6/udp.c:1588
-   sock_sendmsg_nosec net/socket.c:730 [inline]
-   __sock_sendmsg+0xef/0x270 net/socket.c:745
-   ____sys_sendmsg+0x525/0x7d0 net/socket.c:2585
-   ___sys_sendmsg net/socket.c:2639 [inline]
-   __sys_sendmmsg+0x3b2/0x740 net/socket.c:2725
-   __do_sys_sendmmsg net/socket.c:2754 [inline]
-   __se_sys_sendmmsg net/socket.c:2751 [inline]
-   __x64_sys_sendmmsg+0xa0/0xb0 net/socket.c:2751
-   do_syscall_x64 arch/x86/entry/common.c:52 [inline]
-   do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
-   entry_SYSCALL_64_after_hwframe+0x77/0x7f
-   [...]
-   </TASK>
-
-We are hitting the bad offload warning because when an egress device is
-capable of handling segmentation offload requested by
-skb_shinfo(skb)->gso_type, the chain of gso_segment callbacks won't produce
-any segment skbs and return NULL. See the skb_gso_ok() branch in
-{__udp,tcp,sctp}_gso_segment helpers.
-
-To fix it, force a fallback to software USO when processing a packet with
-IPv6 extension headers, since we don't know if these can checksummed by
-all devices which offer USO.
-
-Fixes: 10154dbded6d ("udp: Allow GSO transmit from devices with no checksum offload")
-Reported-by: [email protected]
-Closes: https://lore.kernel.org/all/[email protected]/
-Reviewed-by: Willem de Bruijn <[email protected]>
-Signed-off-by: Jakub Sitnicki <[email protected]>
-Link: https://patch.msgid.link/20240808-udp-gso-egress-from-tunnel-v4-2-f5c5b4149ab9@cloudflare.com
-Signed-off-by: Jakub Kicinski <[email protected]>
----
-
---- a/net/ipv4/udp_offload.c
-+++ b/net/ipv4/udp_offload.c
-@@ -343,6 +343,12 @@ struct sk_buff *__udp_gso_segment(struct
- 		     !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)))
- 		return ERR_PTR(-EINVAL);
- 
-+	/* We don't know if egress device can segment and checksum the packet
-+	 * when IPv6 extension headers are present. Fall back to software GSO.
-+	 */
-+	if (gso_skb->ip_summed != CHECKSUM_PARTIAL)
-+		features &= ~(NETIF_F_GSO_UDP_L4 | NETIF_F_CSUM_MASK);
-+
- 	if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
- 		/* Packet is from an untrusted source, reset gso_segs. */
- 		skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),

+ 0 - 29
target/linux/generic/backport-6.6/612-v6.9-net-get-stats64-if-device-if-driver-is-configured.patch

@@ -1,29 +0,0 @@
-From: Breno Leitao <[email protected]>
-Date: Wed, 28 Feb 2024 03:31:21 -0800
-Subject: [PATCH] net: get stats64 if device if driver is configured
-
-If the network driver is relying in the net core to do stats allocation,
-then we want to dev_get_tstats64() instead of netdev_stats_to_stats64(),
-since there are per-cpu stats that needs to be taken in consideration.
-
-This will also simplify the drivers in regard to statistics. Once the
-driver sets NETDEV_PCPU_STAT_TSTATS, it doesn't not need to allocate the
-stacks, neither it needs to set `.ndo_get_stats64 = dev_get_tstats64`
-for the generic stats collection function anymore.
-
-Signed-off-by: Breno Leitao <[email protected]>
-Reviewed-by: Simon Horman <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
-
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -10715,6 +10715,8 @@ struct rtnl_link_stats64 *dev_get_stats(
- 		ops->ndo_get_stats64(dev, storage);
- 	} else if (ops->ndo_get_stats) {
- 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
-+	} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
-+		dev_get_tstats64(dev, storage);
- 	} else {
- 		netdev_stats_to_stats64(storage, &dev->stats);
- 	}

+ 0 - 139
target/linux/generic/backport-6.6/620-01-v6.7-page_pool-fragment-API-support-for-32-bit-arch-with-.patch

@@ -1,139 +0,0 @@
-From: Yunsheng Lin <[email protected]>
-Date: Fri, 13 Oct 2023 14:48:21 +0800
-Subject: [PATCH] page_pool: fragment API support for 32-bit arch with 64-bit
- DMA
-
-Currently page_pool_alloc_frag() is not supported in 32-bit
-arch with 64-bit DMA because of the overlap issue between
-pp_frag_count and dma_addr_upper in 'struct page' for those
-arches, which seems to be quite common, see [1], which means
-driver may need to handle it when using fragment API.
-
-It is assumed that the combination of the above arch with an
-address space >16TB does not exist, as all those arches have
-64b equivalent, it seems logical to use the 64b version for a
-system with a large address space. It is also assumed that dma
-address is page aligned when we are dma mapping a page aligned
-buffer, see [2].
-
-That means we're storing 12 bits of 0 at the lower end for a
-dma address, we can reuse those bits for the above arches to
-support 32b+12b, which is 16TB of memory.
-
-If we make a wrong assumption, a warning is emitted so that
-user can report to us.
-
-1. https://lore.kernel.org/all/[email protected]/
-2. https://lore.kernel.org/all/[email protected]/
-
-Tested-by: Alexander Lobakin <[email protected]>
-Signed-off-by: Yunsheng Lin <[email protected]>
-CC: Lorenzo Bianconi <[email protected]>
-CC: Alexander Duyck <[email protected]>
-CC: Liang Chen <[email protected]>
-CC: Guillaume Tucker <[email protected]>
-CC: Matthew Wilcox <[email protected]>
-CC: Linux-MM <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
-
---- a/include/linux/mm_types.h
-+++ b/include/linux/mm_types.h
-@@ -125,18 +125,7 @@ struct page {
- 			struct page_pool *pp;
- 			unsigned long _pp_mapping_pad;
- 			unsigned long dma_addr;
--			union {
--				/**
--				 * dma_addr_upper: might require a 64-bit
--				 * value on 32-bit architectures.
--				 */
--				unsigned long dma_addr_upper;
--				/**
--				 * For frag page support, not supported in
--				 * 32-bit architectures with 64-bit DMA.
--				 */
--				atomic_long_t pp_frag_count;
--			};
-+			atomic_long_t pp_frag_count;
- 		};
- 		struct {	/* Tail pages of compound page */
- 			unsigned long compound_head;	/* Bit zero is set */
---- a/include/net/page_pool/helpers.h
-+++ b/include/net/page_pool/helpers.h
-@@ -197,7 +197,7 @@ static inline void page_pool_recycle_dir
- 	page_pool_put_full_page(pool, page, true);
- }
- 
--#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
-+#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA	\
- 		(sizeof(dma_addr_t) > sizeof(unsigned long))
- 
- /**
-@@ -211,17 +211,25 @@ static inline dma_addr_t page_pool_get_d
- {
- 	dma_addr_t ret = page->dma_addr;
- 
--	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
--		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
-+	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
-+		ret <<= PAGE_SHIFT;
- 
- 	return ret;
- }
- 
--static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
-+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
- {
-+	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
-+		page->dma_addr = addr >> PAGE_SHIFT;
-+
-+		/* We assume page alignment to shave off bottom bits,
-+		 * if this "compression" doesn't work we need to drop.
-+		 */
-+		return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
-+	}
-+
- 	page->dma_addr = addr;
--	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
--		page->dma_addr_upper = upper_32_bits(addr);
-+	return false;
- }
- 
- static inline bool page_pool_put(struct page_pool *pool)
---- a/net/core/page_pool.c
-+++ b/net/core/page_pool.c
-@@ -207,10 +207,6 @@ static int page_pool_init(struct page_po
- 		 */
- 	}
- 
--	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
--	    pool->p.flags & PP_FLAG_PAGE_FRAG)
--		return -EINVAL;
--
- #ifdef CONFIG_PAGE_POOL_STATS
- 	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
- 	if (!pool->recycle_stats)
-@@ -359,12 +355,20 @@ static bool page_pool_dma_map(struct pag
- 	if (dma_mapping_error(pool->p.dev, dma))
- 		return false;
- 
--	page_pool_set_dma_addr(page, dma);
-+	if (page_pool_set_dma_addr(page, dma))
-+		goto unmap_failed;
- 
- 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
- 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
- 
- 	return true;
-+
-+unmap_failed:
-+	WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
-+	dma_unmap_page_attrs(pool->p.dev, dma,
-+			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
-+			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
-+	return false;
- }
- 
- static void page_pool_set_pp_info(struct page_pool *pool,

+ 0 - 183
target/linux/generic/backport-6.6/620-02-v6.7-page_pool-unify-frag_count-handling-in-page_pool_is_.patch

@@ -1,183 +0,0 @@
-From: Yunsheng Lin <[email protected]>
-Date: Fri, 20 Oct 2023 17:59:48 +0800
-Subject: [PATCH] page_pool: unify frag_count handling in
- page_pool_is_last_frag()
-
-Currently when page_pool_create() is called with
-PP_FLAG_PAGE_FRAG flag, page_pool_alloc_pages() is only
-allowed to be called under the below constraints:
-1. page_pool_fragment_page() need to be called to setup
-   page->pp_frag_count immediately.
-2. page_pool_defrag_page() often need to be called to drain
-   the page->pp_frag_count when there is no more user will
-   be holding on to that page.
-
-Those constraints exist in order to support a page to be
-split into multi fragments.
-
-And those constraints have some overhead because of the
-cache line dirtying/bouncing and atomic update.
-
-Those constraints are unavoidable for case when we need a
-page to be split into more than one fragment, but there is
-also case that we want to avoid the above constraints and
-their overhead when a page can't be split as it can only
-hold a fragment as requested by user, depending on different
-use cases:
-use case 1: allocate page without page splitting.
-use case 2: allocate page with page splitting.
-use case 3: allocate page with or without page splitting
-            depending on the fragment size.
-
-Currently page pool only provide page_pool_alloc_pages() and
-page_pool_alloc_frag() API to enable the 1 & 2 separately,
-so we can not use a combination of 1 & 2 to enable 3, it is
-not possible yet because of the per page_pool flag
-PP_FLAG_PAGE_FRAG.
-
-So in order to allow allocating unsplit page without the
-overhead of split page while still allow allocating split
-page we need to remove the per page_pool flag in
-page_pool_is_last_frag(), as best as I can think of, it seems
-there are two methods as below:
-1. Add per page flag/bit to indicate a page is split or
-   not, which means we might need to update that flag/bit
-   everytime the page is recycled, dirtying the cache line
-   of 'struct page' for use case 1.
-2. Unify the page->pp_frag_count handling for both split and
-   unsplit page by assuming all pages in the page pool is split
-   into a big fragment initially.
-
-As page pool already supports use case 1 without dirtying the
-cache line of 'struct page' whenever a page is recyclable, we
-need to support the above use case 3 with minimal overhead,
-especially not adding any noticeable overhead for use case 1,
-and we are already doing an optimization by not updating
-pp_frag_count in page_pool_defrag_page() for the last fragment
-user, this patch chooses to unify the pp_frag_count handling
-to support the above use case 3.
-
-There is no noticeable performance degradation and some
-justification for unifying the frag_count handling with this
-patch applied using a micro-benchmark testing in [1].
-
-1. https://lore.kernel.org/all/[email protected]/
-
-Signed-off-by: Yunsheng Lin <[email protected]>
-CC: Lorenzo Bianconi <[email protected]>
-CC: Alexander Duyck <[email protected]>
-CC: Liang Chen <[email protected]>
-CC: Alexander Lobakin <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
-
---- a/include/net/page_pool/helpers.h
-+++ b/include/net/page_pool/helpers.h
-@@ -115,28 +115,49 @@ static inline long page_pool_defrag_page
- 	long ret;
- 
- 	/* If nr == pp_frag_count then we have cleared all remaining
--	 * references to the page. No need to actually overwrite it, instead
--	 * we can leave this to be overwritten by the calling function.
-+	 * references to the page:
-+	 * 1. 'n == 1': no need to actually overwrite it.
-+	 * 2. 'n != 1': overwrite it with one, which is the rare case
-+	 *              for pp_frag_count draining.
- 	 *
--	 * The main advantage to doing this is that an atomic_read is
--	 * generally a much cheaper operation than an atomic update,
--	 * especially when dealing with a page that may be partitioned
--	 * into only 2 or 3 pieces.
-+	 * The main advantage to doing this is that not only we avoid a atomic
-+	 * update, as an atomic_read is generally a much cheaper operation than
-+	 * an atomic update, especially when dealing with a page that may be
-+	 * partitioned into only 2 or 3 pieces; but also unify the pp_frag_count
-+	 * handling by ensuring all pages have partitioned into only 1 piece
-+	 * initially, and only overwrite it when the page is partitioned into
-+	 * more than one piece.
- 	 */
--	if (atomic_long_read(&page->pp_frag_count) == nr)
-+	if (atomic_long_read(&page->pp_frag_count) == nr) {
-+		/* As we have ensured nr is always one for constant case using
-+		 * the BUILD_BUG_ON(), only need to handle the non-constant case
-+		 * here for pp_frag_count draining, which is a rare case.
-+		 */
-+		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
-+		if (!__builtin_constant_p(nr))
-+			atomic_long_set(&page->pp_frag_count, 1);
-+
- 		return 0;
-+	}
- 
- 	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
- 	WARN_ON(ret < 0);
-+
-+	/* We are the last user here too, reset pp_frag_count back to 1 to
-+	 * ensure all pages have been partitioned into 1 piece initially,
-+	 * this should be the rare case when the last two fragment users call
-+	 * page_pool_defrag_page() currently.
-+	 */
-+	if (unlikely(!ret))
-+		atomic_long_set(&page->pp_frag_count, 1);
-+
- 	return ret;
- }
- 
--static inline bool page_pool_is_last_frag(struct page_pool *pool,
--					  struct page *page)
-+static inline bool page_pool_is_last_frag(struct page *page)
- {
--	/* If fragments aren't enabled or count is 0 we were the last user */
--	return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
--	       (page_pool_defrag_page(page, 1) == 0);
-+	/* If page_pool_defrag_page() returns 0, we were the last user */
-+	return page_pool_defrag_page(page, 1) == 0;
- }
- 
- /**
-@@ -161,7 +182,7 @@ static inline void page_pool_put_page(st
- 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- 	 */
- #ifdef CONFIG_PAGE_POOL
--	if (!page_pool_is_last_frag(pool, page))
-+	if (!page_pool_is_last_frag(page))
- 		return;
- 
- 	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
---- a/net/core/page_pool.c
-+++ b/net/core/page_pool.c
-@@ -376,6 +376,14 @@ static void page_pool_set_pp_info(struct
- {
- 	page->pp = pool;
- 	page->pp_magic |= PP_SIGNATURE;
-+
-+	/* Ensuring all pages have been split into one fragment initially:
-+	 * page_pool_set_pp_info() is only called once for every page when it
-+	 * is allocated from the page allocator and page_pool_fragment_page()
-+	 * is dirtying the same cache line as the page->pp_magic above, so
-+	 * the overhead is negligible.
-+	 */
-+	page_pool_fragment_page(page, 1);
- 	if (pool->p.init_callback)
- 		pool->p.init_callback(page, pool->p.init_arg);
- }
-@@ -678,7 +686,7 @@ void page_pool_put_page_bulk(struct page
- 		struct page *page = virt_to_head_page(data[i]);
- 
- 		/* It is not the last user for the page frag case */
--		if (!page_pool_is_last_frag(pool, page))
-+		if (!page_pool_is_last_frag(page))
- 			continue;
- 
- 		page = __page_pool_put_page(pool, page, -1, false);
-@@ -754,8 +762,7 @@ struct page *page_pool_alloc_frag(struct
- 	unsigned int max_size = PAGE_SIZE << pool->p.order;
- 	struct page *page = pool->frag_page;
- 
--	if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
--		    size > max_size))
-+	if (WARN_ON(size > max_size))
- 		return NULL;
- 
- 	size = ALIGN(size, dma_get_cache_alignment());

+ 0 - 101
target/linux/generic/backport-6.6/621-v6.10-01-net-move-skb_gro_receive_list-from-udp-to-core.patch

@@ -1,101 +0,0 @@
-From 8928756d53d5b99dcd18073dc7738b8ebdbe7d96 Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Thu, 2 May 2024 10:44:42 +0200
-Subject: [PATCH 1/6] net: move skb_gro_receive_list from udp to core
-
-This helper function will be used for TCP fraglist GRO support
-
-Acked-by: Paolo Abeni <[email protected]>
-Reviewed-by: Eric Dumazet <[email protected]>
-Signed-off-by: Felix Fietkau <[email protected]>
-Reviewed-by: David Ahern <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- include/net/gro.h      |  1 +
- net/core/gro.c         | 27 +++++++++++++++++++++++++++
- net/ipv4/udp_offload.c | 27 ---------------------------
- 3 files changed, 28 insertions(+), 27 deletions(-)
-
---- a/include/net/gro.h
-+++ b/include/net/gro.h
-@@ -442,6 +442,7 @@ static inline __wsum ip6_gro_compute_pse
- }
- 
- int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
-+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
- 
- /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
- static inline void gro_normal_list(struct napi_struct *napi)
---- a/net/core/gro.c
-+++ b/net/core/gro.c
-@@ -225,6 +225,33 @@ done:
- 	return 0;
- }
- 
-+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
-+{
-+	if (unlikely(p->len + skb->len >= 65536))
-+		return -E2BIG;
-+
-+	if (NAPI_GRO_CB(p)->last == p)
-+		skb_shinfo(p)->frag_list = skb;
-+	else
-+		NAPI_GRO_CB(p)->last->next = skb;
-+
-+	skb_pull(skb, skb_gro_offset(skb));
-+
-+	NAPI_GRO_CB(p)->last = skb;
-+	NAPI_GRO_CB(p)->count++;
-+	p->data_len += skb->len;
-+
-+	/* sk ownership - if any - completely transferred to the aggregated packet */
-+	skb->destructor = NULL;
-+	skb->sk = NULL;
-+	p->truesize += skb->truesize;
-+	p->len += skb->len;
-+
-+	NAPI_GRO_CB(skb)->same_flow = 1;
-+
-+	return 0;
-+}
-+
- 
- static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
- {
---- a/net/ipv4/udp_offload.c
-+++ b/net/ipv4/udp_offload.c
-@@ -538,33 +538,6 @@ out:
- 	return segs;
- }
- 
--static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
--{
--	if (unlikely(p->len + skb->len >= 65536))
--		return -E2BIG;
--
--	if (NAPI_GRO_CB(p)->last == p)
--		skb_shinfo(p)->frag_list = skb;
--	else
--		NAPI_GRO_CB(p)->last->next = skb;
--
--	skb_pull(skb, skb_gro_offset(skb));
--
--	NAPI_GRO_CB(p)->last = skb;
--	NAPI_GRO_CB(p)->count++;
--	p->data_len += skb->len;
--
--	/* sk ownership - if any - completely transferred to the aggregated packet */
--	skb->destructor = NULL;
--	skb->sk = NULL;
--	p->truesize += skb->truesize;
--	p->len += skb->len;
--
--	NAPI_GRO_CB(skb)->same_flow = 1;
--
--	return 0;
--}
--
- 
- #define UDP_GRO_CNT_MAX 64
- static struct sk_buff *udp_gro_receive_segment(struct list_head *head,

+ 0 - 177
target/linux/generic/backport-6.6/621-v6.10-02-net-add-support-for-segmenting-TCP-fraglist-GSO-pack.patch

@@ -1,177 +0,0 @@
-From bee88cd5bd83d40b8aec4d6cb729378f707f6197 Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Thu, 2 May 2024 10:44:43 +0200
-Subject: [PATCH 2/6] net: add support for segmenting TCP fraglist GSO packets
-
-Preparation for adding TCP fraglist GRO support. It expects packets to be
-combined in a similar way as UDP fraglist GSO packets.
-For IPv4 packets, NAT is handled in the same way as UDP fraglist GSO.
-
-Acked-by: Paolo Abeni <[email protected]>
-Reviewed-by: Eric Dumazet <[email protected]>
-Signed-off-by: Felix Fietkau <[email protected]>
-Reviewed-by: David Ahern <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- net/ipv4/tcp_offload.c   | 67 ++++++++++++++++++++++++++++++++++++++++
- net/ipv6/tcpv6_offload.c | 58 ++++++++++++++++++++++++++++++++++
- 2 files changed, 125 insertions(+)
-
---- a/net/ipv4/tcp_offload.c
-+++ b/net/ipv4/tcp_offload.c
-@@ -31,6 +31,70 @@ static void tcp_gso_tstamp(struct sk_buf
- 	}
- }
- 
-+static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
-+				     __be32 *oldip, __be32 newip,
-+				     __be16 *oldport, __be16 newport)
-+{
-+	struct tcphdr *th;
-+	struct iphdr *iph;
-+
-+	if (*oldip == newip && *oldport == newport)
-+		return;
-+
-+	th = tcp_hdr(seg);
-+	iph = ip_hdr(seg);
-+
-+	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
-+	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
-+	*oldport = newport;
-+
-+	csum_replace4(&iph->check, *oldip, newip);
-+	*oldip = newip;
-+}
-+
-+static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
-+{
-+	const struct tcphdr *th;
-+	const struct iphdr *iph;
-+	struct sk_buff *seg;
-+	struct tcphdr *th2;
-+	struct iphdr *iph2;
-+
-+	seg = segs;
-+	th = tcp_hdr(seg);
-+	iph = ip_hdr(seg);
-+	th2 = tcp_hdr(seg->next);
-+	iph2 = ip_hdr(seg->next);
-+
-+	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
-+	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
-+		return segs;
-+
-+	while ((seg = seg->next)) {
-+		th2 = tcp_hdr(seg);
-+		iph2 = ip_hdr(seg);
-+
-+		__tcpv4_gso_segment_csum(seg,
-+					 &iph2->saddr, iph->saddr,
-+					 &th2->source, th->source);
-+		__tcpv4_gso_segment_csum(seg,
-+					 &iph2->daddr, iph->daddr,
-+					 &th2->dest, th->dest);
-+	}
-+
-+	return segs;
-+}
-+
-+static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
-+					      netdev_features_t features)
-+{
-+	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
-+	if (IS_ERR(skb))
-+		return skb;
-+
-+	return __tcpv4_gso_segment_list_csum(skb);
-+}
-+
- static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
- 					netdev_features_t features)
- {
-@@ -40,6 +104,9 @@ static struct sk_buff *tcp4_gso_segment(
- 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
- 		return ERR_PTR(-EINVAL);
- 
-+	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
-+		return __tcp4_gso_segment_list(skb, features);
-+
- 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
- 		const struct iphdr *iph = ip_hdr(skb);
- 		struct tcphdr *th = tcp_hdr(skb);
---- a/net/ipv6/tcpv6_offload.c
-+++ b/net/ipv6/tcpv6_offload.c
-@@ -40,6 +40,61 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
- 	return 0;
- }
- 
-+static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
-+				     __be16 *oldport, __be16 newport)
-+{
-+	struct tcphdr *th;
-+
-+	if (*oldport == newport)
-+		return;
-+
-+	th = tcp_hdr(seg);
-+	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
-+	*oldport = newport;
-+}
-+
-+static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
-+{
-+	const struct tcphdr *th;
-+	const struct ipv6hdr *iph;
-+	struct sk_buff *seg;
-+	struct tcphdr *th2;
-+	struct ipv6hdr *iph2;
-+
-+	seg = segs;
-+	th = tcp_hdr(seg);
-+	iph = ipv6_hdr(seg);
-+	th2 = tcp_hdr(seg->next);
-+	iph2 = ipv6_hdr(seg->next);
-+
-+	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
-+	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
-+	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
-+		return segs;
-+
-+	while ((seg = seg->next)) {
-+		th2 = tcp_hdr(seg);
-+		iph2 = ipv6_hdr(seg);
-+
-+		iph2->saddr = iph->saddr;
-+		iph2->daddr = iph->daddr;
-+		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
-+		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
-+	}
-+
-+	return segs;
-+}
-+
-+static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
-+					      netdev_features_t features)
-+{
-+	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
-+	if (IS_ERR(skb))
-+		return skb;
-+
-+	return __tcpv6_gso_segment_list_csum(skb);
-+}
-+
- static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
- 					netdev_features_t features)
- {
-@@ -51,6 +106,9 @@ static struct sk_buff *tcp6_gso_segment(
- 	if (!pskb_may_pull(skb, sizeof(*th)))
- 		return ERR_PTR(-EINVAL);
- 
-+	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
-+		return __tcp6_gso_segment_list(skb, features);
-+
- 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
- 		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
- 		struct tcphdr *th = tcp_hdr(skb);

+ 0 - 75
target/linux/generic/backport-6.6/621-v6.10-03-net-add-code-for-TCP-fraglist-GRO.patch

@@ -1,75 +0,0 @@
-From 8d95dc474f85481652a0e422d2f1f079de81f63c Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Thu, 2 May 2024 10:44:44 +0200
-Subject: [PATCH 3/6] net: add code for TCP fraglist GRO
-
-This implements fraglist GRO similar to how it's handled in UDP, however
-no functional changes are added yet. The next change adds a heuristic for
-using fraglist GRO instead of regular GRO.
-
-Acked-by: Paolo Abeni <[email protected]>
-Signed-off-by: Felix Fietkau <[email protected]>
-Reviewed-by: Eric Dumazet <[email protected]>
-Reviewed-by: David Ahern <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- net/ipv4/tcp_offload.c   | 21 +++++++++++++++++++++
- net/ipv6/tcpv6_offload.c |  9 +++++++++
- 2 files changed, 30 insertions(+)
-
---- a/net/ipv4/tcp_offload.c
-+++ b/net/ipv4/tcp_offload.c
-@@ -342,6 +342,18 @@ found:
- 	flush |= p->decrypted ^ skb->decrypted;
- #endif
- 
-+	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
-+		flush |= (__force int)(flags ^ tcp_flag_word(th2));
-+		flush |= skb->ip_summed != p->ip_summed;
-+		flush |= skb->csum_level != p->csum_level;
-+		flush |= NAPI_GRO_CB(p)->count >= 64;
-+
-+		if (flush || skb_gro_receive_list(p, skb))
-+			mss = 1;
-+
-+		goto out_check_final;
-+	}
-+
- 	if (flush || skb_gro_receive(p, skb)) {
- 		mss = 1;
- 		goto out_check_final;
-@@ -406,6 +418,15 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_com
- 	const struct iphdr *iph = ip_hdr(skb);
- 	struct tcphdr *th = tcp_hdr(skb);
- 
-+	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
-+		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
-+		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
-+
-+		__skb_incr_checksum_unnecessary(skb);
-+
-+		return 0;
-+	}
-+
- 	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
- 				  iph->daddr, 0);
- 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
---- a/net/ipv6/tcpv6_offload.c
-+++ b/net/ipv6/tcpv6_offload.c
-@@ -32,6 +32,15 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
- 	const struct ipv6hdr *iph = ipv6_hdr(skb);
- 	struct tcphdr *th = tcp_hdr(skb);
- 
-+	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
-+		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
-+		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
-+
-+		__skb_incr_checksum_unnecessary(skb);
-+
-+		return 0;
-+	}
-+
- 	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
- 				  &iph->daddr, 0);
- 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

+ 0 - 88
target/linux/generic/backport-6.6/621-v6.10-04-net-create-tcp_gro_lookup-helper-function.patch

@@ -1,88 +0,0 @@
-From 80e85fbdf19ecc4dfa31ecf639adb55555db02fe Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Thu, 2 May 2024 10:44:45 +0200
-Subject: [PATCH 4/6] net: create tcp_gro_lookup helper function
-
-This pulls the flow port matching out of tcp_gro_receive, so that it can be
-reused for the next change, which adds the TCP fraglist GRO heuristic.
-
-Acked-by: Paolo Abeni <[email protected]>
-Reviewed-by: Eric Dumazet <[email protected]>
-Signed-off-by: Felix Fietkau <[email protected]>
-Reviewed-by: David Ahern <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- include/net/tcp.h      |  1 +
- net/ipv4/tcp_offload.c | 41 +++++++++++++++++++++++++----------------
- 2 files changed, 26 insertions(+), 16 deletions(-)
-
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -2101,6 +2101,7 @@ void tcp_v4_destroy_sock(struct sock *sk
- 
- struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
- 				netdev_features_t features);
-+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
- struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
- INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
- INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
---- a/net/ipv4/tcp_offload.c
-+++ b/net/ipv4/tcp_offload.c
-@@ -251,6 +251,27 @@ out:
- 	return segs;
- }
- 
-+struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
-+{
-+	struct tcphdr *th2;
-+	struct sk_buff *p;
-+
-+	list_for_each_entry(p, head, list) {
-+		if (!NAPI_GRO_CB(p)->same_flow)
-+			continue;
-+
-+		th2 = tcp_hdr(p);
-+		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
-+			NAPI_GRO_CB(p)->same_flow = 0;
-+			continue;
-+		}
-+
-+		return p;
-+	}
-+
-+	return NULL;
-+}
-+
- struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
- {
- 	struct sk_buff *pp = NULL;
-@@ -288,24 +309,12 @@ struct sk_buff *tcp_gro_receive(struct l
- 	len = skb_gro_len(skb);
- 	flags = tcp_flag_word(th);
- 
--	list_for_each_entry(p, head, list) {
--		if (!NAPI_GRO_CB(p)->same_flow)
--			continue;
--
--		th2 = tcp_hdr(p);
--
--		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
--			NAPI_GRO_CB(p)->same_flow = 0;
--			continue;
--		}
--
--		goto found;
--	}
--	p = NULL;
--	goto out_check_final;
-+	p = tcp_gro_lookup(head, th);
-+	if (!p)
-+		goto out_check_final;
- 
--found:
- 	/* Include the IP ID check below from the inner most IP hdr */
-+	th2 = tcp_hdr(p);
- 	flush = NAPI_GRO_CB(p)->flush;
- 	flush |= (__force int)(flags & TCP_FLAG_CWR);
- 	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &

+ 0 - 166
target/linux/generic/backport-6.6/621-v6.10-05-net-create-tcp_gro_header_pull-helper-function.patch

@@ -1,166 +0,0 @@
-From 7516b27c555c1711ec17a5d891befb6986e573a3 Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Thu, 2 May 2024 10:44:46 +0200
-Subject: [PATCH 5/6] net: create tcp_gro_header_pull helper function
-
-Pull the code out of tcp_gro_receive in order to access the tcp header
-from tcp4/6_gro_receive.
-
-Acked-by: Paolo Abeni <[email protected]>
-Reviewed-by: Eric Dumazet <[email protected]>
-Signed-off-by: Felix Fietkau <[email protected]>
-Reviewed-by: David Ahern <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- include/net/tcp.h        |  4 ++-
- net/ipv4/tcp_offload.c   | 55 +++++++++++++++++++++++++---------------
- net/ipv6/tcpv6_offload.c | 18 +++++++++----
- 3 files changed, 50 insertions(+), 27 deletions(-)
-
---- a/include/net/tcp.h
-+++ b/include/net/tcp.h
-@@ -2101,8 +2101,10 @@ void tcp_v4_destroy_sock(struct sock *sk
- 
- struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
- 				netdev_features_t features);
-+struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
- struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
--struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
-+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
-+				struct tcphdr *th);
- INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
- INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
- INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
---- a/net/ipv4/tcp_offload.c
-+++ b/net/ipv4/tcp_offload.c
-@@ -272,40 +272,46 @@ struct sk_buff *tcp_gro_lookup(struct li
- 	return NULL;
- }
- 
--struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
-+struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
- {
--	struct sk_buff *pp = NULL;
--	struct sk_buff *p;
-+	unsigned int thlen, hlen, off;
- 	struct tcphdr *th;
--	struct tcphdr *th2;
--	unsigned int len;
--	unsigned int thlen;
--	__be32 flags;
--	unsigned int mss = 1;
--	unsigned int hlen;
--	unsigned int off;
--	int flush = 1;
--	int i;
- 
- 	off = skb_gro_offset(skb);
- 	hlen = off + sizeof(*th);
- 	th = skb_gro_header(skb, hlen, off);
- 	if (unlikely(!th))
--		goto out;
-+		return NULL;
- 
- 	thlen = th->doff * 4;
- 	if (thlen < sizeof(*th))
--		goto out;
-+		return NULL;
- 
- 	hlen = off + thlen;
- 	if (skb_gro_header_hard(skb, hlen)) {
- 		th = skb_gro_header_slow(skb, hlen, off);
- 		if (unlikely(!th))
--			goto out;
-+			return NULL;
- 	}
- 
- 	skb_gro_pull(skb, thlen);
- 
-+	return th;
-+}
-+
-+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
-+				struct tcphdr *th)
-+{
-+	unsigned int thlen = th->doff * 4;
-+	struct sk_buff *pp = NULL;
-+	struct sk_buff *p;
-+	struct tcphdr *th2;
-+	unsigned int len;
-+	__be32 flags;
-+	unsigned int mss = 1;
-+	int flush = 1;
-+	int i;
-+
- 	len = skb_gro_len(skb);
- 	flags = tcp_flag_word(th);
- 
-@@ -384,7 +390,6 @@ out_check_final:
- 	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
- 		pp = p;
- 
--out:
- 	NAPI_GRO_CB(skb)->flush |= (flush != 0);
- 
- 	return pp;
-@@ -411,15 +416,23 @@ EXPORT_SYMBOL(tcp_gro_complete);
- INDIRECT_CALLABLE_SCOPE
- struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
- {
-+	struct tcphdr *th;
-+
- 	/* Don't bother verifying checksum if we're going to flush anyway. */
- 	if (!NAPI_GRO_CB(skb)->flush &&
- 	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
--				      inet_gro_compute_pseudo)) {
--		NAPI_GRO_CB(skb)->flush = 1;
--		return NULL;
--	}
-+				      inet_gro_compute_pseudo))
-+		goto flush;
- 
--	return tcp_gro_receive(head, skb);
-+	th = tcp_gro_pull_header(skb);
-+	if (!th)
-+		goto flush;
-+
-+	return tcp_gro_receive(head, skb, th);
-+
-+flush:
-+	NAPI_GRO_CB(skb)->flush = 1;
-+	return NULL;
- }
- 
- INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
---- a/net/ipv6/tcpv6_offload.c
-+++ b/net/ipv6/tcpv6_offload.c
-@@ -16,15 +16,23 @@
- INDIRECT_CALLABLE_SCOPE
- struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
- {
-+	struct tcphdr *th;
-+
- 	/* Don't bother verifying checksum if we're going to flush anyway. */
- 	if (!NAPI_GRO_CB(skb)->flush &&
- 	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
--				      ip6_gro_compute_pseudo)) {
--		NAPI_GRO_CB(skb)->flush = 1;
--		return NULL;
--	}
-+				      ip6_gro_compute_pseudo))
-+		goto flush;
-+
-+	th = tcp_gro_pull_header(skb);
-+	if (!th)
-+		goto flush;
-+
-+	return tcp_gro_receive(head, skb, th);
- 
--	return tcp_gro_receive(head, skb);
-+flush:
-+	NAPI_GRO_CB(skb)->flush = 1;
-+	return NULL;
- }
- 
- INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)

+ 0 - 140
target/linux/generic/backport-6.6/621-v6.10-06-net-add-heuristic-for-enabling-TCP-fraglist-GRO.patch

@@ -1,140 +0,0 @@
-From c9d1d23e5239f41700be69133a5769ac5ebc88a8 Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Thu, 2 May 2024 10:44:47 +0200
-Subject: [PATCH 6/6] net: add heuristic for enabling TCP fraglist GRO
-
-When forwarding TCP after GRO, software segmentation is very expensive,
-especially when the checksum needs to be recalculated.
-One case where that's currently unavoidable is when routing packets over
-PPPoE. Performance improves significantly when using fraglist GRO
-implemented in the same way as for UDP.
-
-When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
-socket in the same netns as the receiving device. While this may not
-cover all relevant use cases in multi-netns configurations, it should be
-good enough for most configurations that need this.
-
-Here's a measurement of running 2 TCP streams through a MediaTek MT7622
-device (2-core Cortex-A53), which runs NAT with flow offload enabled from
-one ethernet port to PPPoE on another ethernet port + cake qdisc set to
-1Gbps.
-
-rx-gro-list off: 630 Mbit/s, CPU 35% idle
-rx-gro-list on:  770 Mbit/s, CPU 40% idle
-
-Acked-by: Paolo Abeni <[email protected]>
-Reviewed-by: Eric Dumazet <[email protected]>
-Signed-off-by: Felix Fietkau <[email protected]>
-Reviewed-by: David Ahern <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- net/ipv4/tcp_offload.c   | 32 ++++++++++++++++++++++++++++++++
- net/ipv6/tcpv6_offload.c | 35 +++++++++++++++++++++++++++++++++++
- 2 files changed, 67 insertions(+)
-
---- a/net/ipv4/tcp_offload.c
-+++ b/net/ipv4/tcp_offload.c
-@@ -413,6 +413,36 @@ void tcp_gro_complete(struct sk_buff *sk
- }
- EXPORT_SYMBOL(tcp_gro_complete);
- 
-+static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
-+				    struct tcphdr *th)
-+{
-+	const struct iphdr *iph;
-+	struct sk_buff *p;
-+	struct sock *sk;
-+	struct net *net;
-+	int iif, sdif;
-+
-+	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
-+		return;
-+
-+	p = tcp_gro_lookup(head, th);
-+	if (p) {
-+		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
-+		return;
-+	}
-+
-+	inet_get_iif_sdif(skb, &iif, &sdif);
-+	iph = skb_gro_network_header(skb);
-+	net = dev_net(skb->dev);
-+	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
-+				       iph->saddr, th->source,
-+				       iph->daddr, ntohs(th->dest),
-+				       iif, sdif);
-+	NAPI_GRO_CB(skb)->is_flist = !sk;
-+	if (sk)
-+		sock_put(sk);
-+}
-+
- INDIRECT_CALLABLE_SCOPE
- struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
- {
-@@ -428,6 +458,8 @@ struct sk_buff *tcp4_gro_receive(struct
- 	if (!th)
- 		goto flush;
- 
-+	tcp4_check_fraglist_gro(head, skb, th);
-+
- 	return tcp_gro_receive(head, skb, th);
- 
- flush:
---- a/net/ipv6/tcpv6_offload.c
-+++ b/net/ipv6/tcpv6_offload.c
-@@ -7,12 +7,45 @@
-  */
- #include <linux/indirect_call_wrapper.h>
- #include <linux/skbuff.h>
-+#include <net/inet6_hashtables.h>
- #include <net/gro.h>
- #include <net/protocol.h>
- #include <net/tcp.h>
- #include <net/ip6_checksum.h>
- #include "ip6_offload.h"
- 
-+static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
-+				    struct tcphdr *th)
-+{
-+#if IS_ENABLED(CONFIG_IPV6)
-+	const struct ipv6hdr *hdr;
-+	struct sk_buff *p;
-+	struct sock *sk;
-+	struct net *net;
-+	int iif, sdif;
-+
-+	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
-+		return;
-+
-+	p = tcp_gro_lookup(head, th);
-+	if (p) {
-+		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
-+		return;
-+	}
-+
-+	inet6_get_iif_sdif(skb, &iif, &sdif);
-+	hdr = skb_gro_network_header(skb);
-+	net = dev_net(skb->dev);
-+	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
-+					&hdr->saddr, th->source,
-+					&hdr->daddr, ntohs(th->dest),
-+					iif, sdif);
-+	NAPI_GRO_CB(skb)->is_flist = !sk;
-+	if (sk)
-+		sock_put(sk);
-+#endif /* IS_ENABLED(CONFIG_IPV6) */
-+}
-+
- INDIRECT_CALLABLE_SCOPE
- struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
- {
-@@ -28,6 +61,8 @@ struct sk_buff *tcp6_gro_receive(struct
- 	if (!th)
- 		goto flush;
- 
-+	tcp6_check_fraglist_gro(head, skb, th);
-+
- 	return tcp_gro_receive(head, skb, th);
- 
- flush:

+ 0 - 80
target/linux/generic/backport-6.6/622-v6.12-net-gso-fix-tcp-fraglist-segmentation-after-pull-fro.patch

@@ -1,80 +0,0 @@
-From 17bd3bd82f9f79f3feba15476c2b2c95a9b11ff8 Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Thu, 26 Sep 2024 10:53:14 +0200
-Subject: [PATCH] net: gso: fix tcp fraglist segmentation after pull from
- frag_list
-
-Detect tcp gso fraglist skbs with corrupted geometry (see below) and
-pass these to skb_segment instead of skb_segment_list, as the first
-can segment them correctly.
-
-Valid SKB_GSO_FRAGLIST skbs
-- consist of two or more segments
-- the head_skb holds the protocol headers plus first gso_size
-- one or more frag_list skbs hold exactly one segment
-- all but the last must be gso_size
-
-Optional datapath hooks such as NAT and BPF (bpf_skb_pull_data) can
-modify these skbs, breaking these invariants.
-
-In extreme cases they pull all data into skb linear. For TCP, this
-causes a NULL ptr deref in __tcpv4_gso_segment_list_csum at
-tcp_hdr(seg->next).
-
-Detect invalid geometry due to pull, by checking head_skb size.
-Don't just drop, as this may blackhole a destination. Convert to be
-able to pass to regular skb_segment.
-
-Approach and description based on a patch by Willem de Bruijn.
-
-Link: https://lore.kernel.org/netdev/[email protected]/
-Link: https://lore.kernel.org/netdev/[email protected]/
-Fixes: bee88cd5bd83 ("net: add support for segmenting TCP fraglist GSO packets")
-Cc: [email protected]
-Signed-off-by: Felix Fietkau <[email protected]>
-Reviewed-by: Willem de Bruijn <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- net/ipv4/tcp_offload.c   | 10 ++++++++--
- net/ipv6/tcpv6_offload.c | 10 ++++++++--
- 2 files changed, 16 insertions(+), 4 deletions(-)
-
---- a/net/ipv4/tcp_offload.c
-+++ b/net/ipv4/tcp_offload.c
-@@ -104,8 +104,14 @@ static struct sk_buff *tcp4_gso_segment(
- 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
- 		return ERR_PTR(-EINVAL);
- 
--	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
--		return __tcp4_gso_segment_list(skb, features);
-+	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
-+		struct tcphdr *th = tcp_hdr(skb);
-+
-+		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
-+			return __tcp4_gso_segment_list(skb, features);
-+
-+		skb->ip_summed = CHECKSUM_NONE;
-+	}
- 
- 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
- 		const struct iphdr *iph = ip_hdr(skb);
---- a/net/ipv6/tcpv6_offload.c
-+++ b/net/ipv6/tcpv6_offload.c
-@@ -158,8 +158,14 @@ static struct sk_buff *tcp6_gso_segment(
- 	if (!pskb_may_pull(skb, sizeof(*th)))
- 		return ERR_PTR(-EINVAL);
- 
--	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
--		return __tcp6_gso_segment_list(skb, features);
-+	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
-+		struct tcphdr *th = tcp_hdr(skb);
-+
-+		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
-+			return __tcp6_gso_segment_list(skb, features);
-+
-+		skb->ip_summed = CHECKSUM_NONE;
-+	}
- 
- 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
- 		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

+ 0 - 59
target/linux/generic/backport-6.6/623-v6.14-net-ipv6-fix-TCP-GSO-segmentation-with-NAT.patch

@@ -1,59 +0,0 @@
-From daa624d3c2ddffdcbad140a9625a4064371db44f Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Tue, 11 Mar 2025 22:25:30 +0100
-Subject: [PATCH] net: ipv6: fix TCP GSO segmentation with NAT
-
-When updating the source/destination address, the TCP/UDP checksum needs to
-be updated as well.
-
-Fixes: bee88cd5bd83 ("net: add support for segmenting TCP fraglist GSO packets")
-Signed-off-by: Felix Fietkau <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Paolo Abeni <[email protected]>
----
- net/ipv6/tcpv6_offload.c | 21 +++++++++++++++------
- 1 file changed, 15 insertions(+), 6 deletions(-)
-
---- a/net/ipv6/tcpv6_offload.c
-+++ b/net/ipv6/tcpv6_offload.c
-@@ -93,14 +93,23 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
- }
- 
- static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
-+				     struct in6_addr *oldip,
-+				     const struct in6_addr *newip,
- 				     __be16 *oldport, __be16 newport)
- {
--	struct tcphdr *th;
-+	struct tcphdr *th = tcp_hdr(seg);
-+
-+	if (!ipv6_addr_equal(oldip, newip)) {
-+		inet_proto_csum_replace16(&th->check, seg,
-+					  oldip->s6_addr32,
-+					  newip->s6_addr32,
-+					  true);
-+		*oldip = *newip;
-+	}
- 
- 	if (*oldport == newport)
- 		return;
- 
--	th = tcp_hdr(seg);
- 	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
- 	*oldport = newport;
- }
-@@ -128,10 +137,10 @@ static struct sk_buff *__tcpv6_gso_segme
- 		th2 = tcp_hdr(seg);
- 		iph2 = ipv6_hdr(seg);
- 
--		iph2->saddr = iph->saddr;
--		iph2->daddr = iph->daddr;
--		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
--		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
-+		__tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
-+					 &th2->source, th->source);
-+		__tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
-+					 &th2->dest, th->dest);
- 	}
- 
- 	return segs;

+ 0 - 79
target/linux/generic/backport-6.6/624-v6.15-ppp-use-IFF_NO_QUEUE-in-virtual-interfaces.patch

@@ -1,79 +0,0 @@
-From: Qingfang Deng <[email protected]>
-Date: Sat, 1 Mar 2025 21:55:16 +0800
-Subject: [PATCH] ppp: use IFF_NO_QUEUE in virtual interfaces
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-For PPPoE, PPTP, and PPPoL2TP, the start_xmit() function directly
-forwards packets to the underlying network stack and never returns
-anything other than 1. So these interfaces do not require a qdisc,
-and the IFF_NO_QUEUE flag should be set.
-
-Introduces a direct_xmit flag in struct ppp_channel to indicate when
-IFF_NO_QUEUE should be applied. The flag is set in ppp_connect_channel()
-for relevant protocols.
-
-While at it, remove the usused latency member from struct ppp_channel.
-
-Signed-off-by: Qingfang Deng <[email protected]>
-Reviewed-by: Toke Høiland-Jørgensen <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
-
---- a/drivers/net/ppp/ppp_generic.c
-+++ b/drivers/net/ppp/ppp_generic.c
-@@ -3504,6 +3504,10 @@ ppp_connect_channel(struct channel *pch,
- 		ret = -ENOTCONN;
- 		goto outl;
- 	}
-+	if (pch->chan->direct_xmit)
-+		ppp->dev->priv_flags |= IFF_NO_QUEUE;
-+	else
-+		ppp->dev->priv_flags &= ~IFF_NO_QUEUE;
- 	spin_unlock_bh(&pch->downl);
- 	if (pch->file.hdrlen > ppp->file.hdrlen)
- 		ppp->file.hdrlen = pch->file.hdrlen;
---- a/drivers/net/ppp/pppoe.c
-+++ b/drivers/net/ppp/pppoe.c
-@@ -693,6 +693,7 @@ static int pppoe_connect(struct socket *
- 		po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
- 		po->chan.private = sk;
- 		po->chan.ops = &pppoe_chan_ops;
-+		po->chan.direct_xmit = true;
- 
- 		error = ppp_register_net_channel(dev_net(dev), &po->chan);
- 		if (error) {
---- a/drivers/net/ppp/pptp.c
-+++ b/drivers/net/ppp/pptp.c
-@@ -469,6 +469,7 @@ static int pptp_connect(struct socket *s
- 	po->chan.mtu -= PPTP_HEADER_OVERHEAD;
- 
- 	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
-+	po->chan.direct_xmit = true;
- 	error = ppp_register_channel(&po->chan);
- 	if (error) {
- 		pr_err("PPTP: failed to register PPP channel (%d)\n", error);
---- a/include/linux/ppp_channel.h
-+++ b/include/linux/ppp_channel.h
-@@ -42,8 +42,7 @@ struct ppp_channel {
- 	int		hdrlen;		/* amount of headroom channel needs */
- 	void		*ppp;		/* opaque to channel */
- 	int		speed;		/* transfer rate (bytes/second) */
--	/* the following is not used at present */
--	int		latency;	/* overhead time in milliseconds */
-+	bool		direct_xmit;	/* no qdisc, xmit directly */
- };
- 
- #ifdef __KERNEL__
---- a/net/l2tp/l2tp_ppp.c
-+++ b/net/l2tp/l2tp_ppp.c
-@@ -820,6 +820,7 @@ static int pppol2tp_connect(struct socke
- 	po->chan.private = sk;
- 	po->chan.ops	 = &pppol2tp_chan_ops;
- 	po->chan.mtu	 = pppol2tp_tunnel_mtu(tunnel);
-+	po->chan.direct_xmit	= true;
- 
- 	error = ppp_register_net_channel(sock_net(sk), &po->chan);
- 	if (error) {

+ 0 - 41
target/linux/generic/backport-6.6/625-v6.16-net-bridge-locally-receive-all-multicast-packets-if-.patch

@@ -1,41 +0,0 @@
-From a496d2f0fd612ab9e10700afe00dc9267bad788b Mon Sep 17 00:00:00 2001
-From: Shengyu Qu <[email protected]>
-Date: Mon, 14 Apr 2025 18:56:01 +0800
-Subject: net: bridge: locally receive all multicast packets if IFF_ALLMULTI is
- set
-
-If multicast snooping is enabled, multicast packets may not always end up
-on the local bridge interface, if the host is not a member of the multicast
-group. Similar to how IFF_PROMISC allows all packets to be received
-locally, let IFF_ALLMULTI allow all multicast packets to be received.
-
-OpenWrt uses a user space daemon for DHCPv6/RA/NDP handling, and in relay
-mode it sets the ALLMULTI flag in order to receive all relevant queries on
-the network.
-
-This works for normal network interfaces and non-snooping bridges, but not
-snooping bridges (unless multicast routing is enabled).
-
-Reported-by: Felix Fietkau <[email protected]>
-Closes: https://github.com/openwrt/openwrt/issues/15857#issuecomment-2662851243
-Signed-off-by: Shengyu Qu <[email protected]>
-Reviewed-by: Ido Schimmel <[email protected]>
-Acked-by: Nikolay Aleksandrov <[email protected]>
-Link: https://patch.msgid.link/OSZPR01MB8434308370ACAFA90A22980798B32@OSZPR01MB8434.jpnprd01.prod.outlook.com
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- net/bridge/br_input.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/net/bridge/br_input.c
-+++ b/net/bridge/br_input.c
-@@ -184,7 +184,8 @@ int br_handle_frame_finish(struct net *n
- 		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
- 		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
- 			if ((mdst && mdst->host_joined) ||
--			    br_multicast_is_router(brmctx, skb)) {
-+			    br_multicast_is_router(brmctx, skb) ||
-+			    br->dev->flags & IFF_ALLMULTI) {
- 				local_rcv = true;
- 				DEV_STATS_INC(br->dev, multicast);
- 			}

+ 0 - 135
target/linux/generic/backport-6.6/700-v6.10-net-create-a-dummy-net_device-allocator.patch

@@ -1,135 +0,0 @@
-From c661050f93d3fd37a33c06041bb18a89688de7d2 Mon Sep 17 00:00:00 2001
-From: Breno Leitao <[email protected]>
-Date: Mon, 22 Apr 2024 05:38:56 -0700
-Subject: [PATCH] net: create a dummy net_device allocator
-
-It is impossible to use init_dummy_netdev together with alloc_netdev()
-as the 'setup' argument.
-
-This is because alloc_netdev() initializes some fields in the net_device
-structure, and later init_dummy_netdev() memzero them all. This causes
-some problems as reported here:
-
-	https://lore.kernel.org/all/[email protected]/
-
-Split the init_dummy_netdev() function in two. Create a new function called
-init_dummy_netdev_core() that does not memzero the net_device structure.
-Then have init_dummy_netdev() memzero-ing and calling
-init_dummy_netdev_core(), keeping the old behaviour.
-
-init_dummy_netdev_core() is the new function that could be called as an
-argument for alloc_netdev().
-
-Also, create a helper to allocate and initialize dummy net devices,
-leveraging init_dummy_netdev_core() as the setup argument. This function
-basically simplify the allocation of dummy devices, by allocating and
-initializing it. Freeing the device continue to be done through
-free_netdev()
-
-Suggested-by: Jakub Kicinski <[email protected]>
-Signed-off-by: Breno Leitao <[email protected]>
-Reviewed-by: Ido Schimmel <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- include/linux/netdevice.h |  3 +++
- net/core/dev.c            | 56 ++++++++++++++++++++++++++-------------
- 2 files changed, 41 insertions(+), 18 deletions(-)
-
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -4581,6 +4581,9 @@ static inline void netif_addr_unlock_bh(
- 
- void ether_setup(struct net_device *dev);
- 
-+/* Allocate dummy net_device */
-+struct net_device *alloc_netdev_dummy(int sizeof_priv);
-+
- /* Support for loadable net-drivers */
- struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
- 				    unsigned char name_assign_type,
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -10402,25 +10402,12 @@ err_free_name:
- }
- EXPORT_SYMBOL(register_netdevice);
- 
--/**
-- *	init_dummy_netdev	- init a dummy network device for NAPI
-- *	@dev: device to init
-- *
-- *	This takes a network device structure and initialize the minimum
-- *	amount of fields so it can be used to schedule NAPI polls without
-- *	registering a full blown interface. This is to be used by drivers
-- *	that need to tie several hardware interfaces to a single NAPI
-- *	poll scheduler due to HW limitations.
-+/* Initialize the core of a dummy net device.
-+ * This is useful if you are calling this function after alloc_netdev(),
-+ * since it does not memset the net_device fields.
-  */
--int init_dummy_netdev(struct net_device *dev)
-+static void init_dummy_netdev_core(struct net_device *dev)
- {
--	/* Clear everything. Note we don't initialize spinlocks
--	 * are they aren't supposed to be taken by any of the
--	 * NAPI code and this dummy netdev is supposed to be
--	 * only ever used for NAPI polls
--	 */
--	memset(dev, 0, sizeof(struct net_device));
--
- 	/* make sure we BUG if trying to hit standard
- 	 * register/unregister code path
- 	 */
-@@ -10440,12 +10427,32 @@ int init_dummy_netdev(struct net_device
- 	 * because users of this 'device' dont need to change
- 	 * its refcount.
- 	 */
-+}
-+
-+/**
-+ *	init_dummy_netdev	- init a dummy network device for NAPI
-+ *	@dev: device to init
-+ *
-+ *	This takes a network device structure and initializes the minimum
-+ *	amount of fields so it can be used to schedule NAPI polls without
-+ *	registering a full blown interface. This is to be used by drivers
-+ *	that need to tie several hardware interfaces to a single NAPI
-+ *	poll scheduler due to HW limitations.
-+ */
-+int init_dummy_netdev(struct net_device *dev)
-+{
-+	/* Clear everything. Note we don't initialize spinlocks
-+	 * as they aren't supposed to be taken by any of the
-+	 * NAPI code and this dummy netdev is supposed to be
-+	 * only ever used for NAPI polls
-+	 */
-+	memset(dev, 0, sizeof(struct net_device));
-+	init_dummy_netdev_core(dev);
- 
- 	return 0;
- }
- EXPORT_SYMBOL_GPL(init_dummy_netdev);
- 
--
- /**
-  *	register_netdev	- register a network device
-  *	@dev: device to register
-@@ -11039,6 +11046,19 @@ void free_netdev(struct net_device *dev)
- EXPORT_SYMBOL(free_netdev);
- 
- /**
-+ * alloc_netdev_dummy - Allocate and initialize a dummy net device.
-+ * @sizeof_priv: size of private data to allocate space for
-+ *
-+ * Return: the allocated net_device on success, NULL otherwise
-+ */
-+struct net_device *alloc_netdev_dummy(int sizeof_priv)
-+{
-+	return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
-+			    init_dummy_netdev_core);
-+}
-+EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
-+
-+/**
-  *	synchronize_net -  Synchronize with packet receive processing
-  *
-  *	Wait for packets currently being received to be done.

+ 0 - 29
target/linux/generic/backport-6.6/701-v6.8-netfilter-nf_tables-fix-bidirectional-offload-regres.patch

@@ -1,29 +0,0 @@
-From 84443741faab9045d53f022a9ac6a6633067a481 Mon Sep 17 00:00:00 2001
-From: Felix Fietkau <[email protected]>
-Date: Wed, 14 Feb 2024 15:42:35 +0100
-Subject: [PATCH] netfilter: nf_tables: fix bidirectional offload regression
-
-Commit 8f84780b84d6 ("netfilter: flowtable: allow unidirectional rules")
-made unidirectional flow offload possible, while completely ignoring (and
-breaking) bidirectional flow offload for nftables.
-Add the missing flag that was left out as an exercise for the reader :)
-
-Cc: Vlad Buslov <[email protected]>
-Fixes: 8f84780b84d6 ("netfilter: flowtable: allow unidirectional rules")
-Reported-by: Daniel Golle <[email protected]>
-Signed-off-by: Felix Fietkau <[email protected]>
-Signed-off-by: Pablo Neira Ayuso <[email protected]>
----
- net/netfilter/nft_flow_offload.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/net/netfilter/nft_flow_offload.c
-+++ b/net/netfilter/nft_flow_offload.c
-@@ -367,6 +367,7 @@ static void nft_flow_offload_eval(const
- 	if (tcph)
- 		flow_offload_ct_tcp(ct);
- 
-+	__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
- 	ret = flow_offload_add(flowtable, flow);
- 	if (ret < 0)
- 		goto err_flow_add;

+ 0 - 2386
target/linux/generic/backport-6.6/702-01-v6.7-net-phy-aquantia-move-to-separate-directory.patch

@@ -1,2386 +0,0 @@
-From d2213db3f49bce8e7a87c8de05b9a091f78f654e Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Tue, 14 Nov 2023 15:08:41 +0100
-Subject: [PATCH 1/3] net: phy: aquantia: move to separate directory
-
-Move aquantia PHY driver to separate driectory in preparation for
-firmware loading support to keep things tidy.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- drivers/net/phy/Kconfig                         | 5 +----
- drivers/net/phy/Makefile                        | 6 +-----
- drivers/net/phy/aquantia/Kconfig                | 5 +++++
- drivers/net/phy/aquantia/Makefile               | 6 ++++++
- drivers/net/phy/{ => aquantia}/aquantia.h       | 0
- drivers/net/phy/{ => aquantia}/aquantia_hwmon.c | 0
- drivers/net/phy/{ => aquantia}/aquantia_main.c  | 0
- 7 files changed, 13 insertions(+), 9 deletions(-)
- create mode 100644 drivers/net/phy/aquantia/Kconfig
- create mode 100644 drivers/net/phy/aquantia/Makefile
- rename drivers/net/phy/{ => aquantia}/aquantia.h (100%)
- rename drivers/net/phy/{ => aquantia}/aquantia_hwmon.c (100%)
- rename drivers/net/phy/{ => aquantia}/aquantia_main.c (100%)
-
---- a/drivers/net/phy/Kconfig
-+++ b/drivers/net/phy/Kconfig
-@@ -96,10 +96,7 @@ config ADIN1100_PHY
- 	  Currently supports the:
- 	  - ADIN1100 - Robust,Industrial, Low Power 10BASE-T1L Ethernet PHY
- 
--config AQUANTIA_PHY
--	tristate "Aquantia PHYs"
--	help
--	  Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
-+source "drivers/net/phy/aquantia/Kconfig"
- 
- config AX88796B_PHY
- 	tristate "Asix PHYs"
---- a/drivers/net/phy/Makefile
-+++ b/drivers/net/phy/Makefile
-@@ -35,11 +35,7 @@ obj-y				+= $(sfp-obj-y) $(sfp-obj-m)
- obj-$(CONFIG_ADIN_PHY)		+= adin.o
- obj-$(CONFIG_ADIN1100_PHY)	+= adin1100.o
- obj-$(CONFIG_AMD_PHY)		+= amd.o
--aquantia-objs			+= aquantia_main.o
--ifdef CONFIG_HWMON
--aquantia-objs			+= aquantia_hwmon.o
--endif
--obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia.o
-+obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia/
- obj-$(CONFIG_AT803X_PHY)	+= at803x.o
- obj-$(CONFIG_AX88796B_PHY)	+= ax88796b.o
- obj-$(CONFIG_BCM54140_PHY)	+= bcm54140.o
---- /dev/null
-+++ b/drivers/net/phy/aquantia/Kconfig
-@@ -0,0 +1,5 @@
-+# SPDX-License-Identifier: GPL-2.0-only
-+config AQUANTIA_PHY
-+	tristate "Aquantia PHYs"
-+	help
-+	  Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
---- /dev/null
-+++ b/drivers/net/phy/aquantia/Makefile
-@@ -0,0 +1,6 @@
-+# SPDX-License-Identifier: GPL-2.0
-+aquantia-objs			+= aquantia_main.o
-+ifdef CONFIG_HWMON
-+aquantia-objs			+= aquantia_hwmon.o
-+endif
-+obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia.o
---- a/drivers/net/phy/aquantia.h
-+++ /dev/null
-@@ -1,16 +0,0 @@
--/* SPDX-License-Identifier: GPL-2.0 */
--/* HWMON driver for Aquantia PHY
-- *
-- * Author: Nikita Yushchenko <[email protected]>
-- * Author: Andrew Lunn <[email protected]>
-- * Author: Heiner Kallweit <[email protected]>
-- */
--
--#include <linux/device.h>
--#include <linux/phy.h>
--
--#if IS_REACHABLE(CONFIG_HWMON)
--int aqr_hwmon_probe(struct phy_device *phydev);
--#else
--static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
--#endif
---- /dev/null
-+++ b/drivers/net/phy/aquantia/aquantia.h
-@@ -0,0 +1,16 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/* HWMON driver for Aquantia PHY
-+ *
-+ * Author: Nikita Yushchenko <[email protected]>
-+ * Author: Andrew Lunn <[email protected]>
-+ * Author: Heiner Kallweit <[email protected]>
-+ */
-+
-+#include <linux/device.h>
-+#include <linux/phy.h>
-+
-+#if IS_REACHABLE(CONFIG_HWMON)
-+int aqr_hwmon_probe(struct phy_device *phydev);
-+#else
-+static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
-+#endif
---- /dev/null
-+++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
-@@ -0,0 +1,250 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/* HWMON driver for Aquantia PHY
-+ *
-+ * Author: Nikita Yushchenko <[email protected]>
-+ * Author: Andrew Lunn <[email protected]>
-+ * Author: Heiner Kallweit <[email protected]>
-+ */
-+
-+#include <linux/phy.h>
-+#include <linux/device.h>
-+#include <linux/ctype.h>
-+#include <linux/hwmon.h>
-+
-+#include "aquantia.h"
-+
-+/* Vendor specific 1, MDIO_MMD_VEND2 */
-+#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
-+#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
-+#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
-+#define VEND1_THERMAL_PROV_LOW_TEMP_WARN	0xc424
-+#define VEND1_THERMAL_STAT1			0xc820
-+#define VEND1_THERMAL_STAT2			0xc821
-+#define VEND1_THERMAL_STAT2_VALID		BIT(0)
-+#define VEND1_GENERAL_STAT1			0xc830
-+#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL	BIT(14)
-+#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL	BIT(13)
-+#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN	BIT(12)
-+#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN	BIT(11)
-+
-+#if IS_REACHABLE(CONFIG_HWMON)
-+
-+static umode_t aqr_hwmon_is_visible(const void *data,
-+				    enum hwmon_sensor_types type,
-+				    u32 attr, int channel)
-+{
-+	if (type != hwmon_temp)
-+		return 0;
-+
-+	switch (attr) {
-+	case hwmon_temp_input:
-+	case hwmon_temp_min_alarm:
-+	case hwmon_temp_max_alarm:
-+	case hwmon_temp_lcrit_alarm:
-+	case hwmon_temp_crit_alarm:
-+		return 0444;
-+	case hwmon_temp_min:
-+	case hwmon_temp_max:
-+	case hwmon_temp_lcrit:
-+	case hwmon_temp_crit:
-+		return 0644;
-+	default:
-+		return 0;
-+	}
-+}
-+
-+static int aqr_hwmon_get(struct phy_device *phydev, int reg, long *value)
-+{
-+	int temp = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
-+
-+	if (temp < 0)
-+		return temp;
-+
-+	/* 16 bit value is 2's complement with LSB = 1/256th degree Celsius */
-+	*value = (s16)temp * 1000 / 256;
-+
-+	return 0;
-+}
-+
-+static int aqr_hwmon_set(struct phy_device *phydev, int reg, long value)
-+{
-+	int temp;
-+
-+	if (value >= 128000 || value < -128000)
-+		return -ERANGE;
-+
-+	temp = value * 256 / 1000;
-+
-+	/* temp is in s16 range and we're interested in lower 16 bits only */
-+	return phy_write_mmd(phydev, MDIO_MMD_VEND1, reg, (u16)temp);
-+}
-+
-+static int aqr_hwmon_test_bit(struct phy_device *phydev, int reg, int bit)
-+{
-+	int val = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
-+
-+	if (val < 0)
-+		return val;
-+
-+	return !!(val & bit);
-+}
-+
-+static int aqr_hwmon_status1(struct phy_device *phydev, int bit, long *value)
-+{
-+	int val = aqr_hwmon_test_bit(phydev, VEND1_GENERAL_STAT1, bit);
-+
-+	if (val < 0)
-+		return val;
-+
-+	*value = val;
-+
-+	return 0;
-+}
-+
-+static int aqr_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
-+			  u32 attr, int channel, long *value)
-+{
-+	struct phy_device *phydev = dev_get_drvdata(dev);
-+	int reg;
-+
-+	if (type != hwmon_temp)
-+		return -EOPNOTSUPP;
-+
-+	switch (attr) {
-+	case hwmon_temp_input:
-+		reg = aqr_hwmon_test_bit(phydev, VEND1_THERMAL_STAT2,
-+					 VEND1_THERMAL_STAT2_VALID);
-+		if (reg < 0)
-+			return reg;
-+		if (!reg)
-+			return -EBUSY;
-+
-+		return aqr_hwmon_get(phydev, VEND1_THERMAL_STAT1, value);
-+
-+	case hwmon_temp_lcrit:
-+		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
-+				     value);
-+	case hwmon_temp_min:
-+		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
-+				     value);
-+	case hwmon_temp_max:
-+		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
-+				     value);
-+	case hwmon_temp_crit:
-+		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
-+				     value);
-+	case hwmon_temp_lcrit_alarm:
-+		return aqr_hwmon_status1(phydev,
-+					 VEND1_GENERAL_STAT1_LOW_TEMP_FAIL,
-+					 value);
-+	case hwmon_temp_min_alarm:
-+		return aqr_hwmon_status1(phydev,
-+					 VEND1_GENERAL_STAT1_LOW_TEMP_WARN,
-+					 value);
-+	case hwmon_temp_max_alarm:
-+		return aqr_hwmon_status1(phydev,
-+					 VEND1_GENERAL_STAT1_HIGH_TEMP_WARN,
-+					 value);
-+	case hwmon_temp_crit_alarm:
-+		return aqr_hwmon_status1(phydev,
-+					 VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL,
-+					 value);
-+	default:
-+		return -EOPNOTSUPP;
-+	}
-+}
-+
-+static int aqr_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
-+			   u32 attr, int channel, long value)
-+{
-+	struct phy_device *phydev = dev_get_drvdata(dev);
-+
-+	if (type != hwmon_temp)
-+		return -EOPNOTSUPP;
-+
-+	switch (attr) {
-+	case hwmon_temp_lcrit:
-+		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
-+				     value);
-+	case hwmon_temp_min:
-+		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
-+				     value);
-+	case hwmon_temp_max:
-+		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
-+				     value);
-+	case hwmon_temp_crit:
-+		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
-+				     value);
-+	default:
-+		return -EOPNOTSUPP;
-+	}
-+}
-+
-+static const struct hwmon_ops aqr_hwmon_ops = {
-+	.is_visible = aqr_hwmon_is_visible,
-+	.read = aqr_hwmon_read,
-+	.write = aqr_hwmon_write,
-+};
-+
-+static u32 aqr_hwmon_chip_config[] = {
-+	HWMON_C_REGISTER_TZ,
-+	0,
-+};
-+
-+static const struct hwmon_channel_info aqr_hwmon_chip = {
-+	.type = hwmon_chip,
-+	.config = aqr_hwmon_chip_config,
-+};
-+
-+static u32 aqr_hwmon_temp_config[] = {
-+	HWMON_T_INPUT |
-+	HWMON_T_MAX | HWMON_T_MIN |
-+	HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
-+	HWMON_T_CRIT | HWMON_T_LCRIT |
-+	HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
-+	0,
-+};
-+
-+static const struct hwmon_channel_info aqr_hwmon_temp = {
-+	.type = hwmon_temp,
-+	.config = aqr_hwmon_temp_config,
-+};
-+
-+static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
-+	&aqr_hwmon_chip,
-+	&aqr_hwmon_temp,
-+	NULL,
-+};
-+
-+static const struct hwmon_chip_info aqr_hwmon_chip_info = {
-+	.ops = &aqr_hwmon_ops,
-+	.info = aqr_hwmon_info,
-+};
-+
-+int aqr_hwmon_probe(struct phy_device *phydev)
-+{
-+	struct device *dev = &phydev->mdio.dev;
-+	struct device *hwmon_dev;
-+	char *hwmon_name;
-+	int i, j;
-+
-+	hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
-+	if (!hwmon_name)
-+		return -ENOMEM;
-+
-+	for (i = j = 0; hwmon_name[i]; i++) {
-+		if (isalnum(hwmon_name[i])) {
-+			if (i != j)
-+				hwmon_name[j] = hwmon_name[i];
-+			j++;
-+		}
-+	}
-+	hwmon_name[j] = '\0';
-+
-+	hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
-+					phydev, &aqr_hwmon_chip_info, NULL);
-+
-+	return PTR_ERR_OR_ZERO(hwmon_dev);
-+}
-+
-+#endif
---- /dev/null
-+++ b/drivers/net/phy/aquantia/aquantia_main.c
-@@ -0,0 +1,882 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Driver for Aquantia PHY
-+ *
-+ * Author: Shaohui Xie <[email protected]>
-+ *
-+ * Copyright 2015 Freescale Semiconductor, Inc.
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/delay.h>
-+#include <linux/bitfield.h>
-+#include <linux/phy.h>
-+
-+#include "aquantia.h"
-+
-+#define PHY_ID_AQ1202	0x03a1b445
-+#define PHY_ID_AQ2104	0x03a1b460
-+#define PHY_ID_AQR105	0x03a1b4a2
-+#define PHY_ID_AQR106	0x03a1b4d0
-+#define PHY_ID_AQR107	0x03a1b4e0
-+#define PHY_ID_AQCS109	0x03a1b5c2
-+#define PHY_ID_AQR405	0x03a1b4b0
-+#define PHY_ID_AQR112	0x03a1b662
-+#define PHY_ID_AQR412	0x03a1b712
-+#define PHY_ID_AQR113C	0x31c31c12
-+
-+#define MDIO_PHYXS_VEND_IF_STATUS		0xe812
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK	GENMASK(7, 3)
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR	0
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX	1
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI	2
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII	3
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI	4
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII	6
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI	7
-+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII	10
-+
-+#define MDIO_AN_VEND_PROV			0xc400
-+#define MDIO_AN_VEND_PROV_1000BASET_FULL	BIT(15)
-+#define MDIO_AN_VEND_PROV_1000BASET_HALF	BIT(14)
-+#define MDIO_AN_VEND_PROV_5000BASET_FULL	BIT(11)
-+#define MDIO_AN_VEND_PROV_2500BASET_FULL	BIT(10)
-+#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN		BIT(4)
-+#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK	GENMASK(3, 0)
-+#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT	4
-+
-+#define MDIO_AN_TX_VEND_STATUS1			0xc800
-+#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK	GENMASK(3, 1)
-+#define MDIO_AN_TX_VEND_STATUS1_10BASET		0
-+#define MDIO_AN_TX_VEND_STATUS1_100BASETX	1
-+#define MDIO_AN_TX_VEND_STATUS1_1000BASET	2
-+#define MDIO_AN_TX_VEND_STATUS1_10GBASET	3
-+#define MDIO_AN_TX_VEND_STATUS1_2500BASET	4
-+#define MDIO_AN_TX_VEND_STATUS1_5000BASET	5
-+#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX	BIT(0)
-+
-+#define MDIO_AN_TX_VEND_INT_STATUS1		0xcc00
-+#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT	BIT(1)
-+
-+#define MDIO_AN_TX_VEND_INT_STATUS2		0xcc01
-+#define MDIO_AN_TX_VEND_INT_STATUS2_MASK	BIT(0)
-+
-+#define MDIO_AN_TX_VEND_INT_MASK2		0xd401
-+#define MDIO_AN_TX_VEND_INT_MASK2_LINK		BIT(0)
-+
-+#define MDIO_AN_RX_LP_STAT1			0xe820
-+#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL	BIT(15)
-+#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF	BIT(14)
-+#define MDIO_AN_RX_LP_STAT1_SHORT_REACH		BIT(13)
-+#define MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT	BIT(12)
-+#define MDIO_AN_RX_LP_STAT1_AQ_PHY		BIT(2)
-+
-+#define MDIO_AN_RX_LP_STAT4			0xe823
-+#define MDIO_AN_RX_LP_STAT4_FW_MAJOR		GENMASK(15, 8)
-+#define MDIO_AN_RX_LP_STAT4_FW_MINOR		GENMASK(7, 0)
-+
-+#define MDIO_AN_RX_VEND_STAT3			0xe832
-+#define MDIO_AN_RX_VEND_STAT3_AFR		BIT(0)
-+
-+/* MDIO_MMD_C22EXT */
-+#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES		0xd292
-+#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES		0xd294
-+#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER		0xd297
-+#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES		0xd313
-+#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES		0xd315
-+#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER		0xd317
-+#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS		0xd318
-+#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS	0xd319
-+#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR	0xd31a
-+#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES		0xd31b
-+
-+/* Vendor specific 1, MDIO_MMD_VEND1 */
-+#define VEND1_GLOBAL_FW_ID			0x0020
-+#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
-+#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
-+
-+#define VEND1_GLOBAL_GEN_STAT2			0xc831
-+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
-+
-+/* The following registers all have similar layouts; first the registers... */
-+#define VEND1_GLOBAL_CFG_10M			0x0310
-+#define VEND1_GLOBAL_CFG_100M			0x031b
-+#define VEND1_GLOBAL_CFG_1G			0x031c
-+#define VEND1_GLOBAL_CFG_2_5G			0x031d
-+#define VEND1_GLOBAL_CFG_5G			0x031e
-+#define VEND1_GLOBAL_CFG_10G			0x031f
-+/* ...and now the fields */
-+#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
-+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
-+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
-+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
-+
-+#define VEND1_GLOBAL_RSVD_STAT1			0xc885
-+#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
-+#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
-+
-+#define VEND1_GLOBAL_RSVD_STAT9			0xc88d
-+#define VEND1_GLOBAL_RSVD_STAT9_MODE		GENMASK(7, 0)
-+#define VEND1_GLOBAL_RSVD_STAT9_1000BT2		0x23
-+
-+#define VEND1_GLOBAL_INT_STD_STATUS		0xfc00
-+#define VEND1_GLOBAL_INT_VEND_STATUS		0xfc01
-+
-+#define VEND1_GLOBAL_INT_STD_MASK		0xff00
-+#define VEND1_GLOBAL_INT_STD_MASK_PMA1		BIT(15)
-+#define VEND1_GLOBAL_INT_STD_MASK_PMA2		BIT(14)
-+#define VEND1_GLOBAL_INT_STD_MASK_PCS1		BIT(13)
-+#define VEND1_GLOBAL_INT_STD_MASK_PCS2		BIT(12)
-+#define VEND1_GLOBAL_INT_STD_MASK_PCS3		BIT(11)
-+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1	BIT(10)
-+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2	BIT(9)
-+#define VEND1_GLOBAL_INT_STD_MASK_AN1		BIT(8)
-+#define VEND1_GLOBAL_INT_STD_MASK_AN2		BIT(7)
-+#define VEND1_GLOBAL_INT_STD_MASK_GBE		BIT(6)
-+#define VEND1_GLOBAL_INT_STD_MASK_ALL		BIT(0)
-+
-+#define VEND1_GLOBAL_INT_VEND_MASK		0xff01
-+#define VEND1_GLOBAL_INT_VEND_MASK_PMA		BIT(15)
-+#define VEND1_GLOBAL_INT_VEND_MASK_PCS		BIT(14)
-+#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS	BIT(13)
-+#define VEND1_GLOBAL_INT_VEND_MASK_AN		BIT(12)
-+#define VEND1_GLOBAL_INT_VEND_MASK_GBE		BIT(11)
-+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1	BIT(2)
-+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2	BIT(1)
-+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3	BIT(0)
-+
-+/* Sleep and timeout for checking if the Processor-Intensive
-+ * MDIO operation is finished
-+ */
-+#define AQR107_OP_IN_PROG_SLEEP		1000
-+#define AQR107_OP_IN_PROG_TIMEOUT	100000
-+
-+struct aqr107_hw_stat {
-+	const char *name;
-+	int reg;
-+	int size;
-+};
-+
-+#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
-+static const struct aqr107_hw_stat aqr107_hw_stats[] = {
-+	SGMII_STAT("sgmii_rx_good_frames",	    RX_GOOD_FRAMES,	26),
-+	SGMII_STAT("sgmii_rx_bad_frames",	    RX_BAD_FRAMES,	26),
-+	SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER,	 8),
-+	SGMII_STAT("sgmii_tx_good_frames",	    TX_GOOD_FRAMES,	26),
-+	SGMII_STAT("sgmii_tx_bad_frames",	    TX_BAD_FRAMES,	26),
-+	SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER,	 8),
-+	SGMII_STAT("sgmii_tx_collisions",	    TX_COLLISIONS,	 8),
-+	SGMII_STAT("sgmii_tx_line_collisions",	    TX_LINE_COLLISIONS,	 8),
-+	SGMII_STAT("sgmii_tx_frame_alignment_err",  TX_FRAME_ALIGN_ERR,	16),
-+	SGMII_STAT("sgmii_tx_runt_frames",	    TX_RUNT_FRAMES,	22),
-+};
-+#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
-+
-+struct aqr107_priv {
-+	u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
-+};
-+
-+static int aqr107_get_sset_count(struct phy_device *phydev)
-+{
-+	return AQR107_SGMII_STAT_SZ;
-+}
-+
-+static void aqr107_get_strings(struct phy_device *phydev, u8 *data)
-+{
-+	int i;
-+
-+	for (i = 0; i < AQR107_SGMII_STAT_SZ; i++)
-+		strscpy(data + i * ETH_GSTRING_LEN, aqr107_hw_stats[i].name,
-+			ETH_GSTRING_LEN);
-+}
-+
-+static u64 aqr107_get_stat(struct phy_device *phydev, int index)
-+{
-+	const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
-+	int len_l = min(stat->size, 16);
-+	int len_h = stat->size - len_l;
-+	u64 ret;
-+	int val;
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
-+	if (val < 0)
-+		return U64_MAX;
-+
-+	ret = val & GENMASK(len_l - 1, 0);
-+	if (len_h) {
-+		val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
-+		if (val < 0)
-+			return U64_MAX;
-+
-+		ret += (val & GENMASK(len_h - 1, 0)) << 16;
-+	}
-+
-+	return ret;
-+}
-+
-+static void aqr107_get_stats(struct phy_device *phydev,
-+			     struct ethtool_stats *stats, u64 *data)
-+{
-+	struct aqr107_priv *priv = phydev->priv;
-+	u64 val;
-+	int i;
-+
-+	for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) {
-+		val = aqr107_get_stat(phydev, i);
-+		if (val == U64_MAX)
-+			phydev_err(phydev, "Reading HW Statistics failed for %s\n",
-+				   aqr107_hw_stats[i].name);
-+		else
-+			priv->sgmii_stats[i] += val;
-+
-+		data[i] = priv->sgmii_stats[i];
-+	}
-+}
-+
-+static int aqr_config_aneg(struct phy_device *phydev)
-+{
-+	bool changed = false;
-+	u16 reg;
-+	int ret;
-+
-+	if (phydev->autoneg == AUTONEG_DISABLE)
-+		return genphy_c45_pma_setup_forced(phydev);
-+
-+	ret = genphy_c45_an_config_aneg(phydev);
-+	if (ret < 0)
-+		return ret;
-+	if (ret > 0)
-+		changed = true;
-+
-+	/* Clause 45 has no standardized support for 1000BaseT, therefore
-+	 * use vendor registers for this mode.
-+	 */
-+	reg = 0;
-+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
-+			      phydev->advertising))
-+		reg |= MDIO_AN_VEND_PROV_1000BASET_FULL;
-+
-+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
-+			      phydev->advertising))
-+		reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
-+
-+	/* Handle the case when the 2.5G and 5G speeds are not advertised */
-+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
-+			      phydev->advertising))
-+		reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
-+
-+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
-+			      phydev->advertising))
-+		reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
-+
-+	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
-+				     MDIO_AN_VEND_PROV_1000BASET_HALF |
-+				     MDIO_AN_VEND_PROV_1000BASET_FULL |
-+				     MDIO_AN_VEND_PROV_2500BASET_FULL |
-+				     MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
-+	if (ret < 0)
-+		return ret;
-+	if (ret > 0)
-+		changed = true;
-+
-+	return genphy_c45_check_and_restart_aneg(phydev, changed);
-+}
-+
-+static int aqr_config_intr(struct phy_device *phydev)
-+{
-+	bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
-+	int err;
-+
-+	if (en) {
-+		/* Clear any pending interrupts before enabling them */
-+		err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
-+		if (err < 0)
-+			return err;
-+	}
-+
-+	err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
-+			    en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
-+	if (err < 0)
-+		return err;
-+
-+	err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_STD_MASK,
-+			    en ? VEND1_GLOBAL_INT_STD_MASK_ALL : 0);
-+	if (err < 0)
-+		return err;
-+
-+	err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
-+			    en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
-+			    VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
-+	if (err < 0)
-+		return err;
-+
-+	if (!en) {
-+		/* Clear any pending interrupts after we have disabled them */
-+		err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
-+		if (err < 0)
-+			return err;
-+	}
-+
-+	return 0;
-+}
-+
-+static irqreturn_t aqr_handle_interrupt(struct phy_device *phydev)
-+{
-+	int irq_status;
-+
-+	irq_status = phy_read_mmd(phydev, MDIO_MMD_AN,
-+				  MDIO_AN_TX_VEND_INT_STATUS2);
-+	if (irq_status < 0) {
-+		phy_error(phydev);
-+		return IRQ_NONE;
-+	}
-+
-+	if (!(irq_status & MDIO_AN_TX_VEND_INT_STATUS2_MASK))
-+		return IRQ_NONE;
-+
-+	phy_trigger_machine(phydev);
-+
-+	return IRQ_HANDLED;
-+}
-+
-+static int aqr_read_status(struct phy_device *phydev)
-+{
-+	int val;
-+
-+	if (phydev->autoneg == AUTONEG_ENABLE) {
-+		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
-+		if (val < 0)
-+			return val;
-+
-+		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
-+				 phydev->lp_advertising,
-+				 val & MDIO_AN_RX_LP_STAT1_1000BASET_FULL);
-+		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
-+				 phydev->lp_advertising,
-+				 val & MDIO_AN_RX_LP_STAT1_1000BASET_HALF);
-+	}
-+
-+	return genphy_c45_read_status(phydev);
-+}
-+
-+static int aqr107_read_rate(struct phy_device *phydev)
-+{
-+	u32 config_reg;
-+	int val;
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
-+	if (val < 0)
-+		return val;
-+
-+	if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
-+		phydev->duplex = DUPLEX_FULL;
-+	else
-+		phydev->duplex = DUPLEX_HALF;
-+
-+	switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
-+	case MDIO_AN_TX_VEND_STATUS1_10BASET:
-+		phydev->speed = SPEED_10;
-+		config_reg = VEND1_GLOBAL_CFG_10M;
-+		break;
-+	case MDIO_AN_TX_VEND_STATUS1_100BASETX:
-+		phydev->speed = SPEED_100;
-+		config_reg = VEND1_GLOBAL_CFG_100M;
-+		break;
-+	case MDIO_AN_TX_VEND_STATUS1_1000BASET:
-+		phydev->speed = SPEED_1000;
-+		config_reg = VEND1_GLOBAL_CFG_1G;
-+		break;
-+	case MDIO_AN_TX_VEND_STATUS1_2500BASET:
-+		phydev->speed = SPEED_2500;
-+		config_reg = VEND1_GLOBAL_CFG_2_5G;
-+		break;
-+	case MDIO_AN_TX_VEND_STATUS1_5000BASET:
-+		phydev->speed = SPEED_5000;
-+		config_reg = VEND1_GLOBAL_CFG_5G;
-+		break;
-+	case MDIO_AN_TX_VEND_STATUS1_10GBASET:
-+		phydev->speed = SPEED_10000;
-+		config_reg = VEND1_GLOBAL_CFG_10G;
-+		break;
-+	default:
-+		phydev->speed = SPEED_UNKNOWN;
-+		return 0;
-+	}
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
-+	if (val < 0)
-+		return val;
-+
-+	if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
-+	    VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
-+		phydev->rate_matching = RATE_MATCH_PAUSE;
-+	else
-+		phydev->rate_matching = RATE_MATCH_NONE;
-+
-+	return 0;
-+}
-+
-+static int aqr107_read_status(struct phy_device *phydev)
-+{
-+	int val, ret;
-+
-+	ret = aqr_read_status(phydev);
-+	if (ret)
-+		return ret;
-+
-+	if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
-+		return 0;
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS);
-+	if (val < 0)
-+		return val;
-+
-+	switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
-+	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
-+		phydev->interface = PHY_INTERFACE_MODE_10GKR;
-+		break;
-+	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
-+		phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
-+		break;
-+	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
-+		phydev->interface = PHY_INTERFACE_MODE_10GBASER;
-+		break;
-+	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
-+		phydev->interface = PHY_INTERFACE_MODE_USXGMII;
-+		break;
-+	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
-+		phydev->interface = PHY_INTERFACE_MODE_XAUI;
-+		break;
-+	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
-+		phydev->interface = PHY_INTERFACE_MODE_SGMII;
-+		break;
-+	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
-+		phydev->interface = PHY_INTERFACE_MODE_RXAUI;
-+		break;
-+	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
-+		phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
-+		break;
-+	default:
-+		phydev->interface = PHY_INTERFACE_MODE_NA;
-+		break;
-+	}
-+
-+	/* Read possibly downshifted rate from vendor register */
-+	return aqr107_read_rate(phydev);
-+}
-+
-+static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
-+{
-+	int val, cnt, enable;
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV);
-+	if (val < 0)
-+		return val;
-+
-+	enable = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, val);
-+	cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
-+
-+	*data = enable && cnt ? cnt : DOWNSHIFT_DEV_DISABLE;
-+
-+	return 0;
-+}
-+
-+static int aqr107_set_downshift(struct phy_device *phydev, u8 cnt)
-+{
-+	int val = 0;
-+
-+	if (!FIELD_FIT(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt))
-+		return -E2BIG;
-+
-+	if (cnt != DOWNSHIFT_DEV_DISABLE) {
-+		val = MDIO_AN_VEND_PROV_DOWNSHIFT_EN;
-+		val |= FIELD_PREP(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt);
-+	}
-+
-+	return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
-+			      MDIO_AN_VEND_PROV_DOWNSHIFT_EN |
-+			      MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
-+}
-+
-+static int aqr107_get_tunable(struct phy_device *phydev,
-+			      struct ethtool_tunable *tuna, void *data)
-+{
-+	switch (tuna->id) {
-+	case ETHTOOL_PHY_DOWNSHIFT:
-+		return aqr107_get_downshift(phydev, data);
-+	default:
-+		return -EOPNOTSUPP;
-+	}
-+}
-+
-+static int aqr107_set_tunable(struct phy_device *phydev,
-+			      struct ethtool_tunable *tuna, const void *data)
-+{
-+	switch (tuna->id) {
-+	case ETHTOOL_PHY_DOWNSHIFT:
-+		return aqr107_set_downshift(phydev, *(const u8 *)data);
-+	default:
-+		return -EOPNOTSUPP;
-+	}
-+}
-+
-+/* If we configure settings whilst firmware is still initializing the chip,
-+ * then these settings may be overwritten. Therefore make sure chip
-+ * initialization has completed. Use presence of the firmware ID as
-+ * indicator for initialization having completed.
-+ * The chip also provides a "reset completed" bit, but it's cleared after
-+ * read. Therefore function would time out if called again.
-+ */
-+static int aqr107_wait_reset_complete(struct phy_device *phydev)
-+{
-+	int val;
-+
-+	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
-+					 VEND1_GLOBAL_FW_ID, val, val != 0,
-+					 20000, 2000000, false);
-+}
-+
-+static void aqr107_chip_info(struct phy_device *phydev)
-+{
-+	u8 fw_major, fw_minor, build_id, prov_id;
-+	int val;
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
-+	if (val < 0)
-+		return;
-+
-+	fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
-+	fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
-+	if (val < 0)
-+		return;
-+
-+	build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
-+	prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
-+
-+	phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
-+		   fw_major, fw_minor, build_id, prov_id);
-+}
-+
-+static int aqr107_config_init(struct phy_device *phydev)
-+{
-+	int ret;
-+
-+	/* Check that the PHY interface type is compatible */
-+	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
-+	    phydev->interface != PHY_INTERFACE_MODE_1000BASEKX &&
-+	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
-+	    phydev->interface != PHY_INTERFACE_MODE_XGMII &&
-+	    phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
-+	    phydev->interface != PHY_INTERFACE_MODE_10GKR &&
-+	    phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
-+	    phydev->interface != PHY_INTERFACE_MODE_XAUI &&
-+	    phydev->interface != PHY_INTERFACE_MODE_RXAUI)
-+		return -ENODEV;
-+
-+	WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
-+	     "Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
-+
-+	ret = aqr107_wait_reset_complete(phydev);
-+	if (!ret)
-+		aqr107_chip_info(phydev);
-+
-+	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
-+}
-+
-+static int aqcs109_config_init(struct phy_device *phydev)
-+{
-+	int ret;
-+
-+	/* Check that the PHY interface type is compatible */
-+	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
-+	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
-+		return -ENODEV;
-+
-+	ret = aqr107_wait_reset_complete(phydev);
-+	if (!ret)
-+		aqr107_chip_info(phydev);
-+
-+	/* AQCS109 belongs to a chip family partially supporting 10G and 5G.
-+	 * PMA speed ability bits are the same for all members of the family,
-+	 * AQCS109 however supports speeds up to 2.5G only.
-+	 */
-+	phy_set_max_speed(phydev, SPEED_2500);
-+
-+	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
-+}
-+
-+static void aqr107_link_change_notify(struct phy_device *phydev)
-+{
-+	u8 fw_major, fw_minor;
-+	bool downshift, short_reach, afr;
-+	int mode, val;
-+
-+	if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE)
-+		return;
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
-+	/* call failed or link partner is no Aquantia PHY */
-+	if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY))
-+		return;
-+
-+	short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH;
-+	downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT;
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4);
-+	if (val < 0)
-+		return;
-+
-+	fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val);
-+	fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val);
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3);
-+	if (val < 0)
-+		return;
-+
-+	afr = val & MDIO_AN_RX_VEND_STAT3_AFR;
-+
-+	phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n",
-+		   fw_major, fw_minor,
-+		   short_reach ? ", short reach mode" : "",
-+		   downshift ? ", fast-retrain downshift advertised" : "",
-+		   afr ? ", fast reframe advertised" : "");
-+
-+	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9);
-+	if (val < 0)
-+		return;
-+
-+	mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val);
-+	if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2)
-+		phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
-+}
-+
-+static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
-+{
-+	int val, err;
-+
-+	/* The datasheet notes to wait at least 1ms after issuing a
-+	 * processor intensive operation before checking.
-+	 * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
-+	 * because that just determines the maximum time slept, not the minimum.
-+	 */
-+	usleep_range(1000, 5000);
-+
-+	err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
-+					VEND1_GLOBAL_GEN_STAT2, val,
-+					!(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
-+					AQR107_OP_IN_PROG_SLEEP,
-+					AQR107_OP_IN_PROG_TIMEOUT, false);
-+	if (err) {
-+		phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
-+		return err;
-+	}
-+
-+	return 0;
-+}
-+
-+static int aqr107_get_rate_matching(struct phy_device *phydev,
-+				    phy_interface_t iface)
-+{
-+	if (iface == PHY_INTERFACE_MODE_10GBASER ||
-+	    iface == PHY_INTERFACE_MODE_2500BASEX ||
-+	    iface == PHY_INTERFACE_MODE_NA)
-+		return RATE_MATCH_PAUSE;
-+	return RATE_MATCH_NONE;
-+}
-+
-+static int aqr107_suspend(struct phy_device *phydev)
-+{
-+	int err;
-+
-+	err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
-+			       MDIO_CTRL1_LPOWER);
-+	if (err)
-+		return err;
-+
-+	return aqr107_wait_processor_intensive_op(phydev);
-+}
-+
-+static int aqr107_resume(struct phy_device *phydev)
-+{
-+	int err;
-+
-+	err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
-+				 MDIO_CTRL1_LPOWER);
-+	if (err)
-+		return err;
-+
-+	return aqr107_wait_processor_intensive_op(phydev);
-+}
-+
-+static int aqr107_probe(struct phy_device *phydev)
-+{
-+	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
-+				    sizeof(struct aqr107_priv), GFP_KERNEL);
-+	if (!phydev->priv)
-+		return -ENOMEM;
-+
-+	return aqr_hwmon_probe(phydev);
-+}
-+
-+static struct phy_driver aqr_driver[] = {
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
-+	.name		= "Aquantia AQ1202",
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.read_status	= aqr_read_status,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQ2104),
-+	.name		= "Aquantia AQ2104",
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.read_status	= aqr_read_status,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
-+	.name		= "Aquantia AQR105",
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.read_status	= aqr_read_status,
-+	.suspend	= aqr107_suspend,
-+	.resume		= aqr107_resume,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
-+	.name		= "Aquantia AQR106",
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.read_status	= aqr_read_status,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
-+	.name		= "Aquantia AQR107",
-+	.probe		= aqr107_probe,
-+	.get_rate_matching = aqr107_get_rate_matching,
-+	.config_init	= aqr107_config_init,
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.read_status	= aqr107_read_status,
-+	.get_tunable    = aqr107_get_tunable,
-+	.set_tunable    = aqr107_set_tunable,
-+	.suspend	= aqr107_suspend,
-+	.resume		= aqr107_resume,
-+	.get_sset_count	= aqr107_get_sset_count,
-+	.get_strings	= aqr107_get_strings,
-+	.get_stats	= aqr107_get_stats,
-+	.link_change_notify = aqr107_link_change_notify,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
-+	.name		= "Aquantia AQCS109",
-+	.probe		= aqr107_probe,
-+	.get_rate_matching = aqr107_get_rate_matching,
-+	.config_init	= aqcs109_config_init,
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.read_status	= aqr107_read_status,
-+	.get_tunable    = aqr107_get_tunable,
-+	.set_tunable    = aqr107_set_tunable,
-+	.suspend	= aqr107_suspend,
-+	.resume		= aqr107_resume,
-+	.get_sset_count	= aqr107_get_sset_count,
-+	.get_strings	= aqr107_get_strings,
-+	.get_stats	= aqr107_get_stats,
-+	.link_change_notify = aqr107_link_change_notify,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
-+	.name		= "Aquantia AQR405",
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.read_status	= aqr_read_status,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQR112),
-+	.name		= "Aquantia AQR112",
-+	.probe		= aqr107_probe,
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.get_tunable    = aqr107_get_tunable,
-+	.set_tunable    = aqr107_set_tunable,
-+	.suspend	= aqr107_suspend,
-+	.resume		= aqr107_resume,
-+	.read_status	= aqr107_read_status,
-+	.get_rate_matching = aqr107_get_rate_matching,
-+	.get_sset_count = aqr107_get_sset_count,
-+	.get_strings	= aqr107_get_strings,
-+	.get_stats	= aqr107_get_stats,
-+	.link_change_notify = aqr107_link_change_notify,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
-+	.name		= "Aquantia AQR412",
-+	.probe		= aqr107_probe,
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr	= aqr_config_intr,
-+	.handle_interrupt = aqr_handle_interrupt,
-+	.get_tunable    = aqr107_get_tunable,
-+	.set_tunable    = aqr107_set_tunable,
-+	.suspend	= aqr107_suspend,
-+	.resume		= aqr107_resume,
-+	.read_status	= aqr107_read_status,
-+	.get_rate_matching = aqr107_get_rate_matching,
-+	.get_sset_count = aqr107_get_sset_count,
-+	.get_strings	= aqr107_get_strings,
-+	.get_stats	= aqr107_get_stats,
-+	.link_change_notify = aqr107_link_change_notify,
-+},
-+{
-+	PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
-+	.name           = "Aquantia AQR113C",
-+	.probe          = aqr107_probe,
-+	.get_rate_matching = aqr107_get_rate_matching,
-+	.config_init    = aqr107_config_init,
-+	.config_aneg    = aqr_config_aneg,
-+	.config_intr    = aqr_config_intr,
-+	.handle_interrupt       = aqr_handle_interrupt,
-+	.read_status    = aqr107_read_status,
-+	.get_tunable    = aqr107_get_tunable,
-+	.set_tunable    = aqr107_set_tunable,
-+	.suspend        = aqr107_suspend,
-+	.resume         = aqr107_resume,
-+	.get_sset_count = aqr107_get_sset_count,
-+	.get_strings    = aqr107_get_strings,
-+	.get_stats      = aqr107_get_stats,
-+	.link_change_notify = aqr107_link_change_notify,
-+},
-+};
-+
-+module_phy_driver(aqr_driver);
-+
-+static struct mdio_device_id __maybe_unused aqr_tbl[] = {
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR106) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
-+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
-+	{ }
-+};
-+
-+MODULE_DEVICE_TABLE(mdio, aqr_tbl);
-+
-+MODULE_DESCRIPTION("Aquantia PHY driver");
-+MODULE_AUTHOR("Shaohui Xie <[email protected]>");
-+MODULE_LICENSE("GPL v2");
---- a/drivers/net/phy/aquantia_hwmon.c
-+++ /dev/null
-@@ -1,250 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--/* HWMON driver for Aquantia PHY
-- *
-- * Author: Nikita Yushchenko <[email protected]>
-- * Author: Andrew Lunn <[email protected]>
-- * Author: Heiner Kallweit <[email protected]>
-- */
--
--#include <linux/phy.h>
--#include <linux/device.h>
--#include <linux/ctype.h>
--#include <linux/hwmon.h>
--
--#include "aquantia.h"
--
--/* Vendor specific 1, MDIO_MMD_VEND2 */
--#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
--#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
--#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
--#define VEND1_THERMAL_PROV_LOW_TEMP_WARN	0xc424
--#define VEND1_THERMAL_STAT1			0xc820
--#define VEND1_THERMAL_STAT2			0xc821
--#define VEND1_THERMAL_STAT2_VALID		BIT(0)
--#define VEND1_GENERAL_STAT1			0xc830
--#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL	BIT(14)
--#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL	BIT(13)
--#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN	BIT(12)
--#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN	BIT(11)
--
--#if IS_REACHABLE(CONFIG_HWMON)
--
--static umode_t aqr_hwmon_is_visible(const void *data,
--				    enum hwmon_sensor_types type,
--				    u32 attr, int channel)
--{
--	if (type != hwmon_temp)
--		return 0;
--
--	switch (attr) {
--	case hwmon_temp_input:
--	case hwmon_temp_min_alarm:
--	case hwmon_temp_max_alarm:
--	case hwmon_temp_lcrit_alarm:
--	case hwmon_temp_crit_alarm:
--		return 0444;
--	case hwmon_temp_min:
--	case hwmon_temp_max:
--	case hwmon_temp_lcrit:
--	case hwmon_temp_crit:
--		return 0644;
--	default:
--		return 0;
--	}
--}
--
--static int aqr_hwmon_get(struct phy_device *phydev, int reg, long *value)
--{
--	int temp = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
--
--	if (temp < 0)
--		return temp;
--
--	/* 16 bit value is 2's complement with LSB = 1/256th degree Celsius */
--	*value = (s16)temp * 1000 / 256;
--
--	return 0;
--}
--
--static int aqr_hwmon_set(struct phy_device *phydev, int reg, long value)
--{
--	int temp;
--
--	if (value >= 128000 || value < -128000)
--		return -ERANGE;
--
--	temp = value * 256 / 1000;
--
--	/* temp is in s16 range and we're interested in lower 16 bits only */
--	return phy_write_mmd(phydev, MDIO_MMD_VEND1, reg, (u16)temp);
--}
--
--static int aqr_hwmon_test_bit(struct phy_device *phydev, int reg, int bit)
--{
--	int val = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
--
--	if (val < 0)
--		return val;
--
--	return !!(val & bit);
--}
--
--static int aqr_hwmon_status1(struct phy_device *phydev, int bit, long *value)
--{
--	int val = aqr_hwmon_test_bit(phydev, VEND1_GENERAL_STAT1, bit);
--
--	if (val < 0)
--		return val;
--
--	*value = val;
--
--	return 0;
--}
--
--static int aqr_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
--			  u32 attr, int channel, long *value)
--{
--	struct phy_device *phydev = dev_get_drvdata(dev);
--	int reg;
--
--	if (type != hwmon_temp)
--		return -EOPNOTSUPP;
--
--	switch (attr) {
--	case hwmon_temp_input:
--		reg = aqr_hwmon_test_bit(phydev, VEND1_THERMAL_STAT2,
--					 VEND1_THERMAL_STAT2_VALID);
--		if (reg < 0)
--			return reg;
--		if (!reg)
--			return -EBUSY;
--
--		return aqr_hwmon_get(phydev, VEND1_THERMAL_STAT1, value);
--
--	case hwmon_temp_lcrit:
--		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
--				     value);
--	case hwmon_temp_min:
--		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
--				     value);
--	case hwmon_temp_max:
--		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
--				     value);
--	case hwmon_temp_crit:
--		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
--				     value);
--	case hwmon_temp_lcrit_alarm:
--		return aqr_hwmon_status1(phydev,
--					 VEND1_GENERAL_STAT1_LOW_TEMP_FAIL,
--					 value);
--	case hwmon_temp_min_alarm:
--		return aqr_hwmon_status1(phydev,
--					 VEND1_GENERAL_STAT1_LOW_TEMP_WARN,
--					 value);
--	case hwmon_temp_max_alarm:
--		return aqr_hwmon_status1(phydev,
--					 VEND1_GENERAL_STAT1_HIGH_TEMP_WARN,
--					 value);
--	case hwmon_temp_crit_alarm:
--		return aqr_hwmon_status1(phydev,
--					 VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL,
--					 value);
--	default:
--		return -EOPNOTSUPP;
--	}
--}
--
--static int aqr_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
--			   u32 attr, int channel, long value)
--{
--	struct phy_device *phydev = dev_get_drvdata(dev);
--
--	if (type != hwmon_temp)
--		return -EOPNOTSUPP;
--
--	switch (attr) {
--	case hwmon_temp_lcrit:
--		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
--				     value);
--	case hwmon_temp_min:
--		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
--				     value);
--	case hwmon_temp_max:
--		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
--				     value);
--	case hwmon_temp_crit:
--		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
--				     value);
--	default:
--		return -EOPNOTSUPP;
--	}
--}
--
--static const struct hwmon_ops aqr_hwmon_ops = {
--	.is_visible = aqr_hwmon_is_visible,
--	.read = aqr_hwmon_read,
--	.write = aqr_hwmon_write,
--};
--
--static u32 aqr_hwmon_chip_config[] = {
--	HWMON_C_REGISTER_TZ,
--	0,
--};
--
--static const struct hwmon_channel_info aqr_hwmon_chip = {
--	.type = hwmon_chip,
--	.config = aqr_hwmon_chip_config,
--};
--
--static u32 aqr_hwmon_temp_config[] = {
--	HWMON_T_INPUT |
--	HWMON_T_MAX | HWMON_T_MIN |
--	HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
--	HWMON_T_CRIT | HWMON_T_LCRIT |
--	HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
--	0,
--};
--
--static const struct hwmon_channel_info aqr_hwmon_temp = {
--	.type = hwmon_temp,
--	.config = aqr_hwmon_temp_config,
--};
--
--static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
--	&aqr_hwmon_chip,
--	&aqr_hwmon_temp,
--	NULL,
--};
--
--static const struct hwmon_chip_info aqr_hwmon_chip_info = {
--	.ops = &aqr_hwmon_ops,
--	.info = aqr_hwmon_info,
--};
--
--int aqr_hwmon_probe(struct phy_device *phydev)
--{
--	struct device *dev = &phydev->mdio.dev;
--	struct device *hwmon_dev;
--	char *hwmon_name;
--	int i, j;
--
--	hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
--	if (!hwmon_name)
--		return -ENOMEM;
--
--	for (i = j = 0; hwmon_name[i]; i++) {
--		if (isalnum(hwmon_name[i])) {
--			if (i != j)
--				hwmon_name[j] = hwmon_name[i];
--			j++;
--		}
--	}
--	hwmon_name[j] = '\0';
--
--	hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
--					phydev, &aqr_hwmon_chip_info, NULL);
--
--	return PTR_ERR_OR_ZERO(hwmon_dev);
--}
--
--#endif
---- a/drivers/net/phy/aquantia_main.c
-+++ /dev/null
-@@ -1,882 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0
--/*
-- * Driver for Aquantia PHY
-- *
-- * Author: Shaohui Xie <[email protected]>
-- *
-- * Copyright 2015 Freescale Semiconductor, Inc.
-- */
--
--#include <linux/kernel.h>
--#include <linux/module.h>
--#include <linux/delay.h>
--#include <linux/bitfield.h>
--#include <linux/phy.h>
--
--#include "aquantia.h"
--
--#define PHY_ID_AQ1202	0x03a1b445
--#define PHY_ID_AQ2104	0x03a1b460
--#define PHY_ID_AQR105	0x03a1b4a2
--#define PHY_ID_AQR106	0x03a1b4d0
--#define PHY_ID_AQR107	0x03a1b4e0
--#define PHY_ID_AQCS109	0x03a1b5c2
--#define PHY_ID_AQR405	0x03a1b4b0
--#define PHY_ID_AQR112	0x03a1b662
--#define PHY_ID_AQR412	0x03a1b712
--#define PHY_ID_AQR113C	0x31c31c12
--
--#define MDIO_PHYXS_VEND_IF_STATUS		0xe812
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK	GENMASK(7, 3)
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR	0
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX	1
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI	2
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII	3
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI	4
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII	6
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI	7
--#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII	10
--
--#define MDIO_AN_VEND_PROV			0xc400
--#define MDIO_AN_VEND_PROV_1000BASET_FULL	BIT(15)
--#define MDIO_AN_VEND_PROV_1000BASET_HALF	BIT(14)
--#define MDIO_AN_VEND_PROV_5000BASET_FULL	BIT(11)
--#define MDIO_AN_VEND_PROV_2500BASET_FULL	BIT(10)
--#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN		BIT(4)
--#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK	GENMASK(3, 0)
--#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT	4
--
--#define MDIO_AN_TX_VEND_STATUS1			0xc800
--#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK	GENMASK(3, 1)
--#define MDIO_AN_TX_VEND_STATUS1_10BASET		0
--#define MDIO_AN_TX_VEND_STATUS1_100BASETX	1
--#define MDIO_AN_TX_VEND_STATUS1_1000BASET	2
--#define MDIO_AN_TX_VEND_STATUS1_10GBASET	3
--#define MDIO_AN_TX_VEND_STATUS1_2500BASET	4
--#define MDIO_AN_TX_VEND_STATUS1_5000BASET	5
--#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX	BIT(0)
--
--#define MDIO_AN_TX_VEND_INT_STATUS1		0xcc00
--#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT	BIT(1)
--
--#define MDIO_AN_TX_VEND_INT_STATUS2		0xcc01
--#define MDIO_AN_TX_VEND_INT_STATUS2_MASK	BIT(0)
--
--#define MDIO_AN_TX_VEND_INT_MASK2		0xd401
--#define MDIO_AN_TX_VEND_INT_MASK2_LINK		BIT(0)
--
--#define MDIO_AN_RX_LP_STAT1			0xe820
--#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL	BIT(15)
--#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF	BIT(14)
--#define MDIO_AN_RX_LP_STAT1_SHORT_REACH		BIT(13)
--#define MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT	BIT(12)
--#define MDIO_AN_RX_LP_STAT1_AQ_PHY		BIT(2)
--
--#define MDIO_AN_RX_LP_STAT4			0xe823
--#define MDIO_AN_RX_LP_STAT4_FW_MAJOR		GENMASK(15, 8)
--#define MDIO_AN_RX_LP_STAT4_FW_MINOR		GENMASK(7, 0)
--
--#define MDIO_AN_RX_VEND_STAT3			0xe832
--#define MDIO_AN_RX_VEND_STAT3_AFR		BIT(0)
--
--/* MDIO_MMD_C22EXT */
--#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES		0xd292
--#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES		0xd294
--#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER		0xd297
--#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES		0xd313
--#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES		0xd315
--#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER		0xd317
--#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS		0xd318
--#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS	0xd319
--#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR	0xd31a
--#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES		0xd31b
--
--/* Vendor specific 1, MDIO_MMD_VEND1 */
--#define VEND1_GLOBAL_FW_ID			0x0020
--#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
--#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
--
--#define VEND1_GLOBAL_GEN_STAT2			0xc831
--#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
--
--/* The following registers all have similar layouts; first the registers... */
--#define VEND1_GLOBAL_CFG_10M			0x0310
--#define VEND1_GLOBAL_CFG_100M			0x031b
--#define VEND1_GLOBAL_CFG_1G			0x031c
--#define VEND1_GLOBAL_CFG_2_5G			0x031d
--#define VEND1_GLOBAL_CFG_5G			0x031e
--#define VEND1_GLOBAL_CFG_10G			0x031f
--/* ...and now the fields */
--#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
--#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
--#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
--#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
--
--#define VEND1_GLOBAL_RSVD_STAT1			0xc885
--#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
--#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
--
--#define VEND1_GLOBAL_RSVD_STAT9			0xc88d
--#define VEND1_GLOBAL_RSVD_STAT9_MODE		GENMASK(7, 0)
--#define VEND1_GLOBAL_RSVD_STAT9_1000BT2		0x23
--
--#define VEND1_GLOBAL_INT_STD_STATUS		0xfc00
--#define VEND1_GLOBAL_INT_VEND_STATUS		0xfc01
--
--#define VEND1_GLOBAL_INT_STD_MASK		0xff00
--#define VEND1_GLOBAL_INT_STD_MASK_PMA1		BIT(15)
--#define VEND1_GLOBAL_INT_STD_MASK_PMA2		BIT(14)
--#define VEND1_GLOBAL_INT_STD_MASK_PCS1		BIT(13)
--#define VEND1_GLOBAL_INT_STD_MASK_PCS2		BIT(12)
--#define VEND1_GLOBAL_INT_STD_MASK_PCS3		BIT(11)
--#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1	BIT(10)
--#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2	BIT(9)
--#define VEND1_GLOBAL_INT_STD_MASK_AN1		BIT(8)
--#define VEND1_GLOBAL_INT_STD_MASK_AN2		BIT(7)
--#define VEND1_GLOBAL_INT_STD_MASK_GBE		BIT(6)
--#define VEND1_GLOBAL_INT_STD_MASK_ALL		BIT(0)
--
--#define VEND1_GLOBAL_INT_VEND_MASK		0xff01
--#define VEND1_GLOBAL_INT_VEND_MASK_PMA		BIT(15)
--#define VEND1_GLOBAL_INT_VEND_MASK_PCS		BIT(14)
--#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS	BIT(13)
--#define VEND1_GLOBAL_INT_VEND_MASK_AN		BIT(12)
--#define VEND1_GLOBAL_INT_VEND_MASK_GBE		BIT(11)
--#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1	BIT(2)
--#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2	BIT(1)
--#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3	BIT(0)
--
--/* Sleep and timeout for checking if the Processor-Intensive
-- * MDIO operation is finished
-- */
--#define AQR107_OP_IN_PROG_SLEEP		1000
--#define AQR107_OP_IN_PROG_TIMEOUT	100000
--
--struct aqr107_hw_stat {
--	const char *name;
--	int reg;
--	int size;
--};
--
--#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
--static const struct aqr107_hw_stat aqr107_hw_stats[] = {
--	SGMII_STAT("sgmii_rx_good_frames",	    RX_GOOD_FRAMES,	26),
--	SGMII_STAT("sgmii_rx_bad_frames",	    RX_BAD_FRAMES,	26),
--	SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER,	 8),
--	SGMII_STAT("sgmii_tx_good_frames",	    TX_GOOD_FRAMES,	26),
--	SGMII_STAT("sgmii_tx_bad_frames",	    TX_BAD_FRAMES,	26),
--	SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER,	 8),
--	SGMII_STAT("sgmii_tx_collisions",	    TX_COLLISIONS,	 8),
--	SGMII_STAT("sgmii_tx_line_collisions",	    TX_LINE_COLLISIONS,	 8),
--	SGMII_STAT("sgmii_tx_frame_alignment_err",  TX_FRAME_ALIGN_ERR,	16),
--	SGMII_STAT("sgmii_tx_runt_frames",	    TX_RUNT_FRAMES,	22),
--};
--#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
--
--struct aqr107_priv {
--	u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
--};
--
--static int aqr107_get_sset_count(struct phy_device *phydev)
--{
--	return AQR107_SGMII_STAT_SZ;
--}
--
--static void aqr107_get_strings(struct phy_device *phydev, u8 *data)
--{
--	int i;
--
--	for (i = 0; i < AQR107_SGMII_STAT_SZ; i++)
--		strscpy(data + i * ETH_GSTRING_LEN, aqr107_hw_stats[i].name,
--			ETH_GSTRING_LEN);
--}
--
--static u64 aqr107_get_stat(struct phy_device *phydev, int index)
--{
--	const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
--	int len_l = min(stat->size, 16);
--	int len_h = stat->size - len_l;
--	u64 ret;
--	int val;
--
--	val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
--	if (val < 0)
--		return U64_MAX;
--
--	ret = val & GENMASK(len_l - 1, 0);
--	if (len_h) {
--		val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
--		if (val < 0)
--			return U64_MAX;
--
--		ret += (val & GENMASK(len_h - 1, 0)) << 16;
--	}
--
--	return ret;
--}
--
--static void aqr107_get_stats(struct phy_device *phydev,
--			     struct ethtool_stats *stats, u64 *data)
--{
--	struct aqr107_priv *priv = phydev->priv;
--	u64 val;
--	int i;
--
--	for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) {
--		val = aqr107_get_stat(phydev, i);
--		if (val == U64_MAX)
--			phydev_err(phydev, "Reading HW Statistics failed for %s\n",
--				   aqr107_hw_stats[i].name);
--		else
--			priv->sgmii_stats[i] += val;
--
--		data[i] = priv->sgmii_stats[i];
--	}
--}
--
--static int aqr_config_aneg(struct phy_device *phydev)
--{
--	bool changed = false;
--	u16 reg;
--	int ret;
--
--	if (phydev->autoneg == AUTONEG_DISABLE)
--		return genphy_c45_pma_setup_forced(phydev);
--
--	ret = genphy_c45_an_config_aneg(phydev);
--	if (ret < 0)
--		return ret;
--	if (ret > 0)
--		changed = true;
--
--	/* Clause 45 has no standardized support for 1000BaseT, therefore
--	 * use vendor registers for this mode.
--	 */
--	reg = 0;
--	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
--			      phydev->advertising))
--		reg |= MDIO_AN_VEND_PROV_1000BASET_FULL;
--
--	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
--			      phydev->advertising))
--		reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
--
--	/* Handle the case when the 2.5G and 5G speeds are not advertised */
--	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
--			      phydev->advertising))
--		reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
--
--	if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
--			      phydev->advertising))
--		reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
--
--	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
--				     MDIO_AN_VEND_PROV_1000BASET_HALF |
--				     MDIO_AN_VEND_PROV_1000BASET_FULL |
--				     MDIO_AN_VEND_PROV_2500BASET_FULL |
--				     MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
--	if (ret < 0)
--		return ret;
--	if (ret > 0)
--		changed = true;
--
--	return genphy_c45_check_and_restart_aneg(phydev, changed);
--}
--
--static int aqr_config_intr(struct phy_device *phydev)
--{
--	bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
--	int err;
--
--	if (en) {
--		/* Clear any pending interrupts before enabling them */
--		err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
--		if (err < 0)
--			return err;
--	}
--
--	err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
--			    en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
--	if (err < 0)
--		return err;
--
--	err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_STD_MASK,
--			    en ? VEND1_GLOBAL_INT_STD_MASK_ALL : 0);
--	if (err < 0)
--		return err;
--
--	err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
--			    en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
--			    VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
--	if (err < 0)
--		return err;
--
--	if (!en) {
--		/* Clear any pending interrupts after we have disabled them */
--		err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
--		if (err < 0)
--			return err;
--	}
--
--	return 0;
--}
--
--static irqreturn_t aqr_handle_interrupt(struct phy_device *phydev)
--{
--	int irq_status;
--
--	irq_status = phy_read_mmd(phydev, MDIO_MMD_AN,
--				  MDIO_AN_TX_VEND_INT_STATUS2);
--	if (irq_status < 0) {
--		phy_error(phydev);
--		return IRQ_NONE;
--	}
--
--	if (!(irq_status & MDIO_AN_TX_VEND_INT_STATUS2_MASK))
--		return IRQ_NONE;
--
--	phy_trigger_machine(phydev);
--
--	return IRQ_HANDLED;
--}
--
--static int aqr_read_status(struct phy_device *phydev)
--{
--	int val;
--
--	if (phydev->autoneg == AUTONEG_ENABLE) {
--		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
--		if (val < 0)
--			return val;
--
--		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
--				 phydev->lp_advertising,
--				 val & MDIO_AN_RX_LP_STAT1_1000BASET_FULL);
--		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
--				 phydev->lp_advertising,
--				 val & MDIO_AN_RX_LP_STAT1_1000BASET_HALF);
--	}
--
--	return genphy_c45_read_status(phydev);
--}
--
--static int aqr107_read_rate(struct phy_device *phydev)
--{
--	u32 config_reg;
--	int val;
--
--	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
--	if (val < 0)
--		return val;
--
--	if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
--		phydev->duplex = DUPLEX_FULL;
--	else
--		phydev->duplex = DUPLEX_HALF;
--
--	switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
--	case MDIO_AN_TX_VEND_STATUS1_10BASET:
--		phydev->speed = SPEED_10;
--		config_reg = VEND1_GLOBAL_CFG_10M;
--		break;
--	case MDIO_AN_TX_VEND_STATUS1_100BASETX:
--		phydev->speed = SPEED_100;
--		config_reg = VEND1_GLOBAL_CFG_100M;
--		break;
--	case MDIO_AN_TX_VEND_STATUS1_1000BASET:
--		phydev->speed = SPEED_1000;
--		config_reg = VEND1_GLOBAL_CFG_1G;
--		break;
--	case MDIO_AN_TX_VEND_STATUS1_2500BASET:
--		phydev->speed = SPEED_2500;
--		config_reg = VEND1_GLOBAL_CFG_2_5G;
--		break;
--	case MDIO_AN_TX_VEND_STATUS1_5000BASET:
--		phydev->speed = SPEED_5000;
--		config_reg = VEND1_GLOBAL_CFG_5G;
--		break;
--	case MDIO_AN_TX_VEND_STATUS1_10GBASET:
--		phydev->speed = SPEED_10000;
--		config_reg = VEND1_GLOBAL_CFG_10G;
--		break;
--	default:
--		phydev->speed = SPEED_UNKNOWN;
--		return 0;
--	}
--
--	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
--	if (val < 0)
--		return val;
--
--	if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
--	    VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
--		phydev->rate_matching = RATE_MATCH_PAUSE;
--	else
--		phydev->rate_matching = RATE_MATCH_NONE;
--
--	return 0;
--}
--
--static int aqr107_read_status(struct phy_device *phydev)
--{
--	int val, ret;
--
--	ret = aqr_read_status(phydev);
--	if (ret)
--		return ret;
--
--	if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
--		return 0;
--
--	val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS);
--	if (val < 0)
--		return val;
--
--	switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
--	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
--		phydev->interface = PHY_INTERFACE_MODE_10GKR;
--		break;
--	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
--		phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
--		break;
--	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
--		phydev->interface = PHY_INTERFACE_MODE_10GBASER;
--		break;
--	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
--		phydev->interface = PHY_INTERFACE_MODE_USXGMII;
--		break;
--	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
--		phydev->interface = PHY_INTERFACE_MODE_XAUI;
--		break;
--	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
--		phydev->interface = PHY_INTERFACE_MODE_SGMII;
--		break;
--	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
--		phydev->interface = PHY_INTERFACE_MODE_RXAUI;
--		break;
--	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
--		phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
--		break;
--	default:
--		phydev->interface = PHY_INTERFACE_MODE_NA;
--		break;
--	}
--
--	/* Read possibly downshifted rate from vendor register */
--	return aqr107_read_rate(phydev);
--}
--
--static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
--{
--	int val, cnt, enable;
--
--	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV);
--	if (val < 0)
--		return val;
--
--	enable = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, val);
--	cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
--
--	*data = enable && cnt ? cnt : DOWNSHIFT_DEV_DISABLE;
--
--	return 0;
--}
--
--static int aqr107_set_downshift(struct phy_device *phydev, u8 cnt)
--{
--	int val = 0;
--
--	if (!FIELD_FIT(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt))
--		return -E2BIG;
--
--	if (cnt != DOWNSHIFT_DEV_DISABLE) {
--		val = MDIO_AN_VEND_PROV_DOWNSHIFT_EN;
--		val |= FIELD_PREP(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt);
--	}
--
--	return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
--			      MDIO_AN_VEND_PROV_DOWNSHIFT_EN |
--			      MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
--}
--
--static int aqr107_get_tunable(struct phy_device *phydev,
--			      struct ethtool_tunable *tuna, void *data)
--{
--	switch (tuna->id) {
--	case ETHTOOL_PHY_DOWNSHIFT:
--		return aqr107_get_downshift(phydev, data);
--	default:
--		return -EOPNOTSUPP;
--	}
--}
--
--static int aqr107_set_tunable(struct phy_device *phydev,
--			      struct ethtool_tunable *tuna, const void *data)
--{
--	switch (tuna->id) {
--	case ETHTOOL_PHY_DOWNSHIFT:
--		return aqr107_set_downshift(phydev, *(const u8 *)data);
--	default:
--		return -EOPNOTSUPP;
--	}
--}
--
--/* If we configure settings whilst firmware is still initializing the chip,
-- * then these settings may be overwritten. Therefore make sure chip
-- * initialization has completed. Use presence of the firmware ID as
-- * indicator for initialization having completed.
-- * The chip also provides a "reset completed" bit, but it's cleared after
-- * read. Therefore function would time out if called again.
-- */
--static int aqr107_wait_reset_complete(struct phy_device *phydev)
--{
--	int val;
--
--	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
--					 VEND1_GLOBAL_FW_ID, val, val != 0,
--					 20000, 2000000, false);
--}
--
--static void aqr107_chip_info(struct phy_device *phydev)
--{
--	u8 fw_major, fw_minor, build_id, prov_id;
--	int val;
--
--	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
--	if (val < 0)
--		return;
--
--	fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
--	fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
--
--	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
--	if (val < 0)
--		return;
--
--	build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
--	prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
--
--	phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
--		   fw_major, fw_minor, build_id, prov_id);
--}
--
--static int aqr107_config_init(struct phy_device *phydev)
--{
--	int ret;
--
--	/* Check that the PHY interface type is compatible */
--	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
--	    phydev->interface != PHY_INTERFACE_MODE_1000BASEKX &&
--	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
--	    phydev->interface != PHY_INTERFACE_MODE_XGMII &&
--	    phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
--	    phydev->interface != PHY_INTERFACE_MODE_10GKR &&
--	    phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
--	    phydev->interface != PHY_INTERFACE_MODE_XAUI &&
--	    phydev->interface != PHY_INTERFACE_MODE_RXAUI)
--		return -ENODEV;
--
--	WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
--	     "Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
--
--	ret = aqr107_wait_reset_complete(phydev);
--	if (!ret)
--		aqr107_chip_info(phydev);
--
--	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
--}
--
--static int aqcs109_config_init(struct phy_device *phydev)
--{
--	int ret;
--
--	/* Check that the PHY interface type is compatible */
--	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
--	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
--		return -ENODEV;
--
--	ret = aqr107_wait_reset_complete(phydev);
--	if (!ret)
--		aqr107_chip_info(phydev);
--
--	/* AQCS109 belongs to a chip family partially supporting 10G and 5G.
--	 * PMA speed ability bits are the same for all members of the family,
--	 * AQCS109 however supports speeds up to 2.5G only.
--	 */
--	phy_set_max_speed(phydev, SPEED_2500);
--
--	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
--}
--
--static void aqr107_link_change_notify(struct phy_device *phydev)
--{
--	u8 fw_major, fw_minor;
--	bool downshift, short_reach, afr;
--	int mode, val;
--
--	if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE)
--		return;
--
--	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
--	/* call failed or link partner is no Aquantia PHY */
--	if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY))
--		return;
--
--	short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH;
--	downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT;
--
--	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4);
--	if (val < 0)
--		return;
--
--	fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val);
--	fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val);
--
--	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3);
--	if (val < 0)
--		return;
--
--	afr = val & MDIO_AN_RX_VEND_STAT3_AFR;
--
--	phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n",
--		   fw_major, fw_minor,
--		   short_reach ? ", short reach mode" : "",
--		   downshift ? ", fast-retrain downshift advertised" : "",
--		   afr ? ", fast reframe advertised" : "");
--
--	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9);
--	if (val < 0)
--		return;
--
--	mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val);
--	if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2)
--		phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
--}
--
--static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
--{
--	int val, err;
--
--	/* The datasheet notes to wait at least 1ms after issuing a
--	 * processor intensive operation before checking.
--	 * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
--	 * because that just determines the maximum time slept, not the minimum.
--	 */
--	usleep_range(1000, 5000);
--
--	err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
--					VEND1_GLOBAL_GEN_STAT2, val,
--					!(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
--					AQR107_OP_IN_PROG_SLEEP,
--					AQR107_OP_IN_PROG_TIMEOUT, false);
--	if (err) {
--		phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
--		return err;
--	}
--
--	return 0;
--}
--
--static int aqr107_get_rate_matching(struct phy_device *phydev,
--				    phy_interface_t iface)
--{
--	if (iface == PHY_INTERFACE_MODE_10GBASER ||
--	    iface == PHY_INTERFACE_MODE_2500BASEX ||
--	    iface == PHY_INTERFACE_MODE_NA)
--		return RATE_MATCH_PAUSE;
--	return RATE_MATCH_NONE;
--}
--
--static int aqr107_suspend(struct phy_device *phydev)
--{
--	int err;
--
--	err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
--			       MDIO_CTRL1_LPOWER);
--	if (err)
--		return err;
--
--	return aqr107_wait_processor_intensive_op(phydev);
--}
--
--static int aqr107_resume(struct phy_device *phydev)
--{
--	int err;
--
--	err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
--				 MDIO_CTRL1_LPOWER);
--	if (err)
--		return err;
--
--	return aqr107_wait_processor_intensive_op(phydev);
--}
--
--static int aqr107_probe(struct phy_device *phydev)
--{
--	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
--				    sizeof(struct aqr107_priv), GFP_KERNEL);
--	if (!phydev->priv)
--		return -ENOMEM;
--
--	return aqr_hwmon_probe(phydev);
--}
--
--static struct phy_driver aqr_driver[] = {
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
--	.name		= "Aquantia AQ1202",
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.read_status	= aqr_read_status,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQ2104),
--	.name		= "Aquantia AQ2104",
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.read_status	= aqr_read_status,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
--	.name		= "Aquantia AQR105",
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.read_status	= aqr_read_status,
--	.suspend	= aqr107_suspend,
--	.resume		= aqr107_resume,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
--	.name		= "Aquantia AQR106",
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.read_status	= aqr_read_status,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
--	.name		= "Aquantia AQR107",
--	.probe		= aqr107_probe,
--	.get_rate_matching = aqr107_get_rate_matching,
--	.config_init	= aqr107_config_init,
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.read_status	= aqr107_read_status,
--	.get_tunable    = aqr107_get_tunable,
--	.set_tunable    = aqr107_set_tunable,
--	.suspend	= aqr107_suspend,
--	.resume		= aqr107_resume,
--	.get_sset_count	= aqr107_get_sset_count,
--	.get_strings	= aqr107_get_strings,
--	.get_stats	= aqr107_get_stats,
--	.link_change_notify = aqr107_link_change_notify,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
--	.name		= "Aquantia AQCS109",
--	.probe		= aqr107_probe,
--	.get_rate_matching = aqr107_get_rate_matching,
--	.config_init	= aqcs109_config_init,
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.read_status	= aqr107_read_status,
--	.get_tunable    = aqr107_get_tunable,
--	.set_tunable    = aqr107_set_tunable,
--	.suspend	= aqr107_suspend,
--	.resume		= aqr107_resume,
--	.get_sset_count	= aqr107_get_sset_count,
--	.get_strings	= aqr107_get_strings,
--	.get_stats	= aqr107_get_stats,
--	.link_change_notify = aqr107_link_change_notify,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
--	.name		= "Aquantia AQR405",
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.read_status	= aqr_read_status,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQR112),
--	.name		= "Aquantia AQR112",
--	.probe		= aqr107_probe,
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.get_tunable    = aqr107_get_tunable,
--	.set_tunable    = aqr107_set_tunable,
--	.suspend	= aqr107_suspend,
--	.resume		= aqr107_resume,
--	.read_status	= aqr107_read_status,
--	.get_rate_matching = aqr107_get_rate_matching,
--	.get_sset_count = aqr107_get_sset_count,
--	.get_strings	= aqr107_get_strings,
--	.get_stats	= aqr107_get_stats,
--	.link_change_notify = aqr107_link_change_notify,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
--	.name		= "Aquantia AQR412",
--	.probe		= aqr107_probe,
--	.config_aneg    = aqr_config_aneg,
--	.config_intr	= aqr_config_intr,
--	.handle_interrupt = aqr_handle_interrupt,
--	.get_tunable    = aqr107_get_tunable,
--	.set_tunable    = aqr107_set_tunable,
--	.suspend	= aqr107_suspend,
--	.resume		= aqr107_resume,
--	.read_status	= aqr107_read_status,
--	.get_rate_matching = aqr107_get_rate_matching,
--	.get_sset_count = aqr107_get_sset_count,
--	.get_strings	= aqr107_get_strings,
--	.get_stats	= aqr107_get_stats,
--	.link_change_notify = aqr107_link_change_notify,
--},
--{
--	PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
--	.name           = "Aquantia AQR113C",
--	.probe          = aqr107_probe,
--	.get_rate_matching = aqr107_get_rate_matching,
--	.config_init    = aqr107_config_init,
--	.config_aneg    = aqr_config_aneg,
--	.config_intr    = aqr_config_intr,
--	.handle_interrupt       = aqr_handle_interrupt,
--	.read_status    = aqr107_read_status,
--	.get_tunable    = aqr107_get_tunable,
--	.set_tunable    = aqr107_set_tunable,
--	.suspend        = aqr107_suspend,
--	.resume         = aqr107_resume,
--	.get_sset_count = aqr107_get_sset_count,
--	.get_strings    = aqr107_get_strings,
--	.get_stats      = aqr107_get_stats,
--	.link_change_notify = aqr107_link_change_notify,
--},
--};
--
--module_phy_driver(aqr_driver);
--
--static struct mdio_device_id __maybe_unused aqr_tbl[] = {
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR106) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
--	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
--	{ }
--};
--
--MODULE_DEVICE_TABLE(mdio, aqr_tbl);
--
--MODULE_DESCRIPTION("Aquantia PHY driver");
--MODULE_AUTHOR("Shaohui Xie <[email protected]>");
--MODULE_LICENSE("GPL v2");

+ 0 - 183
target/linux/generic/backport-6.6/702-02-v6.7-net-phy-aquantia-move-MMD_VEND-define-to-header.patch

@@ -1,183 +0,0 @@
-From e1fbfa4a995d42e02e22b0dff2f8b4fdee1504b3 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Tue, 14 Nov 2023 15:08:42 +0100
-Subject: [PATCH 2/3] net: phy: aquantia: move MMD_VEND define to header
-
-Move MMD_VEND define to header to clean things up and in preparation for
-firmware loading support that require some define placed in
-aquantia_main.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- drivers/net/phy/aquantia/aquantia.h       | 69 +++++++++++++++++++++++
- drivers/net/phy/aquantia/aquantia_hwmon.c | 14 -----
- drivers/net/phy/aquantia/aquantia_main.c  | 55 ------------------
- 3 files changed, 69 insertions(+), 69 deletions(-)
-
---- a/drivers/net/phy/aquantia/aquantia.h
-+++ b/drivers/net/phy/aquantia/aquantia.h
-@@ -9,6 +9,75 @@
- #include <linux/device.h>
- #include <linux/phy.h>
- 
-+/* Vendor specific 1, MDIO_MMD_VEND1 */
-+#define VEND1_GLOBAL_FW_ID			0x0020
-+#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
-+#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
-+
-+/* The following registers all have similar layouts; first the registers... */
-+#define VEND1_GLOBAL_CFG_10M			0x0310
-+#define VEND1_GLOBAL_CFG_100M			0x031b
-+#define VEND1_GLOBAL_CFG_1G			0x031c
-+#define VEND1_GLOBAL_CFG_2_5G			0x031d
-+#define VEND1_GLOBAL_CFG_5G			0x031e
-+#define VEND1_GLOBAL_CFG_10G			0x031f
-+/* ...and now the fields */
-+#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
-+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
-+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
-+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
-+
-+/* Vendor specific 1, MDIO_MMD_VEND2 */
-+#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
-+#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
-+#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
-+#define VEND1_THERMAL_PROV_LOW_TEMP_WARN	0xc424
-+#define VEND1_THERMAL_STAT1			0xc820
-+#define VEND1_THERMAL_STAT2			0xc821
-+#define VEND1_THERMAL_STAT2_VALID		BIT(0)
-+#define VEND1_GENERAL_STAT1			0xc830
-+#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL	BIT(14)
-+#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL	BIT(13)
-+#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN	BIT(12)
-+#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN	BIT(11)
-+
-+#define VEND1_GLOBAL_GEN_STAT2			0xc831
-+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
-+
-+#define VEND1_GLOBAL_RSVD_STAT1			0xc885
-+#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
-+#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
-+
-+#define VEND1_GLOBAL_RSVD_STAT9			0xc88d
-+#define VEND1_GLOBAL_RSVD_STAT9_MODE		GENMASK(7, 0)
-+#define VEND1_GLOBAL_RSVD_STAT9_1000BT2		0x23
-+
-+#define VEND1_GLOBAL_INT_STD_STATUS		0xfc00
-+#define VEND1_GLOBAL_INT_VEND_STATUS		0xfc01
-+
-+#define VEND1_GLOBAL_INT_STD_MASK		0xff00
-+#define VEND1_GLOBAL_INT_STD_MASK_PMA1		BIT(15)
-+#define VEND1_GLOBAL_INT_STD_MASK_PMA2		BIT(14)
-+#define VEND1_GLOBAL_INT_STD_MASK_PCS1		BIT(13)
-+#define VEND1_GLOBAL_INT_STD_MASK_PCS2		BIT(12)
-+#define VEND1_GLOBAL_INT_STD_MASK_PCS3		BIT(11)
-+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1	BIT(10)
-+#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2	BIT(9)
-+#define VEND1_GLOBAL_INT_STD_MASK_AN1		BIT(8)
-+#define VEND1_GLOBAL_INT_STD_MASK_AN2		BIT(7)
-+#define VEND1_GLOBAL_INT_STD_MASK_GBE		BIT(6)
-+#define VEND1_GLOBAL_INT_STD_MASK_ALL		BIT(0)
-+
-+#define VEND1_GLOBAL_INT_VEND_MASK		0xff01
-+#define VEND1_GLOBAL_INT_VEND_MASK_PMA		BIT(15)
-+#define VEND1_GLOBAL_INT_VEND_MASK_PCS		BIT(14)
-+#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS	BIT(13)
-+#define VEND1_GLOBAL_INT_VEND_MASK_AN		BIT(12)
-+#define VEND1_GLOBAL_INT_VEND_MASK_GBE		BIT(11)
-+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1	BIT(2)
-+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2	BIT(1)
-+#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3	BIT(0)
-+
- #if IS_REACHABLE(CONFIG_HWMON)
- int aqr_hwmon_probe(struct phy_device *phydev);
- #else
---- a/drivers/net/phy/aquantia/aquantia_hwmon.c
-+++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
-@@ -13,20 +13,6 @@
- 
- #include "aquantia.h"
- 
--/* Vendor specific 1, MDIO_MMD_VEND2 */
--#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
--#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
--#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
--#define VEND1_THERMAL_PROV_LOW_TEMP_WARN	0xc424
--#define VEND1_THERMAL_STAT1			0xc820
--#define VEND1_THERMAL_STAT2			0xc821
--#define VEND1_THERMAL_STAT2_VALID		BIT(0)
--#define VEND1_GENERAL_STAT1			0xc830
--#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL	BIT(14)
--#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL	BIT(13)
--#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN	BIT(12)
--#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN	BIT(11)
--
- #if IS_REACHABLE(CONFIG_HWMON)
- 
- static umode_t aqr_hwmon_is_visible(const void *data,
---- a/drivers/net/phy/aquantia/aquantia_main.c
-+++ b/drivers/net/phy/aquantia/aquantia_main.c
-@@ -91,61 +91,6 @@
- #define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR	0xd31a
- #define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES		0xd31b
- 
--/* Vendor specific 1, MDIO_MMD_VEND1 */
--#define VEND1_GLOBAL_FW_ID			0x0020
--#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
--#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
--
--#define VEND1_GLOBAL_GEN_STAT2			0xc831
--#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
--
--/* The following registers all have similar layouts; first the registers... */
--#define VEND1_GLOBAL_CFG_10M			0x0310
--#define VEND1_GLOBAL_CFG_100M			0x031b
--#define VEND1_GLOBAL_CFG_1G			0x031c
--#define VEND1_GLOBAL_CFG_2_5G			0x031d
--#define VEND1_GLOBAL_CFG_5G			0x031e
--#define VEND1_GLOBAL_CFG_10G			0x031f
--/* ...and now the fields */
--#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
--#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
--#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
--#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
--
--#define VEND1_GLOBAL_RSVD_STAT1			0xc885
--#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
--#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
--
--#define VEND1_GLOBAL_RSVD_STAT9			0xc88d
--#define VEND1_GLOBAL_RSVD_STAT9_MODE		GENMASK(7, 0)
--#define VEND1_GLOBAL_RSVD_STAT9_1000BT2		0x23
--
--#define VEND1_GLOBAL_INT_STD_STATUS		0xfc00
--#define VEND1_GLOBAL_INT_VEND_STATUS		0xfc01
--
--#define VEND1_GLOBAL_INT_STD_MASK		0xff00
--#define VEND1_GLOBAL_INT_STD_MASK_PMA1		BIT(15)
--#define VEND1_GLOBAL_INT_STD_MASK_PMA2		BIT(14)
--#define VEND1_GLOBAL_INT_STD_MASK_PCS1		BIT(13)
--#define VEND1_GLOBAL_INT_STD_MASK_PCS2		BIT(12)
--#define VEND1_GLOBAL_INT_STD_MASK_PCS3		BIT(11)
--#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1	BIT(10)
--#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2	BIT(9)
--#define VEND1_GLOBAL_INT_STD_MASK_AN1		BIT(8)
--#define VEND1_GLOBAL_INT_STD_MASK_AN2		BIT(7)
--#define VEND1_GLOBAL_INT_STD_MASK_GBE		BIT(6)
--#define VEND1_GLOBAL_INT_STD_MASK_ALL		BIT(0)
--
--#define VEND1_GLOBAL_INT_VEND_MASK		0xff01
--#define VEND1_GLOBAL_INT_VEND_MASK_PMA		BIT(15)
--#define VEND1_GLOBAL_INT_VEND_MASK_PCS		BIT(14)
--#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS	BIT(13)
--#define VEND1_GLOBAL_INT_VEND_MASK_AN		BIT(12)
--#define VEND1_GLOBAL_INT_VEND_MASK_GBE		BIT(11)
--#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1	BIT(2)
--#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2	BIT(1)
--#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3	BIT(0)
--
- /* Sleep and timeout for checking if the Processor-Intensive
-  * MDIO operation is finished
-  */

+ 0 - 504
target/linux/generic/backport-6.6/702-03-v6.7-net-phy-aquantia-add-firmware-load-support.patch

@@ -1,504 +0,0 @@
-From e93984ebc1c82bd34f7a1b3391efaceee0a8ae96 Mon Sep 17 00:00:00 2001
-From: Robert Marko <[email protected]>
-Date: Tue, 14 Nov 2023 15:08:43 +0100
-Subject: [PATCH 3/3] net: phy: aquantia: add firmware load support
-
-Aquantia PHY-s require firmware to be loaded before they start operating.
-It can be automatically loaded in case when there is a SPI-NOR connected
-to Aquantia PHY-s or can be loaded from the host via MDIO.
-
-This patch adds support for loading the firmware via MDIO as in most cases
-there is no SPI-NOR being used to save on cost.
-Firmware loading code itself is ported from mainline U-boot with cleanups.
-
-The firmware has mixed values both in big and little endian.
-PHY core itself is big-endian but it expects values to be in little-endian.
-The firmware is little-endian but CRC-16 value for it is stored at the end
-of firmware in big-endian.
-
-It seems the PHY does the conversion internally from firmware that is
-little-endian to the PHY that is big-endian on using the mailbox
-but mailbox returns a big-endian CRC-16 to verify the written data
-integrity.
-
-Co-developed-by: Christian Marangi <[email protected]>
-Signed-off-by: Robert Marko <[email protected]>
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- drivers/net/phy/aquantia/Kconfig             |   1 +
- drivers/net/phy/aquantia/Makefile            |   2 +-
- drivers/net/phy/aquantia/aquantia.h          |  32 ++
- drivers/net/phy/aquantia/aquantia_firmware.c | 370 +++++++++++++++++++
- drivers/net/phy/aquantia/aquantia_main.c     |   6 +
- 5 files changed, 410 insertions(+), 1 deletion(-)
- create mode 100644 drivers/net/phy/aquantia/aquantia_firmware.c
-
---- a/drivers/net/phy/aquantia/Kconfig
-+++ b/drivers/net/phy/aquantia/Kconfig
-@@ -1,5 +1,6 @@
- # SPDX-License-Identifier: GPL-2.0-only
- config AQUANTIA_PHY
- 	tristate "Aquantia PHYs"
-+	select CRC_CCITT
- 	help
- 	  Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
---- a/drivers/net/phy/aquantia/Makefile
-+++ b/drivers/net/phy/aquantia/Makefile
-@@ -1,5 +1,5 @@
- # SPDX-License-Identifier: GPL-2.0
--aquantia-objs			+= aquantia_main.o
-+aquantia-objs			+= aquantia_main.o aquantia_firmware.o
- ifdef CONFIG_HWMON
- aquantia-objs			+= aquantia_hwmon.o
- endif
---- a/drivers/net/phy/aquantia/aquantia.h
-+++ b/drivers/net/phy/aquantia/aquantia.h
-@@ -10,10 +10,35 @@
- #include <linux/phy.h>
- 
- /* Vendor specific 1, MDIO_MMD_VEND1 */
-+#define VEND1_GLOBAL_SC				0x0
-+#define VEND1_GLOBAL_SC_SOFT_RESET		BIT(15)
-+#define VEND1_GLOBAL_SC_LOW_POWER		BIT(11)
-+
- #define VEND1_GLOBAL_FW_ID			0x0020
- #define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
- #define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
- 
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE1			0x0200
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_EXECUTE		BIT(15)
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_WRITE		BIT(14)
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_CRC_RESET	BIT(12)
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE1_BUSY		BIT(8)
-+
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE2			0x0201
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE3			0x0202
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR_MASK	GENMASK(15, 0)
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR(x)	FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR_MASK, (u16)((x) >> 16))
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE4			0x0203
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR_MASK	GENMASK(15, 2)
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR(x)	FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR_MASK, (u16)(x))
-+
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE5			0x0204
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA_MASK	GENMASK(15, 0)
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA(x)	FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA_MASK, (u16)((x) >> 16))
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE6			0x0205
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA_MASK	GENMASK(15, 0)
-+#define VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA(x)	FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA_MASK, (u16)(x))
-+
- /* The following registers all have similar layouts; first the registers... */
- #define VEND1_GLOBAL_CFG_10M			0x0310
- #define VEND1_GLOBAL_CFG_100M			0x031b
-@@ -28,6 +53,11 @@
- #define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
- 
- /* Vendor specific 1, MDIO_MMD_VEND2 */
-+#define VEND1_GLOBAL_CONTROL2			0xc001
-+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_RST	BIT(15)
-+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD	BIT(6)
-+#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL	BIT(0)
-+
- #define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
- #define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
- #define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
-@@ -83,3 +113,5 @@ int aqr_hwmon_probe(struct phy_device *p
- #else
- static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
- #endif
-+
-+int aqr_firmware_load(struct phy_device *phydev);
---- /dev/null
-+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
-@@ -0,0 +1,370 @@
-+// SPDX-License-Identifier: GPL-2.0
-+
-+#include <linux/bitfield.h>
-+#include <linux/of.h>
-+#include <linux/firmware.h>
-+#include <linux/crc-ccitt.h>
-+#include <linux/nvmem-consumer.h>
-+
-+#include <asm/unaligned.h>
-+
-+#include "aquantia.h"
-+
-+#define UP_RESET_SLEEP		100
-+
-+/* addresses of memory segments in the phy */
-+#define DRAM_BASE_ADDR		0x3FFE0000
-+#define IRAM_BASE_ADDR		0x40000000
-+
-+/* firmware image format constants */
-+#define VERSION_STRING_SIZE		0x40
-+#define VERSION_STRING_OFFSET		0x0200
-+/* primary offset is written at an offset from the start of the fw blob */
-+#define PRIMARY_OFFSET_OFFSET		0x8
-+/* primary offset needs to be then added to a base offset */
-+#define PRIMARY_OFFSET_SHIFT		12
-+#define PRIMARY_OFFSET(x)		((x) << PRIMARY_OFFSET_SHIFT)
-+#define HEADER_OFFSET			0x300
-+
-+struct aqr_fw_header {
-+	u32 padding;
-+	u8 iram_offset[3];
-+	u8 iram_size[3];
-+	u8 dram_offset[3];
-+	u8 dram_size[3];
-+} __packed;
-+
-+enum aqr_fw_src {
-+	AQR_FW_SRC_NVMEM = 0,
-+	AQR_FW_SRC_FS,
-+};
-+
-+static const char * const aqr_fw_src_string[] = {
-+	[AQR_FW_SRC_NVMEM] = "NVMEM",
-+	[AQR_FW_SRC_FS] = "FS",
-+};
-+
-+/* AQR firmware doesn't have fixed offsets for iram and dram section
-+ * but instead provide an header with the offset to use on reading
-+ * and parsing the firmware.
-+ *
-+ * AQR firmware can't be trusted and each offset is validated to be
-+ * not negative and be in the size of the firmware itself.
-+ */
-+static bool aqr_fw_validate_get(size_t size, size_t offset, size_t get_size)
-+{
-+	return offset + get_size <= size;
-+}
-+
-+static int aqr_fw_get_be16(const u8 *data, size_t offset, size_t size, u16 *value)
-+{
-+	if (!aqr_fw_validate_get(size, offset, sizeof(u16)))
-+		return -EINVAL;
-+
-+	*value = get_unaligned_be16(data + offset);
-+
-+	return 0;
-+}
-+
-+static int aqr_fw_get_le16(const u8 *data, size_t offset, size_t size, u16 *value)
-+{
-+	if (!aqr_fw_validate_get(size, offset, sizeof(u16)))
-+		return -EINVAL;
-+
-+	*value = get_unaligned_le16(data + offset);
-+
-+	return 0;
-+}
-+
-+static int aqr_fw_get_le24(const u8 *data, size_t offset, size_t size, u32 *value)
-+{
-+	if (!aqr_fw_validate_get(size, offset, sizeof(u8) * 3))
-+		return -EINVAL;
-+
-+	*value = get_unaligned_le24(data + offset);
-+
-+	return 0;
-+}
-+
-+/* load data into the phy's memory */
-+static int aqr_fw_load_memory(struct phy_device *phydev, u32 addr,
-+			      const u8 *data, size_t len)
-+{
-+	u16 crc = 0, up_crc;
-+	size_t pos;
-+
-+	/* PHY expect addr in LE */
-+	addr = (__force u32)cpu_to_le32(addr);
-+
-+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
-+		      VEND1_GLOBAL_MAILBOX_INTERFACE1,
-+		      VEND1_GLOBAL_MAILBOX_INTERFACE1_CRC_RESET);
-+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
-+		      VEND1_GLOBAL_MAILBOX_INTERFACE3,
-+		      VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR(addr));
-+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
-+		      VEND1_GLOBAL_MAILBOX_INTERFACE4,
-+		      VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR(addr));
-+
-+	/* We assume and enforce the size to be word aligned.
-+	 * If a firmware that is not word aligned is found, please report upstream.
-+	 */
-+	for (pos = 0; pos < len; pos += sizeof(u32)) {
-+		u32 word;
-+
-+		/* FW data is always stored in little-endian */
-+		word = get_unaligned((const u32 *)(data + pos));
-+
-+		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE5,
-+			      VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA(word));
-+		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE6,
-+			      VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA(word));
-+
-+		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE1,
-+			      VEND1_GLOBAL_MAILBOX_INTERFACE1_EXECUTE |
-+			      VEND1_GLOBAL_MAILBOX_INTERFACE1_WRITE);
-+
-+		/* calculate CRC as we load data to the mailbox.
-+		 * We convert word to big-endian as PHY is BE and mailbox will
-+		 * return a BE CRC.
-+		 */
-+		word = (__force u32)cpu_to_be32(word);
-+		crc = crc_ccitt_false(crc, (u8 *)&word, sizeof(word));
-+	}
-+
-+	up_crc = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE2);
-+	if (crc != up_crc) {
-+		phydev_err(phydev, "CRC mismatch: calculated 0x%04x PHY 0x%04x\n",
-+			   crc, up_crc);
-+		return -EINVAL;
-+	}
-+
-+	return 0;
-+}
-+
-+static int aqr_fw_boot(struct phy_device *phydev, const u8 *data, size_t size,
-+		       enum aqr_fw_src fw_src)
-+{
-+	u16 calculated_crc, read_crc, read_primary_offset;
-+	u32 iram_offset = 0, iram_size = 0;
-+	u32 dram_offset = 0, dram_size = 0;
-+	char version[VERSION_STRING_SIZE];
-+	u32 primary_offset = 0;
-+	int ret;
-+
-+	/* extract saved CRC at the end of the fw
-+	 * CRC is saved in big-endian as PHY is BE
-+	 */
-+	ret = aqr_fw_get_be16(data, size - sizeof(u16), size, &read_crc);
-+	if (ret) {
-+		phydev_err(phydev, "bad firmware CRC in firmware\n");
-+		return ret;
-+	}
-+	calculated_crc = crc_ccitt_false(0, data, size - sizeof(u16));
-+	if (read_crc != calculated_crc) {
-+		phydev_err(phydev, "bad firmware CRC: file 0x%04x calculated 0x%04x\n",
-+			   read_crc, calculated_crc);
-+		return -EINVAL;
-+	}
-+
-+	/* Get the primary offset to extract DRAM and IRAM sections. */
-+	ret = aqr_fw_get_le16(data, PRIMARY_OFFSET_OFFSET, size, &read_primary_offset);
-+	if (ret) {
-+		phydev_err(phydev, "bad primary offset in firmware\n");
-+		return ret;
-+	}
-+	primary_offset = PRIMARY_OFFSET(read_primary_offset);
-+
-+	/* Find the DRAM and IRAM sections within the firmware file.
-+	 * Make sure the fw_header is correctly in the firmware.
-+	 */
-+	if (!aqr_fw_validate_get(size, primary_offset + HEADER_OFFSET,
-+				 sizeof(struct aqr_fw_header))) {
-+		phydev_err(phydev, "bad fw_header in firmware\n");
-+		return -EINVAL;
-+	}
-+
-+	/* offset are in LE and values needs to be converted to cpu endian */
-+	ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
-+			      offsetof(struct aqr_fw_header, iram_offset),
-+			      size, &iram_offset);
-+	if (ret) {
-+		phydev_err(phydev, "bad iram offset in firmware\n");
-+		return ret;
-+	}
-+	ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
-+			      offsetof(struct aqr_fw_header, iram_size),
-+			      size, &iram_size);
-+	if (ret) {
-+		phydev_err(phydev, "invalid iram size in firmware\n");
-+		return ret;
-+	}
-+	ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
-+			      offsetof(struct aqr_fw_header, dram_offset),
-+			      size, &dram_offset);
-+	if (ret) {
-+		phydev_err(phydev, "bad dram offset in firmware\n");
-+		return ret;
-+	}
-+	ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
-+			      offsetof(struct aqr_fw_header, dram_size),
-+			      size, &dram_size);
-+	if (ret) {
-+		phydev_err(phydev, "invalid dram size in firmware\n");
-+		return ret;
-+	}
-+
-+	/* Increment the offset with the primary offset.
-+	 * Validate iram/dram offset and size.
-+	 */
-+	iram_offset += primary_offset;
-+	if (iram_size % sizeof(u32)) {
-+		phydev_err(phydev, "iram size if not aligned to word size. Please report this upstream!\n");
-+		return -EINVAL;
-+	}
-+	if (!aqr_fw_validate_get(size, iram_offset, iram_size)) {
-+		phydev_err(phydev, "invalid iram offset for iram size\n");
-+		return -EINVAL;
-+	}
-+
-+	dram_offset += primary_offset;
-+	if (dram_size % sizeof(u32)) {
-+		phydev_err(phydev, "dram size if not aligned to word size. Please report this upstream!\n");
-+		return -EINVAL;
-+	}
-+	if (!aqr_fw_validate_get(size, dram_offset, dram_size)) {
-+		phydev_err(phydev, "invalid iram offset for iram size\n");
-+		return -EINVAL;
-+	}
-+
-+	phydev_dbg(phydev, "primary %d IRAM offset=%d size=%d DRAM offset=%d size=%d\n",
-+		   primary_offset, iram_offset, iram_size, dram_offset, dram_size);
-+
-+	if (!aqr_fw_validate_get(size, dram_offset + VERSION_STRING_OFFSET,
-+				 VERSION_STRING_SIZE)) {
-+		phydev_err(phydev, "invalid version in firmware\n");
-+		return -EINVAL;
-+	}
-+	strscpy(version, (char *)data + dram_offset + VERSION_STRING_OFFSET,
-+		VERSION_STRING_SIZE);
-+	if (version[0] == '\0') {
-+		phydev_err(phydev, "invalid version in firmware\n");
-+		return -EINVAL;
-+	}
-+	phydev_info(phydev, "loading firmware version '%s' from '%s'\n", version,
-+		    aqr_fw_src_string[fw_src]);
-+
-+	/* stall the microcprocessor */
-+	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
-+		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL | VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD);
-+
-+	phydev_dbg(phydev, "loading DRAM 0x%08x from offset=%d size=%d\n",
-+		   DRAM_BASE_ADDR, dram_offset, dram_size);
-+	ret = aqr_fw_load_memory(phydev, DRAM_BASE_ADDR, data + dram_offset,
-+				 dram_size);
-+	if (ret)
-+		return ret;
-+
-+	phydev_dbg(phydev, "loading IRAM 0x%08x from offset=%d size=%d\n",
-+		   IRAM_BASE_ADDR, iram_offset, iram_size);
-+	ret = aqr_fw_load_memory(phydev, IRAM_BASE_ADDR, data + iram_offset,
-+				 iram_size);
-+	if (ret)
-+		return ret;
-+
-+	/* make sure soft reset and low power mode are clear */
-+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_SC,
-+			   VEND1_GLOBAL_SC_SOFT_RESET | VEND1_GLOBAL_SC_LOW_POWER);
-+
-+	/* Release the microprocessor. UP_RESET must be held for 100 usec. */
-+	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
-+		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL |
-+		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD |
-+		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_RST);
-+	usleep_range(UP_RESET_SLEEP, UP_RESET_SLEEP * 2);
-+
-+	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
-+		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD);
-+
-+	return 0;
-+}
-+
-+static int aqr_firmware_load_nvmem(struct phy_device *phydev)
-+{
-+	struct nvmem_cell *cell;
-+	size_t size;
-+	u8 *buf;
-+	int ret;
-+
-+	cell = nvmem_cell_get(&phydev->mdio.dev, "firmware");
-+	if (IS_ERR(cell))
-+		return PTR_ERR(cell);
-+
-+	buf = nvmem_cell_read(cell, &size);
-+	if (IS_ERR(buf)) {
-+		ret = PTR_ERR(buf);
-+		goto exit;
-+	}
-+
-+	ret = aqr_fw_boot(phydev, buf, size, AQR_FW_SRC_NVMEM);
-+	if (ret)
-+		phydev_err(phydev, "firmware loading failed: %d\n", ret);
-+
-+	kfree(buf);
-+exit:
-+	nvmem_cell_put(cell);
-+
-+	return ret;
-+}
-+
-+static int aqr_firmware_load_fs(struct phy_device *phydev)
-+{
-+	struct device *dev = &phydev->mdio.dev;
-+	const struct firmware *fw;
-+	const char *fw_name;
-+	int ret;
-+
-+	ret = of_property_read_string(dev->of_node, "firmware-name",
-+				      &fw_name);
-+	if (ret)
-+		return ret;
-+
-+	ret = request_firmware(&fw, fw_name, dev);
-+	if (ret) {
-+		phydev_err(phydev, "failed to find FW file %s (%d)\n",
-+			   fw_name, ret);
-+		return ret;
-+	}
-+
-+	ret = aqr_fw_boot(phydev, fw->data, fw->size, AQR_FW_SRC_FS);
-+	if (ret)
-+		phydev_err(phydev, "firmware loading failed: %d\n", ret);
-+
-+	release_firmware(fw);
-+
-+	return ret;
-+}
-+
-+int aqr_firmware_load(struct phy_device *phydev)
-+{
-+	int ret;
-+
-+	/* Check if the firmware is not already loaded by pooling
-+	 * the current version returned by the PHY. If 0 is returned,
-+	 * no firmware is loaded.
-+	 */
-+	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
-+	if (ret > 0)
-+		goto exit;
-+
-+	ret = aqr_firmware_load_nvmem(phydev);
-+	if (!ret)
-+		goto exit;
-+
-+	ret = aqr_firmware_load_fs(phydev);
-+	if (ret)
-+		return ret;
-+
-+exit:
-+	return 0;
-+}
---- a/drivers/net/phy/aquantia/aquantia_main.c
-+++ b/drivers/net/phy/aquantia/aquantia_main.c
-@@ -658,11 +658,17 @@ static int aqr107_resume(struct phy_devi
- 
- static int aqr107_probe(struct phy_device *phydev)
- {
-+	int ret;
-+
- 	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
- 				    sizeof(struct aqr107_priv), GFP_KERNEL);
- 	if (!phydev->priv)
- 		return -ENOMEM;
- 
-+	ret = aqr_firmware_load(phydev);
-+	if (ret)
-+		return ret;
-+
- 	return aqr_hwmon_probe(phydev);
- }
- 

+ 0 - 101
target/linux/generic/backport-6.6/703-v6.10-flow_offload-add-control-flag-checking-helpers.patch

@@ -1,101 +0,0 @@
-From d11e63119432bdb55065d094cb6fd37e9147c70d Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Asbj=C3=B8rn=20Sloth=20T=C3=B8nnesen?= <[email protected]>
-Date: Thu, 11 Apr 2024 10:52:54 +0000
-Subject: [PATCH] flow_offload: add control flag checking helpers
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-These helpers aim to help drivers, with checking
-for the presence of unsupported control flags.
-
-For drivers supporting at least one control flag:
-  flow_rule_is_supp_control_flags()
-
-For drivers using flow_rule_match_control(), but not using flags:
-  flow_rule_has_control_flags()
-
-For drivers not using flow_rule_match_control():
-  flow_rule_match_has_control_flags()
-
-While primarily aimed at FLOW_DISSECTOR_KEY_CONTROL
-and flow_rule_match_control(), then the first two
-can also be used with FLOW_DISSECTOR_KEY_ENC_CONTROL
-and flow_rule_match_enc_control().
-
-These helpers mirrors the existing check done in sfc:
-  drivers/net/ethernet/sfc/tc.c +276
-
-Only compile-tested.
-
-Signed-off-by: Asbjørn Sloth Tønnesen <[email protected]>
-Reviewed-by: Louis Peens <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- include/net/flow_offload.h | 55 ++++++++++++++++++++++++++++++++++++++
- 1 file changed, 55 insertions(+)
-
---- a/include/net/flow_offload.h
-+++ b/include/net/flow_offload.h
-@@ -449,6 +449,61 @@ static inline bool flow_rule_match_key(c
- 	return dissector_uses_key(rule->match.dissector, key);
- }
- 
-+/**
-+ * flow_rule_is_supp_control_flags() - check for supported control flags
-+ * @supp_flags: control flags supported by driver
-+ * @ctrl_flags: control flags present in rule
-+ * @extack: The netlink extended ACK for reporting errors.
-+ *
-+ * Return: true if only supported control flags are set, false otherwise.
-+ */
-+static inline bool flow_rule_is_supp_control_flags(const u32 supp_flags,
-+						   const u32 ctrl_flags,
-+						   struct netlink_ext_ack *extack)
-+{
-+	if (likely((ctrl_flags & ~supp_flags) == 0))
-+		return true;
-+
-+	NL_SET_ERR_MSG_FMT_MOD(extack,
-+			       "Unsupported match on control.flags %#x",
-+			       ctrl_flags);
-+
-+	return false;
-+}
-+
-+/**
-+ * flow_rule_has_control_flags() - check for presence of any control flags
-+ * @ctrl_flags: control flags present in rule
-+ * @extack: The netlink extended ACK for reporting errors.
-+ *
-+ * Return: true if control flags are set, false otherwise.
-+ */
-+static inline bool flow_rule_has_control_flags(const u32 ctrl_flags,
-+					       struct netlink_ext_ack *extack)
-+{
-+	return !flow_rule_is_supp_control_flags(0, ctrl_flags, extack);
-+}
-+
-+/**
-+ * flow_rule_match_has_control_flags() - match and check for any control flags
-+ * @rule: The flow_rule under evaluation.
-+ * @extack: The netlink extended ACK for reporting errors.
-+ *
-+ * Return: true if control flags are set, false otherwise.
-+ */
-+static inline bool flow_rule_match_has_control_flags(struct flow_rule *rule,
-+						     struct netlink_ext_ack *extack)
-+{
-+	struct flow_match_control match;
-+
-+	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL))
-+		return false;
-+
-+	flow_rule_match_control(rule, &match);
-+
-+	return flow_rule_has_control_flags(match.mask->flags, extack);
-+}
-+
- struct flow_stats {
- 	u64	pkts;
- 	u64	bytes;

+ 0 - 40
target/linux/generic/backport-6.6/704-v6.12-ipv6-Add-ipv6_addr_-cpu_to_be32-be32_to_cpu-helpers.patch

@@ -1,40 +0,0 @@
-From f40a455d01f80c6638be382d75cb1c4e7748d8af Mon Sep 17 00:00:00 2001
-From: Simon Horman <[email protected]>
-Date: Tue, 13 Aug 2024 14:33:47 +0100
-Subject: [PATCH] ipv6: Add ipv6_addr_{cpu_to_be32,be32_to_cpu} helpers
-
-Add helpers to convert an ipv6 addr, expressed as an array
-of words, from CPU to big-endian byte order, and vice versa.
-
-No functional change intended.
-Compile tested only.
-
-Suggested-by: Andrew Lunn <[email protected]>
-Link: https://lore.kernel.org/netdev/[email protected]/
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: Simon Horman <[email protected]>
-Link: https://patch.msgid.link/[email protected]
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- include/net/ipv6.h | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
-
---- a/include/net/ipv6.h
-+++ b/include/net/ipv6.h
-@@ -1382,4 +1382,16 @@ static inline void ip6_sock_set_recvpkti
- 	release_sock(sk);
- }
- 
-+#define IPV6_ADDR_WORDS 4
-+
-+static inline void ipv6_addr_cpu_to_be32(__be32 *dst, const u32 *src)
-+{
-+	cpu_to_be32_array(dst, src, IPV6_ADDR_WORDS);
-+}
-+
-+static inline void ipv6_addr_be32_to_cpu(u32 *dst, const __be32 *src)
-+{
-+	be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS);
-+}
-+
- #endif /* _NET_IPV6_H */

+ 0 - 69
target/linux/generic/backport-6.6/707-v6.8-02-net-phy-at803x-move-disable-WOL-to-specific-at8031-p.patch

@@ -1,69 +0,0 @@
-From 6a3b8c573b5a152a6aa7a0b54c5e18b84c6ba6f5 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Fri, 8 Dec 2023 15:51:49 +0100
-Subject: [PATCH 02/13] net: phy: at803x: move disable WOL to specific at8031
- probe
-
-Move the WOL disable call to specific at8031 probe to make at803x_probe
-more generic and drop extra check for PHY ID.
-
-Keep the same previous behaviour by first calling at803x_probe and then
-disabling WOL.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- drivers/net/phy/at803x.c | 27 +++++++++++++++++----------
- 1 file changed, 17 insertions(+), 10 deletions(-)
-
---- a/drivers/net/phy/at803x.c
-+++ b/drivers/net/phy/at803x.c
-@@ -886,15 +886,6 @@ static int at803x_probe(struct phy_devic
- 			priv->is_fiber = true;
- 			break;
- 		}
--
--		/* Disable WoL in 1588 register which is enabled
--		 * by default
--		 */
--		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
--				     AT803X_PHY_MMD3_WOL_CTRL,
--				     AT803X_WOL_EN, 0);
--		if (ret)
--			return ret;
- 	}
- 
- 	return 0;
-@@ -1591,6 +1582,22 @@ static int at803x_cable_test_start(struc
- 	return 0;
- }
- 
-+static int at8031_probe(struct phy_device *phydev)
-+{
-+	int ret;
-+
-+	ret = at803x_probe(phydev);
-+	if (ret)
-+		return ret;
-+
-+	/* Disable WoL in 1588 register which is enabled
-+	 * by default
-+	 */
-+	return phy_modify_mmd(phydev, MDIO_MMD_PCS,
-+			      AT803X_PHY_MMD3_WOL_CTRL,
-+			      AT803X_WOL_EN, 0);
-+}
-+
- static int qca83xx_config_init(struct phy_device *phydev)
- {
- 	u8 switch_revision;
-@@ -2092,7 +2099,7 @@ static struct phy_driver at803x_driver[]
- 	PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
- 	.name			= "Qualcomm Atheros AR8031/AR8033",
- 	.flags			= PHY_POLL_CABLE_TEST,
--	.probe			= at803x_probe,
-+	.probe			= at8031_probe,
- 	.config_init		= at803x_config_init,
- 	.config_aneg		= at803x_config_aneg,
- 	.soft_reset		= genphy_soft_reset,

+ 0 - 129
target/linux/generic/backport-6.6/707-v6.8-03-net-phy-at803x-raname-hw_stats-functions-to-qca83xx-.patch

@@ -1,129 +0,0 @@
-From 07b1ad83b9ed6db1735ba10baf67b7a565ac0cef Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Fri, 8 Dec 2023 15:51:50 +0100
-Subject: [PATCH 03/13] net: phy: at803x: raname hw_stats functions to qca83xx
- specific name
-
-The function and the struct related to hw_stats were specific to qca83xx
-PHY but were called following the convention in the driver of calling
-everything with at803x prefix.
-
-To better organize the code, rename these function a more specific name
-to better describe that they are specific to 83xx PHY family.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- drivers/net/phy/at803x.c | 44 ++++++++++++++++++++--------------------
- 1 file changed, 22 insertions(+), 22 deletions(-)
-
---- a/drivers/net/phy/at803x.c
-+++ b/drivers/net/phy/at803x.c
-@@ -295,7 +295,7 @@ struct at803x_hw_stat {
- 	enum stat_access_type access_type;
- };
- 
--static struct at803x_hw_stat at803x_hw_stats[] = {
-+static struct at803x_hw_stat qca83xx_hw_stats[] = {
- 	{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
- 	{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
- 	{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
-@@ -311,7 +311,7 @@ struct at803x_priv {
- 	bool is_1000basex;
- 	struct regulator_dev *vddio_rdev;
- 	struct regulator_dev *vddh_rdev;
--	u64 stats[ARRAY_SIZE(at803x_hw_stats)];
-+	u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
- };
- 
- struct at803x_context {
-@@ -529,24 +529,24 @@ static void at803x_get_wol(struct phy_de
- 		wol->wolopts |= WAKE_MAGIC;
- }
- 
--static int at803x_get_sset_count(struct phy_device *phydev)
-+static int qca83xx_get_sset_count(struct phy_device *phydev)
- {
--	return ARRAY_SIZE(at803x_hw_stats);
-+	return ARRAY_SIZE(qca83xx_hw_stats);
- }
- 
--static void at803x_get_strings(struct phy_device *phydev, u8 *data)
-+static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
- {
- 	int i;
- 
--	for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++) {
-+	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
- 		strscpy(data + i * ETH_GSTRING_LEN,
--			at803x_hw_stats[i].string, ETH_GSTRING_LEN);
-+			qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
- 	}
- }
- 
--static u64 at803x_get_stat(struct phy_device *phydev, int i)
-+static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
- {
--	struct at803x_hw_stat stat = at803x_hw_stats[i];
-+	struct at803x_hw_stat stat = qca83xx_hw_stats[i];
- 	struct at803x_priv *priv = phydev->priv;
- 	int val;
- 	u64 ret;
-@@ -567,13 +567,13 @@ static u64 at803x_get_stat(struct phy_de
- 	return ret;
- }
- 
--static void at803x_get_stats(struct phy_device *phydev,
--			     struct ethtool_stats *stats, u64 *data)
-+static void qca83xx_get_stats(struct phy_device *phydev,
-+			      struct ethtool_stats *stats, u64 *data)
- {
- 	int i;
- 
--	for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++)
--		data[i] = at803x_get_stat(phydev, i);
-+	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
-+		data[i] = qca83xx_get_stat(phydev, i);
- }
- 
- static int at803x_suspend(struct phy_device *phydev)
-@@ -2175,9 +2175,9 @@ static struct phy_driver at803x_driver[]
- 	.flags			= PHY_IS_INTERNAL,
- 	.config_init		= qca83xx_config_init,
- 	.soft_reset		= genphy_soft_reset,
--	.get_sset_count		= at803x_get_sset_count,
--	.get_strings		= at803x_get_strings,
--	.get_stats		= at803x_get_stats,
-+	.get_sset_count		= qca83xx_get_sset_count,
-+	.get_strings		= qca83xx_get_strings,
-+	.get_stats		= qca83xx_get_stats,
- 	.suspend		= qca83xx_suspend,
- 	.resume			= qca83xx_resume,
- }, {
-@@ -2191,9 +2191,9 @@ static struct phy_driver at803x_driver[]
- 	.flags			= PHY_IS_INTERNAL,
- 	.config_init		= qca83xx_config_init,
- 	.soft_reset		= genphy_soft_reset,
--	.get_sset_count		= at803x_get_sset_count,
--	.get_strings		= at803x_get_strings,
--	.get_stats		= at803x_get_stats,
-+	.get_sset_count		= qca83xx_get_sset_count,
-+	.get_strings		= qca83xx_get_strings,
-+	.get_stats		= qca83xx_get_stats,
- 	.suspend		= qca83xx_suspend,
- 	.resume			= qca83xx_resume,
- }, {
-@@ -2207,9 +2207,9 @@ static struct phy_driver at803x_driver[]
- 	.flags			= PHY_IS_INTERNAL,
- 	.config_init		= qca83xx_config_init,
- 	.soft_reset		= genphy_soft_reset,
--	.get_sset_count		= at803x_get_sset_count,
--	.get_strings		= at803x_get_strings,
--	.get_stats		= at803x_get_stats,
-+	.get_sset_count		= qca83xx_get_sset_count,
-+	.get_strings		= qca83xx_get_strings,
-+	.get_stats		= qca83xx_get_stats,
- 	.suspend		= qca83xx_suspend,
- 	.resume			= qca83xx_resume,
- }, {

+ 0 - 155
target/linux/generic/backport-6.6/707-v6.8-04-net-phy-at803x-move-qca83xx-specific-check-in-dedica.patch

@@ -1,155 +0,0 @@
-From d43cff3f82336c0bd965ea552232d9f4ddac71a6 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Fri, 8 Dec 2023 15:51:51 +0100
-Subject: [PATCH 04/13] net: phy: at803x: move qca83xx specific check in
- dedicated functions
-
-Rework qca83xx specific check to dedicated function to tidy things up
-and drop useless phy_id check.
-
-Also drop an useless link_change_notify for QCA8337 as it did nothing an
-returned early.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- drivers/net/phy/at803x.c | 68 ++++++++++++++++++++++------------------
- 1 file changed, 37 insertions(+), 31 deletions(-)
-
---- a/drivers/net/phy/at803x.c
-+++ b/drivers/net/phy/at803x.c
-@@ -1623,27 +1623,26 @@ static int qca83xx_config_init(struct ph
- 		break;
- 	}
- 
-+	/* Following original QCA sourcecode set port to prefer master */
-+	phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
-+
-+	return 0;
-+}
-+
-+static int qca8327_config_init(struct phy_device *phydev)
-+{
- 	/* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
- 	 * Disable on init and enable only with 100m speed following
- 	 * qca original source code.
- 	 */
--	if (phydev->drv->phy_id == QCA8327_A_PHY_ID ||
--	    phydev->drv->phy_id == QCA8327_B_PHY_ID)
--		at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
--				      QCA8327_DEBUG_MANU_CTRL_EN, 0);
-+	at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
-+			      QCA8327_DEBUG_MANU_CTRL_EN, 0);
- 
--	/* Following original QCA sourcecode set port to prefer master */
--	phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
--
--	return 0;
-+	return qca83xx_config_init(phydev);
- }
- 
- static void qca83xx_link_change_notify(struct phy_device *phydev)
- {
--	/* QCA8337 doesn't require DAC Amplitude adjustement */
--	if (phydev->drv->phy_id == QCA8337_PHY_ID)
--		return;
--
- 	/* Set DAC Amplitude adjustment to +6% for 100m on link running */
- 	if (phydev->state == PHY_RUNNING) {
- 		if (phydev->speed == SPEED_100)
-@@ -1686,19 +1685,6 @@ static int qca83xx_resume(struct phy_dev
- 
- static int qca83xx_suspend(struct phy_device *phydev)
- {
--	u16 mask = 0;
--
--	/* Only QCA8337 support actual suspend.
--	 * QCA8327 cause port unreliability when phy suspend
--	 * is set.
--	 */
--	if (phydev->drv->phy_id == QCA8337_PHY_ID) {
--		genphy_suspend(phydev);
--	} else {
--		mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
--		phy_modify(phydev, MII_BMCR, mask, 0);
--	}
--
- 	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
- 			      AT803X_DEBUG_GATE_CLK_IN1000, 0);
- 
-@@ -1709,6 +1695,27 @@ static int qca83xx_suspend(struct phy_de
- 	return 0;
- }
- 
-+static int qca8337_suspend(struct phy_device *phydev)
-+{
-+	/* Only QCA8337 support actual suspend. */
-+	genphy_suspend(phydev);
-+
-+	return qca83xx_suspend(phydev);
-+}
-+
-+static int qca8327_suspend(struct phy_device *phydev)
-+{
-+	u16 mask = 0;
-+
-+	/* QCA8327 cause port unreliability when phy suspend
-+	 * is set.
-+	 */
-+	mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
-+	phy_modify(phydev, MII_BMCR, mask, 0);
-+
-+	return qca83xx_suspend(phydev);
-+}
-+
- static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
- {
- 	int ret;
-@@ -2170,7 +2177,6 @@ static struct phy_driver at803x_driver[]
- 	.phy_id_mask		= QCA8K_PHY_ID_MASK,
- 	.name			= "Qualcomm Atheros 8337 internal PHY",
- 	/* PHY_GBIT_FEATURES */
--	.link_change_notify	= qca83xx_link_change_notify,
- 	.probe			= at803x_probe,
- 	.flags			= PHY_IS_INTERNAL,
- 	.config_init		= qca83xx_config_init,
-@@ -2178,7 +2184,7 @@ static struct phy_driver at803x_driver[]
- 	.get_sset_count		= qca83xx_get_sset_count,
- 	.get_strings		= qca83xx_get_strings,
- 	.get_stats		= qca83xx_get_stats,
--	.suspend		= qca83xx_suspend,
-+	.suspend		= qca8337_suspend,
- 	.resume			= qca83xx_resume,
- }, {
- 	/* QCA8327-A from switch QCA8327-AL1A */
-@@ -2189,12 +2195,12 @@ static struct phy_driver at803x_driver[]
- 	.link_change_notify	= qca83xx_link_change_notify,
- 	.probe			= at803x_probe,
- 	.flags			= PHY_IS_INTERNAL,
--	.config_init		= qca83xx_config_init,
-+	.config_init		= qca8327_config_init,
- 	.soft_reset		= genphy_soft_reset,
- 	.get_sset_count		= qca83xx_get_sset_count,
- 	.get_strings		= qca83xx_get_strings,
- 	.get_stats		= qca83xx_get_stats,
--	.suspend		= qca83xx_suspend,
-+	.suspend		= qca8327_suspend,
- 	.resume			= qca83xx_resume,
- }, {
- 	/* QCA8327-B from switch QCA8327-BL1A */
-@@ -2205,12 +2211,12 @@ static struct phy_driver at803x_driver[]
- 	.link_change_notify	= qca83xx_link_change_notify,
- 	.probe			= at803x_probe,
- 	.flags			= PHY_IS_INTERNAL,
--	.config_init		= qca83xx_config_init,
-+	.config_init		= qca8327_config_init,
- 	.soft_reset		= genphy_soft_reset,
- 	.get_sset_count		= qca83xx_get_sset_count,
- 	.get_strings		= qca83xx_get_strings,
- 	.get_stats		= qca83xx_get_stats,
--	.suspend		= qca83xx_suspend,
-+	.suspend		= qca8327_suspend,
- 	.resume			= qca83xx_resume,
- }, {
- 	/* Qualcomm QCA8081 */

+ 0 - 94
target/linux/generic/backport-6.6/707-v6.8-05-net-phy-at803x-move-specific-DT-option-for-at8031-to.patch

@@ -1,94 +0,0 @@
-From 900eef75cc5018e149c52fe305c9c3fe424c52a7 Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Fri, 8 Dec 2023 15:51:52 +0100
-Subject: [PATCH 05/13] net: phy: at803x: move specific DT option for at8031 to
- specific probe
-
-Move specific DT options for at8031 to specific probe to tidy things up
-and make at803x_parse_dt more generic.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Reviewed-by: Andrew Lunn <[email protected]>
-Signed-off-by: David S. Miller <[email protected]>
----
- drivers/net/phy/at803x.c | 55 ++++++++++++++++++++++------------------
- 1 file changed, 31 insertions(+), 24 deletions(-)
-
---- a/drivers/net/phy/at803x.c
-+++ b/drivers/net/phy/at803x.c
-@@ -825,30 +825,6 @@ static int at803x_parse_dt(struct phy_de
- 		}
- 	}
- 
--	/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
--	 * options.
--	 */
--	if (phydev->drv->phy_id == ATH8031_PHY_ID) {
--		if (of_property_read_bool(node, "qca,keep-pll-enabled"))
--			priv->flags |= AT803X_KEEP_PLL_ENABLED;
--
--		ret = at8031_register_regulators(phydev);
--		if (ret < 0)
--			return ret;
--
--		ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
--							 "vddio");
--		if (ret) {
--			phydev_err(phydev, "failed to get VDDIO regulator\n");
--			return ret;
--		}
--
--		/* Only AR8031/8033 support 1000Base-X for SFP modules */
--		ret = phy_sfp_probe(phydev, &at803x_sfp_ops);
--		if (ret < 0)
--			return ret;
--	}
--
- 	return 0;
- }
- 
-@@ -1582,6 +1558,30 @@ static int at803x_cable_test_start(struc
- 	return 0;
- }
- 
-+static int at8031_parse_dt(struct phy_device *phydev)
-+{
-+	struct device_node *node = phydev->mdio.dev.of_node;
-+	struct at803x_priv *priv = phydev->priv;
-+	int ret;
-+
-+	if (of_property_read_bool(node, "qca,keep-pll-enabled"))
-+		priv->flags |= AT803X_KEEP_PLL_ENABLED;
-+
-+	ret = at8031_register_regulators(phydev);
-+	if (ret < 0)
-+		return ret;
-+
-+	ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
-+						 "vddio");
-+	if (ret) {
-+		phydev_err(phydev, "failed to get VDDIO regulator\n");
-+		return ret;
-+	}
-+
-+	/* Only AR8031/8033 support 1000Base-X for SFP modules */
-+	return phy_sfp_probe(phydev, &at803x_sfp_ops);
-+}
-+
- static int at8031_probe(struct phy_device *phydev)
- {
- 	int ret;
-@@ -1590,6 +1590,13 @@ static int at8031_probe(struct phy_devic
- 	if (ret)
- 		return ret;
- 
-+	/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
-+	 * options.
-+	 */
-+	ret = at8031_parse_dt(phydev);
-+	if (ret)
-+		return ret;
-+
- 	/* Disable WoL in 1588 register which is enabled
- 	 * by default
- 	 */

Некоторые файлы не были показаны из-за большого количества измененных файлов