2
0
View source

kernel/generic: Restore kernel files for v6.6

This is an automatically generated commit which aids following Kernel patch
history, as git will see the move and copy as a rename thus defeating the
purpose.

For the original discussion see:
https://lists.openwrt.org/pipermail/openwrt-devel/2023-October/041673.html

Signed-off-by: Mieczyslaw Nalewaj <[email protected]>
Link: https://github.com/openwrt/openwrt/pull/16547
Signed-off-by: Christian Marangi <[email protected]>
Mieczyslaw Nalewaj 8 months ago
parent
commit
456b7a5d48
100 files changed, 29134 additions, 0 deletions
  1. 56 0
      target/linux/generic/backport-6.6/0080-v6.9-smp-Avoid-setup_max_cpus_namespace_collision_shadowing.patch
  2. 58 0
      target/linux/generic/backport-6.6/065-v6.10-compiler_types.h-Define-__retain.patch
  3. 65 0
      target/linux/generic/backport-6.6/066-v6.10-bpf-Harden-__bpf_kfunc-against-linker-removal.patch
  4. 59 0
      target/linux/generic/backport-6.6/192-v6.12-fix-libbpf-Wmaybe-uninitialized.patch
  5. 172 0
      target/linux/generic/backport-6.6/200-v6.11-ARM-9404-1-arm32-enable-HAVE_LD_DEAD_CODE_DATA_ELIMI.patch
  6. 51 0
      target/linux/generic/backport-6.6/300-v6.7-arm64-swiotlb-Reduce-the-default-size-if-no-ZONE_DMA.patch
  7. 161 0
      target/linux/generic/backport-6.6/301-v6.9-kernel.h-removed-REPEAT_BYTE-from-kernel.h.patch
  8. 107 0
      target/linux/generic/backport-6.6/302-v6.9-kernel.h-Move-upper_-_bits-and-lower_-_bits-to-wordp.patch
  9. 206 0
      target/linux/generic/backport-6.6/310-v6.7-mips-kexec-fix-the-incorrect-ifdeffery-and-dependenc.patch
  10. 171 0
      target/linux/generic/backport-6.6/320-v6.11-mips-bmips-rework-and-cache-CBR-addr-handling.patch
  11. 111 0
      target/linux/generic/backport-6.6/321-v6.11-mips-bmips-setup-make-CBR-address-configurable.patch
  12. 57 0
      target/linux/generic/backport-6.6/322-v6.11-mips-bmips-enable-RAC-on-BMIPS4350.patch
  13. 36 0
      target/linux/generic/backport-6.6/400-v6.9-mtd-rawnand-brcmnand-Support-write-protection-settin.patch
  14. 123 0
      target/linux/generic/backport-6.6/401-v6.9-dt-bindings-mtd-add-basic-bindings-for-UBI.patch
  15. 50 0
      target/linux/generic/backport-6.6/402-v6.9-dt-bindings-mtd-ubi-volume-allow-UBI-volumes-to-prov.patch
  16. 285 0
      target/linux/generic/backport-6.6/403-v6.9-mtd-ubi-block-use-notifier-to-create-ubiblock-from-p.patch
  17. 205 0
      target/linux/generic/backport-6.6/404-v6.9-mtd-ubi-attach-from-device-tree.patch
  18. 180 0
      target/linux/generic/backport-6.6/405-v6.9-mtd-ubi-introduce-pre-removal-notification-for-UBI-v.patch
  19. 66 0
      target/linux/generic/backport-6.6/406-v6.9-mtd-ubi-populate-ubi-volume-fwnode.patch
  20. 244 0
      target/linux/generic/backport-6.6/407-v6.9-mtd-ubi-provide-NVMEM-layer-over-UBI-volumes.patch
  21. 34 0
      target/linux/generic/backport-6.6/408-v6.9-mtd-ubi-fix-NVMEM-over-UBI-volumes-on-32-bit-systems.patch
  22. 53 0
      target/linux/generic/backport-6.6/410-v6.13-01-block-add-support-for-defining-read-only-partitions.patch
  23. 94 0
      target/linux/generic/backport-6.6/410-v6.13-03-block-introduce-add_disk_fwnode.patch
  24. 104 0
      target/linux/generic/backport-6.6/410-v6.13-04-mmc-block-attach-partitions-fwnode-if-found-in-mmc-c.patch
  25. 200 0
      target/linux/generic/backport-6.6/410-v6.13-05-block-add-support-for-partition-table-defined-in-OF.patch
  26. 146 0
      target/linux/generic/backport-6.6/411-v6.7-mtd-spinand-add-support-for-FORESEE-F35SQA002G.patch
  27. 38 0
      target/linux/generic/backport-6.6/412-v6.14-mtd-spinand-add-support-for-FORESEE-F35SQA001G.patch
  28. 1013 0
      target/linux/generic/backport-6.6/413-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch
  29. 880 0
      target/linux/generic/backport-6.6/413-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch
  30. 2436 0
      target/linux/generic/backport-6.6/413-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch
  31. 198 0
      target/linux/generic/backport-6.6/413-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch
  32. 64 0
      target/linux/generic/backport-6.6/414-v6.14-mtd-rawnand-qcom-fix-broken-config-in-qcom_param_pag.patch
  33. 77 0
      target/linux/generic/backport-6.6/415-v6.14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch
  34. 1737 0
      target/linux/generic/backport-6.6/416-v6.15-01-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Inte.patch
  35. 28 0
      target/linux/generic/backport-6.6/416-v6.15-02-spi-spi-qpic-snand-Fix-ECC_CFG_ECC_DISABLE-shift-in-.patch
  36. 35 0
      target/linux/generic/backport-6.6/416-v6.15-03-spi-spi-qpic-snand-avoid-memleak-in-qcom_spi_ecc_ini.patch
  37. 49 0
      target/linux/generic/backport-6.6/416-v6.15-04-spi-SPI_QPIC_SNAND-should-be-tristate-and-depend-on-.patch
  38. 29 0
      target/linux/generic/backport-6.6/416-v6.15-05-spi-spi-qpic-snand-use-kmalloc-for-OOB-buffer-alloca.patch
  39. 31 0
      target/linux/generic/backport-6.6/417-v6.12-mtd-spinand-set-bitflip_threshold-to-75-of-ECC-stren.patch
  40. 65 0
      target/linux/generic/backport-6.6/418-v6.12-mtd-spinand-winbond-add-support-for-W25N01KV.patch
  41. 75 0
      target/linux/generic/backport-6.6/600-v6.10-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch
  42. 330 0
      target/linux/generic/backport-6.6/601-v6.10-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch
  43. 121 0
      target/linux/generic/backport-6.6/602-v6.10-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch
  44. 164 0
      target/linux/generic/backport-6.6/603-v6.10-net-Rename-rps_lock-to-backlog_lock.patch
  45. 30 0
      target/linux/generic/backport-6.6/610-v6.9-net-mdio-add-2.5g-and-5g-related-PMA-speed-constants.patch
  46. 94 0
      target/linux/generic/backport-6.6/611-01-v6.11-udp-Allow-GSO-transmit-from-devices-with-no-checksum.patch
  47. 69 0
      target/linux/generic/backport-6.6/611-02-v6.11-net-Make-USO-depend-on-CSUM-offload.patch
  48. 86 0
      target/linux/generic/backport-6.6/611-03-v6.11-udp-Fall-back-to-software-USO-if-IPv6-extension-head.patch
  49. 29 0
      target/linux/generic/backport-6.6/612-v6.9-net-get-stats64-if-device-if-driver-is-configured.patch
  50. 139 0
      target/linux/generic/backport-6.6/620-01-v6.7-page_pool-fragment-API-support-for-32-bit-arch-with-.patch
  51. 183 0
      target/linux/generic/backport-6.6/620-02-v6.7-page_pool-unify-frag_count-handling-in-page_pool_is_.patch
  52. 101 0
      target/linux/generic/backport-6.6/621-v6.10-01-net-move-skb_gro_receive_list-from-udp-to-core.patch
  53. 177 0
      target/linux/generic/backport-6.6/621-v6.10-02-net-add-support-for-segmenting-TCP-fraglist-GSO-pack.patch
  54. 75 0
      target/linux/generic/backport-6.6/621-v6.10-03-net-add-code-for-TCP-fraglist-GRO.patch
  55. 88 0
      target/linux/generic/backport-6.6/621-v6.10-04-net-create-tcp_gro_lookup-helper-function.patch
  56. 166 0
      target/linux/generic/backport-6.6/621-v6.10-05-net-create-tcp_gro_header_pull-helper-function.patch
  57. 140 0
      target/linux/generic/backport-6.6/621-v6.10-06-net-add-heuristic-for-enabling-TCP-fraglist-GRO.patch
  58. 80 0
      target/linux/generic/backport-6.6/622-v6.12-net-gso-fix-tcp-fraglist-segmentation-after-pull-fro.patch
  59. 59 0
      target/linux/generic/backport-6.6/623-v6.14-net-ipv6-fix-TCP-GSO-segmentation-with-NAT.patch
  60. 135 0
      target/linux/generic/backport-6.6/700-v6.10-net-create-a-dummy-net_device-allocator.patch
  61. 29 0
      target/linux/generic/backport-6.6/701-v6.8-netfilter-nf_tables-fix-bidirectional-offload-regres.patch
  62. 2386 0
      target/linux/generic/backport-6.6/702-01-v6.7-net-phy-aquantia-move-to-separate-directory.patch
  63. 183 0
      target/linux/generic/backport-6.6/702-02-v6.7-net-phy-aquantia-move-MMD_VEND-define-to-header.patch
  64. 504 0
      target/linux/generic/backport-6.6/702-03-v6.7-net-phy-aquantia-add-firmware-load-support.patch
  65. 69 0
      target/linux/generic/backport-6.6/707-v6.8-02-net-phy-at803x-move-disable-WOL-to-specific-at8031-p.patch
  66. 129 0
      target/linux/generic/backport-6.6/707-v6.8-03-net-phy-at803x-raname-hw_stats-functions-to-qca83xx-.patch
  67. 155 0
      target/linux/generic/backport-6.6/707-v6.8-04-net-phy-at803x-move-qca83xx-specific-check-in-dedica.patch
  68. 94 0
      target/linux/generic/backport-6.6/707-v6.8-05-net-phy-at803x-move-specific-DT-option-for-at8031-to.patch
  69. 78 0
      target/linux/generic/backport-6.6/707-v6.8-06-net-phy-at803x-move-specific-at8031-probe-mode-check.patch
  70. 86 0
      target/linux/generic/backport-6.6/707-v6.8-07-net-phy-at803x-move-specific-at8031-config_init-to-d.patch
  71. 92 0
      target/linux/generic/backport-6.6/707-v6.8-08-net-phy-at803x-move-specific-at8031-WOL-bits-to-dedi.patch
  72. 78 0
      target/linux/generic/backport-6.6/707-v6.8-09-net-phy-at803x-move-specific-at8031-config_intr-to-d.patch
  73. 78 0
      target/linux/generic/backport-6.6/707-v6.8-10-net-phy-at803x-make-at8031-related-DT-functions-name.patch
  74. 297 0
      target/linux/generic/backport-6.6/707-v6.8-11-net-phy-at803x-move-at8031-functions-in-dedicated-se.patch
  75. 114 0
      target/linux/generic/backport-6.6/707-v6.8-12-net-phy-at803x-move-at8035-specific-DT-parse-to-dedi.patch
  76. 219 0
      target/linux/generic/backport-6.6/707-v6.8-13-net-phy-at803x-drop-specific-PHY-ID-check-from-cable.patch
  77. 116 0
      target/linux/generic/backport-6.6/708-v6.8-01-net-phy-at803x-move-specific-qca808x-config_aneg-to-.patch
  78. 97 0
      target/linux/generic/backport-6.6/708-v6.8-02-net-phy-at803x-make-read-specific-status-function-mo.patch
  79. 27 0
      target/linux/generic/backport-6.6/709-v6.8-01-net-phy-at803x-remove-extra-space-after-cast.patch
  80. 38 0
      target/linux/generic/backport-6.6/709-v6.8-02-net-phy-at803x-replace-msleep-1-with-usleep_range.patch
  81. 152 0
      target/linux/generic/backport-6.6/710-v6.8-net-phy-at803x-better-align-function-varibles-to-ope.patch
  82. 62 0
      target/linux/generic/backport-6.6/711-v6.8-01-net-phy-at803x-generalize-cdt-fault-length-function.patch
  83. 118 0
      target/linux/generic/backport-6.6/711-v6.8-02-net-phy-at803x-refactor-qca808x-cable-test-get-statu.patch
  84. 182 0
      target/linux/generic/backport-6.6/711-v6.8-03-net-phy-at803x-add-support-for-cdt-cross-short-test-.patch
  85. 62 0
      target/linux/generic/backport-6.6/711-v6.8-04-net-phy-at803x-make-read_status-more-generic.patch
  86. 408 0
      target/linux/generic/backport-6.6/712-v6.9-net-phy-at803x-add-LED-support-for-qca808x.patch
  87. 5598 0
      target/linux/generic/backport-6.6/713-v6.9-01-net-phy-move-at803x-PHY-driver-to-dedicated-director.patch
  88. 243 0
      target/linux/generic/backport-6.6/713-v6.9-02-net-phy-qcom-create-and-move-functions-to-shared-lib.patch
  89. 638 0
      target/linux/generic/backport-6.6/713-v6.9-03-net-phy-qcom-deatch-qca83xx-PHY-driver-from-at803x.patch
  90. 1014 0
      target/linux/generic/backport-6.6/713-v6.9-04-net-phy-qcom-move-additional-functions-to-shared-lib.patch
  91. 1936 0
      target/linux/generic/backport-6.6/713-v6.9-05-net-phy-qcom-detach-qca808x-PHY-driver-from-at803x.patch
  92. 28 0
      target/linux/generic/backport-6.6/714-v6.8-01-net-phy-make-addr-type-u8-in-phy_package_shared-stru.patch
  93. 341 0
      target/linux/generic/backport-6.6/714-v6.8-02-net-phy-extend-PHY-package-API-to-support-multiple-g.patch
  94. 116 0
      target/linux/generic/backport-6.6/714-v6.8-03-net-phy-restructure-__phy_write-read_mmd-to-helper-a.patch
  95. 196 0
      target/linux/generic/backport-6.6/714-v6.8-04-net-phy-add-support-for-PHY-package-MMD-read-write.patch
  96. 36 0
      target/linux/generic/backport-6.6/715-v6.9-01-net-phy-qcom-qca808x-fix-logic-error-in-LED-brightne.patch
  97. 41 0
      target/linux/generic/backport-6.6/715-v6.9-02-net-phy-qcom-qca808x-default-to-LED-active-High-if-n.patch
  98. 211 0
      target/linux/generic/backport-6.6/716-v6.9-02-net-phy-add-support-for-scanning-PHY-in-PHY-packages.patch
  99. 185 0
      target/linux/generic/backport-6.6/716-v6.9-03-net-phy-add-devm-of_phy_package_join-helper.patch
  100. 583 0
      target/linux/generic/backport-6.6/716-v6.9-04-net-phy-qcom-move-more-function-to-shared-library.patch

+ 56 - 0
target/linux/generic/backport-6.6/0080-v6.9-smp-Avoid-setup_max_cpus_namespace_collision_shadowing.patch

@@ -0,0 +1,56 @@
+From 4c8a49854130da0117a0fdb858551824919a2389 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <[email protected]>
+Date: Tue, 27 Feb 2024 09:58:15 +0100
+Subject: [PATCH] smp: Avoid 'setup_max_cpus' namespace collision/shadowing
+
+bringup_nonboot_cpus() gets passed the 'setup_max_cpus'
+variable in init/main.c - which is also the name of the parameter,
+shadowing the name.
+
+To reduce confusion and to allow the 'setup_max_cpus' value
+to be #defined in the <linux/smp.h> header, use the 'max_cpus'
+name for the function parameter name.
+
+Signed-off-by: Ingo Molnar <[email protected]>
+Cc: Thomas Gleixner <[email protected]>
+Cc: [email protected]
+---
+ include/linux/cpu.h | 2 +-
+ kernel/cpu.c        | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -109,7 +109,7 @@ void notify_cpu_starting(unsigned int cp
+ extern void cpu_maps_update_begin(void);
+ extern void cpu_maps_update_done(void);
+ int bringup_hibernate_cpu(unsigned int sleep_cpu);
+-void bringup_nonboot_cpus(unsigned int setup_max_cpus);
++void bringup_nonboot_cpus(unsigned int max_cpus);
+ 
+ #else	/* CONFIG_SMP */
+ #define cpuhp_tasks_frozen	0
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1905,17 +1905,17 @@ static bool __init cpuhp_bringup_cpus_pa
+ static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
+ #endif /* CONFIG_HOTPLUG_PARALLEL */
+ 
+-void __init bringup_nonboot_cpus(unsigned int setup_max_cpus)
++void __init bringup_nonboot_cpus(unsigned int max_cpus)
+ {
+-	if (!setup_max_cpus)
++	if (!max_cpus)
+ 		return;
+ 
+ 	/* Try parallel bringup optimization if enabled */
+-	if (cpuhp_bringup_cpus_parallel(setup_max_cpus))
++	if (cpuhp_bringup_cpus_parallel(max_cpus))
+ 		return;
+ 
+ 	/* Full per CPU serialized bringup */
+-	cpuhp_bringup_mask(cpu_present_mask, setup_max_cpus, CPUHP_ONLINE);
++	cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP_SMP

+ 58 - 0
target/linux/generic/backport-6.6/065-v6.10-compiler_types.h-Define-__retain.patch

@@ -0,0 +1,58 @@
+From 443df175be581618d6ff781dc3af3aa1a9ba789d Mon Sep 17 00:00:00 2001
+From: Tony Ambardar <[email protected]>
+Date: Fri, 31 May 2024 23:55:55 -0700
+Subject: [PATCH 1/2] compiler_types.h: Define __retain for
+ __attribute__((__retain__))
+
+Some code includes the __used macro to prevent functions and data from
+being optimized out. This macro implements __attribute__((__used__)), which
+operates at the compiler and IR-level, and so still allows a linker to
+remove objects intended to be kept.
+
+Compilers supporting __attribute__((__retain__)) can address this gap by
+setting the flag SHF_GNU_RETAIN on the section of a function/variable,
+indicating to the linker the object should be retained. This attribute is
+available since gcc 11, clang 13, and binutils 2.36.
+
+Provide a __retain macro implementing __attribute__((__retain__)), whose
+first user will be the '__bpf_kfunc' tag.
+
+Link: https://lore.kernel.org/bpf/ZlmGoT9KiYLZd91S@krava/T/
+Cc: [email protected] # v6.6+
+Signed-off-by: Tony Ambardar <[email protected]>
+---
+ include/linux/compiler_types.h | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -145,6 +145,29 @@ static inline void __chk_io_ptr(const vo
+ #define __has_builtin(x) (0)
+ #endif
+ 
++/*
++ * Annotating a function/variable with __retain tells the compiler to place
++ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
++ * instructs the linker to retain the object during garbage-cleanup or LTO
++ * phases.
++ *
++ * Note that the __used macro is also used to prevent functions or data
++ * being optimized out, but operates at the compiler/IR-level and may still
++ * allow unintended removal of objects during linking.
++ *
++ * Optional: only supported since gcc >= 11, clang >= 13
++ *
++ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
++ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
++ */
++#if __has_attribute(__retain__) && \
++	(defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
++	 defined(CONFIG_LTO_CLANG))
++# define __retain			__attribute__((__retain__))
++#else
++# define __retain
++#endif
++
+ /* Compiler specific macros. */
+ #ifdef __clang__
+ #include <linux/compiler-clang.h>

+ 65 - 0
target/linux/generic/backport-6.6/066-v6.10-bpf-Harden-__bpf_kfunc-against-linker-removal.patch

@@ -0,0 +1,65 @@
+From ac507ed9882fd91a94657d68fe9ceac04b957103 Mon Sep 17 00:00:00 2001
+From: Tony Ambardar <[email protected]>
+Date: Sat, 1 Jun 2024 00:00:21 -0700
+Subject: [PATCH 2/2] bpf: Harden __bpf_kfunc tag against linker kfunc removal
+
+BPF kfuncs are often not directly referenced and may be inadvertently
+removed by optimization steps during kernel builds, thus the __bpf_kfunc
+tag mitigates against this removal by including the __used macro. However,
+this macro alone does not prevent removal during linking, and may still
+yield build warnings (e.g. on mips64el):
+
+    LD      vmlinux
+    BTFIDS  vmlinux
+  WARN: resolve_btfids: unresolved symbol bpf_verify_pkcs7_signature
+  WARN: resolve_btfids: unresolved symbol bpf_lookup_user_key
+  WARN: resolve_btfids: unresolved symbol bpf_lookup_system_key
+  WARN: resolve_btfids: unresolved symbol bpf_key_put
+  WARN: resolve_btfids: unresolved symbol bpf_iter_task_next
+  WARN: resolve_btfids: unresolved symbol bpf_iter_css_task_new
+  WARN: resolve_btfids: unresolved symbol bpf_get_file_xattr
+  WARN: resolve_btfids: unresolved symbol bpf_ct_insert_entry
+  WARN: resolve_btfids: unresolved symbol bpf_cgroup_release
+  WARN: resolve_btfids: unresolved symbol bpf_cgroup_from_id
+  WARN: resolve_btfids: unresolved symbol bpf_cgroup_acquire
+  WARN: resolve_btfids: unresolved symbol bpf_arena_free_pages
+    NM      System.map
+    SORTTAB vmlinux
+    OBJCOPY vmlinux.32
+
+Update the __bpf_kfunc tag to better guard against linker optimization by
+including the new __retain compiler macro, which fixes the warnings above.
+
+Verify the __retain macro with readelf by checking object flags for 'R':
+
+  $ readelf -Wa kernel/trace/bpf_trace.o
+  Section Headers:
+    [Nr]  Name              Type     Address  Off  Size ES Flg Lk Inf Al
+  ...
+    [178] .text.bpf_key_put PROGBITS 00000000 6420 0050 00 AXR  0   0  8
+  ...
+  Key to Flags:
+  ...
+    R (retain), D (mbind), p (processor specific)
+
+Link: https://lore.kernel.org/bpf/ZlmGoT9KiYLZd91S@krava/T/
+Reported-by: kernel test robot <[email protected]>
+Closes: https://lore.kernel.org/r/[email protected]/
+Fixes: 57e7c169cd6a ("bpf: Add __bpf_kfunc tag for marking kernel functions as kfuncs")
+Cc: [email protected] # v6.6+
+Signed-off-by: Tony Ambardar <[email protected]>
+---
+ include/linux/btf.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/btf.h
++++ b/include/linux/btf.h
+@@ -81,7 +81,7 @@
+  * as to avoid issues such as the compiler inlining or eliding either a static
+  * kfunc, or a global kfunc in an LTO build.
+  */
+-#define __bpf_kfunc __used noinline
++#define __bpf_kfunc __used __retain noinline
+ 
+ /*
+  * Return the name of the passed struct, if exists, or halt the build if for

+ 59 - 0
target/linux/generic/backport-6.6/192-v6.12-fix-libbpf-Wmaybe-uninitialized.patch

@@ -0,0 +1,59 @@
+From fab45b962749184e1a1a57c7c583782b78fad539 Mon Sep 17 00:00:00 2001
+From: Sam James <[email protected]>
+Date: Tue, 13 Aug 2024 20:49:06 +0100
+Subject: [PATCH] libbpf: Workaround -Wmaybe-uninitialized false positive
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In `elf_close`, we get this with GCC 15 -O3 (at least):
+```
+In function ‘elf_close’,
+    inlined from ‘elf_close’ at elf.c:53:6,
+    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
+elf.c:57:9: warning: ‘elf_fd.elf’ may be used uninitialized [-Wmaybe-uninitialized]
+   57 |         elf_end(elf_fd->elf);
+      |         ^~~~~~~~~~~~~~~~~~~~
+elf.c: In function ‘elf_find_func_offset_from_file’:
+elf.c:377:23: note: ‘elf_fd.elf’ was declared here
+  377 |         struct elf_fd elf_fd;
+      |                       ^~~~~~
+In function ‘elf_close’,
+    inlined from ‘elf_close’ at elf.c:53:6,
+    inlined from ‘elf_find_func_offset_from_file’ at elf.c:384:2:
+elf.c:58:9: warning: ‘elf_fd.fd’ may be used uninitialized [-Wmaybe-uninitialized]
+   58 |         close(elf_fd->fd);
+      |         ^~~~~~~~~~~~~~~~~
+elf.c: In function ‘elf_find_func_offset_from_file’:
+elf.c:377:23: note: ‘elf_fd.fd’ was declared here
+  377 |         struct elf_fd elf_fd;
+      |                       ^~~~~~
+```
+
+In reality, our use is fine, it's just that GCC doesn't model errno
+here (see linked GCC bug). Suppress -Wmaybe-uninitialized accordingly
+by initializing elf_fd.fd to -1 and elf_fd.elf to NULL.
+
+I've done this in two other functions as well given it could easily
+occur there too (same access/use pattern).
+
+Signed-off-by: Sam James <[email protected]>
+Signed-off-by: Andrii Nakryiko <[email protected]>
+Link: https://gcc.gnu.org/PR114952
+Link: https://lore.kernel.org/bpf/14ec488a1cac02794c2fa2b83ae0cef1bce2cb36.1723578546.git.sam@gentoo.org
+---
+ tools/lib/bpf/elf.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/tools/lib/bpf/elf.c
++++ b/tools/lib/bpf/elf.c
+@@ -16,6 +16,9 @@ int elf_open(const char *binary_path, st
+ 	int fd, ret;
+ 	Elf *elf;
+ 
++	elf_fd->elf = NULL;
++	elf_fd->fd = -1;
++
+ 	if (elf_version(EV_CURRENT) == EV_NONE) {
+ 		pr_warn("elf: failed to init libelf for %s\n", binary_path);
+ 		return -LIBBPF_ERRNO__LIBELF;

+ 172 - 0
target/linux/generic/backport-6.6/200-v6.11-ARM-9404-1-arm32-enable-HAVE_LD_DEAD_CODE_DATA_ELIMI.patch

@@ -0,0 +1,172 @@
+From ed0f941022515ff40473ea5335769a5dc2524a3f Mon Sep 17 00:00:00 2001
+From: Yuntao Liu <[email protected]>
+Date: Mon, 3 Jun 2024 16:37:50 +0100
+Subject: [PATCH] ARM: 9404/1: arm32: enable HAVE_LD_DEAD_CODE_DATA_ELIMINATION
+
+The current arm32 architecture does not yet support the
+HAVE_LD_DEAD_CODE_DATA_ELIMINATION feature. arm32 is widely used in
+embedded scenarios, and enabling this feature would be beneficial for
+reducing the size of the kernel image.
+
+In order to make this work, we keep the necessary tables by annotating
+them with KEEP, also it requires further changes to linker script to KEEP
+some tables and wildcard compiler generated sections into the right place.
+When using ld.lld for linking, KEEP is not recognized within the OVERLAY
+command, and Ard proposed a concise method to solve this problem.
+
+It boots normally with defconfig, vexpress_defconfig and tinyconfig.
+
+The size comparison of zImage is as follows:
+defconfig       vexpress_defconfig      tinyconfig
+5137712         5138024                 424192          no dce
+5032560         4997824                 298384          dce
+2.0%            2.7%                    29.7%           shrink
+
+When using smaller config file, there is a significant reduction in the
+size of the zImage.
+
+We also tested this patch on a commercially available single-board
+computer, and the comparison is as follows:
+a15eb_config
+2161384         no dce
+2092240         dce
+3.2%            shrink
+
+The zImage size has been reduced by approximately 3.2%, which is 70KB on
+2.1M.
+
+Signed-off-by: Yuntao Liu <[email protected]>
+Tested-by: Arnd Bergmann <[email protected]>
+Reviewed-by: Arnd Bergmann <[email protected]>
+Reviewed-by: Linus Walleij <[email protected]>
+Signed-off-by: Russell King (Oracle) <[email protected]>
+---
+ arch/arm/Kconfig                       | 1 +
+ arch/arm/boot/compressed/vmlinux.lds.S | 2 +-
+ arch/arm/include/asm/vmlinux.lds.h     | 2 +-
+ arch/arm/kernel/entry-armv.S           | 3 +++
+ arch/arm/kernel/vmlinux-xip.lds.S      | 4 ++--
+ arch/arm/kernel/vmlinux.lds.S          | 6 +++---
+ drivers/firmware/efi/libstub/Makefile  | 4 ++++
+ 7 files changed, 15 insertions(+), 7 deletions(-)
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -111,6 +111,7 @@ config ARM
+ 	select HAVE_KERNEL_XZ
+ 	select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M
+ 	select HAVE_KRETPROBES if HAVE_KPROBES
++	select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
+ 	select HAVE_MOD_ARCH_SPECIFIC
+ 	select HAVE_NMI
+ 	select HAVE_OPTPROBES if !THUMB2_KERNEL
+--- a/arch/arm/boot/compressed/vmlinux.lds.S
++++ b/arch/arm/boot/compressed/vmlinux.lds.S
+@@ -125,7 +125,7 @@ SECTIONS
+ 
+   . = BSS_START;
+   __bss_start = .;
+-  .bss			: { *(.bss) }
++  .bss			: { *(.bss .bss.*) }
+   _end = .;
+ 
+   . = ALIGN(8);		/* the stack must be 64-bit aligned */
+--- a/arch/arm/include/asm/vmlinux.lds.h
++++ b/arch/arm/include/asm/vmlinux.lds.h
+@@ -42,7 +42,7 @@
+ #define PROC_INFO							\
+ 		. = ALIGN(4);						\
+ 		__proc_info_begin = .;					\
+-		*(.proc.info.init)					\
++		KEEP(*(.proc.info.init))				\
+ 		__proc_info_end = .;
+ 
+ #define IDMAP_TEXT							\
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -1073,6 +1073,7 @@ vector_addrexcptn:
+ 	.globl	vector_fiq
+ 
+ 	.section .vectors, "ax", %progbits
++	.reloc  .text, R_ARM_NONE, .
+ 	W(b)	vector_rst
+ 	W(b)	vector_und
+ ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi		)
+@@ -1086,6 +1087,7 @@ THUMB(	.reloc	., R_ARM_THM_PC12, .L__vec
+ 
+ #ifdef CONFIG_HARDEN_BRANCH_HISTORY
+ 	.section .vectors.bhb.loop8, "ax", %progbits
++	.reloc  .text, R_ARM_NONE, .
+ 	W(b)	vector_rst
+ 	W(b)	vector_bhb_loop8_und
+ ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
+@@ -1098,6 +1100,7 @@ THUMB(	.reloc	., R_ARM_THM_PC12, .L__vec
+ 	W(b)	vector_bhb_loop8_fiq
+ 
+ 	.section .vectors.bhb.bpiall, "ax", %progbits
++	.reloc  .text, R_ARM_NONE, .
+ 	W(b)	vector_rst
+ 	W(b)	vector_bhb_bpiall_und
+ ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
+--- a/arch/arm/kernel/vmlinux-xip.lds.S
++++ b/arch/arm/kernel/vmlinux-xip.lds.S
+@@ -63,7 +63,7 @@ SECTIONS
+ 	. = ALIGN(4);
+ 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+ 		__start___ex_table = .;
+-		ARM_MMU_KEEP(*(__ex_table))
++		ARM_MMU_KEEP(KEEP(*(__ex_table)))
+ 		__stop___ex_table = .;
+ 	}
+ 
+@@ -83,7 +83,7 @@ SECTIONS
+ 	}
+ 	.init.arch.info : {
+ 		__arch_info_begin = .;
+-		*(.arch.info.init)
++		KEEP(*(.arch.info.init))
+ 		__arch_info_end = .;
+ 	}
+ 	.init.tagtable : {
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -74,7 +74,7 @@ SECTIONS
+ 	. = ALIGN(4);
+ 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+ 		__start___ex_table = .;
+-		ARM_MMU_KEEP(*(__ex_table))
++		ARM_MMU_KEEP(KEEP(*(__ex_table)))
+ 		__stop___ex_table = .;
+ 	}
+ 
+@@ -99,7 +99,7 @@ SECTIONS
+ 	}
+ 	.init.arch.info : {
+ 		__arch_info_begin = .;
+-		*(.arch.info.init)
++		KEEP(*(.arch.info.init))
+ 		__arch_info_end = .;
+ 	}
+ 	.init.tagtable : {
+@@ -116,7 +116,7 @@ SECTIONS
+ #endif
+ 	.init.pv_table : {
+ 		__pv_table_begin = .;
+-		*(.pv_table)
++		KEEP(*(.pv_table))
+ 		__pv_table_end = .;
+ 	}
+ 
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -67,6 +67,10 @@ OBJECT_FILES_NON_STANDARD	:= y
+ # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+ KCOV_INSTRUMENT			:= n
+ 
++# The .data section would be renamed to .data.efistub, therefore, remove
++# `-fdata-sections` flag from KBUILD_CFLAGS_KERNEL
++KBUILD_CFLAGS_KERNEL := $(filter-out -fdata-sections, $(KBUILD_CFLAGS_KERNEL))
++
+ lib-y				:= efi-stub-helper.o gop.o secureboot.o tpm.o \
+ 				   file.o mem.o random.o randomalloc.o pci.o \
+ 				   skip_spaces.o lib-cmdline.o lib-ctype.o \

+ 51 - 0
target/linux/generic/backport-6.6/300-v6.7-arm64-swiotlb-Reduce-the-default-size-if-no-ZONE_DMA.patch

@@ -0,0 +1,51 @@
+From 65033574ade97afccba074d837fd269903a83a9a Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <[email protected]>
+Date: Thu, 5 Oct 2023 16:40:30 +0100
+Subject: [PATCH] arm64: swiotlb: Reduce the default size if no ZONE_DMA
+ bouncing needed
+
+With CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC enabled, the arm64 kernel still
+allocates the default SWIOTLB buffer (64MB) even if ZONE_DMA is disabled
+or all the RAM fits into this zone. However, this potentially wastes a
+non-negligible amount of memory on platforms with little RAM.
+
+Reduce the SWIOTLB size to 1MB per 1GB of RAM if only needed for
+kmalloc() buffer bouncing.
+
+Signed-off-by: Catalin Marinas <[email protected]>
+Suggested-by: Ross Burton <[email protected]>
+Cc: Ross Burton <[email protected]>
+Cc: Will Deacon <[email protected]>
+Reviewed-by: Robin Murphy <[email protected]>
+---
+ arch/arm64/mm/init.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -16,6 +16,7 @@
+ #include <linux/nodemask.h>
+ #include <linux/initrd.h>
+ #include <linux/gfp.h>
++#include <linux/math.h>
+ #include <linux/memblock.h>
+ #include <linux/sort.h>
+ #include <linux/of.h>
+@@ -493,8 +494,16 @@ void __init mem_init(void)
+ {
+ 	bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
+ 
+-	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
++	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
++		/*
++		 * If no bouncing needed for ZONE_DMA, reduce the swiotlb
++		 * buffer for kmalloc() bouncing to 1MB per 1GB of RAM.
++		 */
++		unsigned long size =
++			DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
++		swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
+ 		swiotlb = true;
++	}
+ 
+ 	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
+ 

+ 161 - 0
target/linux/generic/backport-6.6/301-v6.9-kernel.h-removed-REPEAT_BYTE-from-kernel.h.patch

@@ -0,0 +1,161 @@
+From 66a5c40f60f5d88ad8d47ba6a4ba05892853fa1f Mon Sep 17 00:00:00 2001
+From: Tanzir Hasan <[email protected]>
+Date: Tue, 26 Dec 2023 18:00:00 +0000
+Subject: [PATCH] kernel.h: removed REPEAT_BYTE from kernel.h
+
+This patch creates wordpart.h and includes it in asm/word-at-a-time.h
+for all architectures. WORD_AT_A_TIME_CONSTANTS depends on kernel.h
+because of REPEAT_BYTE. Moving this to another header and including it
+where necessary allows us to not include the bloated kernel.h. Making
+this implicit dependency on REPEAT_BYTE explicit allows for later
+improvements in the lib/string.c inclusion list.
+
+Suggested-by: Al Viro <[email protected]>
+Suggested-by: Andy Shevchenko <[email protected]>
+Signed-off-by: Tanzir Hasan <[email protected]>
+Reviewed-by: Andy Shevchenko <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Kees Cook <[email protected]>
+---
+ arch/arm/include/asm/word-at-a-time.h     |  3 ++-
+ arch/arm64/include/asm/word-at-a-time.h   |  3 ++-
+ arch/powerpc/include/asm/word-at-a-time.h |  4 ++--
+ arch/riscv/include/asm/word-at-a-time.h   |  3 ++-
+ arch/s390/include/asm/word-at-a-time.h    |  3 ++-
+ arch/sh/include/asm/word-at-a-time.h      |  2 ++
+ arch/x86/include/asm/word-at-a-time.h     |  3 ++-
+ arch/x86/kvm/mmu/mmu.c                    |  1 +
+ fs/namei.c                                |  2 +-
+ include/asm-generic/word-at-a-time.h      |  3 ++-
+ include/linux/kernel.h                    |  8 --------
+ include/linux/wordpart.h                  | 13 +++++++++++++
+ 12 files changed, 31 insertions(+), 17 deletions(-)
+ create mode 100644 include/linux/wordpart.h
+
+--- a/arch/arm/include/asm/word-at-a-time.h
++++ b/arch/arm/include/asm/word-at-a-time.h
+@@ -8,7 +8,8 @@
+  * Little-endian word-at-a-time zero byte handling.
+  * Heavily based on the x86 algorithm.
+  */
+-#include <linux/kernel.h>
++#include <linux/bitops.h>
++#include <linux/wordpart.h>
+ 
+ struct word_at_a_time {
+ 	const unsigned long one_bits, high_bits;
+--- a/arch/arm64/include/asm/word-at-a-time.h
++++ b/arch/arm64/include/asm/word-at-a-time.h
+@@ -9,7 +9,8 @@
+ 
+ #ifndef __AARCH64EB__
+ 
+-#include <linux/kernel.h>
++#include <linux/bitops.h>
++#include <linux/wordpart.h>
+ 
+ struct word_at_a_time {
+ 	const unsigned long one_bits, high_bits;
+--- a/arch/powerpc/include/asm/word-at-a-time.h
++++ b/arch/powerpc/include/asm/word-at-a-time.h
+@@ -4,8 +4,8 @@
+ /*
+  * Word-at-a-time interfaces for PowerPC.
+  */
+-
+-#include <linux/kernel.h>
++#include <linux/bitops.h>
++#include <linux/wordpart.h>
+ #include <asm/asm-compat.h>
+ #include <asm/extable.h>
+ 
+--- a/arch/sh/include/asm/word-at-a-time.h
++++ b/arch/sh/include/asm/word-at-a-time.h
+@@ -5,6 +5,8 @@
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ # include <asm-generic/word-at-a-time.h>
+ #else
++#include <linux/bitops.h>
++#include <linux/wordpart.h>
+ /*
+  * Little-endian version cribbed from x86.
+  */
+--- a/arch/x86/include/asm/word-at-a-time.h
++++ b/arch/x86/include/asm/word-at-a-time.h
+@@ -2,7 +2,8 @@
+ #ifndef _ASM_WORD_AT_A_TIME_H
+ #define _ASM_WORD_AT_A_TIME_H
+ 
+-#include <linux/kernel.h>
++#include <linux/bitops.h>
++#include <linux/wordpart.h>
+ 
+ /*
+  * This is largely generic for little-endian machines, but the
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -47,6 +47,7 @@
+ #include <linux/kern_levels.h>
+ #include <linux/kstrtox.h>
+ #include <linux/kthread.h>
++#include <linux/wordpart.h>
+ 
+ #include <asm/page.h>
+ #include <asm/memtype.h>
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -17,8 +17,8 @@
+ 
+ #include <linux/init.h>
+ #include <linux/export.h>
+-#include <linux/kernel.h>
+ #include <linux/slab.h>
++#include <linux/wordpart.h>
+ #include <linux/fs.h>
+ #include <linux/filelock.h>
+ #include <linux/namei.h>
+--- a/include/asm-generic/word-at-a-time.h
++++ b/include/asm-generic/word-at-a-time.h
+@@ -2,7 +2,8 @@
+ #ifndef _ASM_WORD_AT_A_TIME_H
+ #define _ASM_WORD_AT_A_TIME_H
+ 
+-#include <linux/kernel.h>
++#include <linux/bitops.h>
++#include <linux/wordpart.h>
+ #include <asm/byteorder.h>
+ 
+ #ifdef __BIG_ENDIAN
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -38,14 +38,6 @@
+ 
+ #define STACK_MAGIC	0xdeadbeef
+ 
+-/**
+- * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+- * @x: value to repeat
+- *
+- * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+- */
+-#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
+-
+ /* generic data direction definitions */
+ #define READ			0
+ #define WRITE			1
+--- /dev/null
++++ b/include/linux/wordpart.h
+@@ -0,0 +1,13 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#ifndef _LINUX_WORDPART_H
++#define _LINUX_WORDPART_H
++/**
++ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
++ * @x: value to repeat
++ *
++ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
++ */
++#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
++
++#endif // _LINUX_WORDPART_H

+ 107 - 0
target/linux/generic/backport-6.6/302-v6.9-kernel.h-Move-upper_-_bits-and-lower_-_bits-to-wordp.patch

@@ -0,0 +1,107 @@
+From adeb04362d74188c1e22ccb824b15a0a7b3de2f4 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <[email protected]>
+Date: Wed, 14 Feb 2024 19:26:32 +0200
+Subject: [PATCH] kernel.h: Move upper_*_bits() and lower_*_bits() to
+ wordpart.h
+
+The wordpart.h header is collecting APIs related to the handling
+parts of the word (usually in byte granularity). The upper_*_bits()
+and lower_*_bits() are good candidates to be moved to there.
+
+This helps to clean up header dependency hell with regard to kernel.h
+as the latter gathers completely unrelated stuff together and slows
+down compilation (especially when it's included into other header).
+
+Signed-off-by: Andy Shevchenko <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Reviewed-by: Randy Dunlap <[email protected]>
+Signed-off-by: Kees Cook <[email protected]>
+---
+ include/linux/kernel.h   | 30 ++----------------------------
+ include/linux/wordpart.h | 29 +++++++++++++++++++++++++++++
+ 2 files changed, 31 insertions(+), 28 deletions(-)
+
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -32,6 +32,8 @@
+ #include <linux/sprintf.h>
+ #include <linux/static_call_types.h>
+ #include <linux/instruction_pointer.h>
++#include <linux/wordpart.h>
++
+ #include <asm/byteorder.h>
+ 
+ #include <uapi/linux/kernel.h>
+@@ -57,34 +59,6 @@
+ }					\
+ )
+ 
+-/**
+- * upper_32_bits - return bits 32-63 of a number
+- * @n: the number we're accessing
+- *
+- * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
+- * the "right shift count >= width of type" warning when that quantity is
+- * 32-bits.
+- */
+-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+-
+-/**
+- * lower_32_bits - return bits 0-31 of a number
+- * @n: the number we're accessing
+- */
+-#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
+-
+-/**
+- * upper_16_bits - return bits 16-31 of a number
+- * @n: the number we're accessing
+- */
+-#define upper_16_bits(n) ((u16)((n) >> 16))
+-
+-/**
+- * lower_16_bits - return bits 0-15 of a number
+- * @n: the number we're accessing
+- */
+-#define lower_16_bits(n) ((u16)((n) & 0xffff))
+-
+ struct completion;
+ struct user;
+ 
+--- a/include/linux/wordpart.h
++++ b/include/linux/wordpart.h
+@@ -2,6 +2,35 @@
+ 
+ #ifndef _LINUX_WORDPART_H
+ #define _LINUX_WORDPART_H
++
++/**
++ * upper_32_bits - return bits 32-63 of a number
++ * @n: the number we're accessing
++ *
++ * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
++ * the "right shift count >= width of type" warning when that quantity is
++ * 32-bits.
++ */
++#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
++
++/**
++ * lower_32_bits - return bits 0-31 of a number
++ * @n: the number we're accessing
++ */
++#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
++
++/**
++ * upper_16_bits - return bits 16-31 of a number
++ * @n: the number we're accessing
++ */
++#define upper_16_bits(n) ((u16)((n) >> 16))
++
++/**
++ * lower_16_bits - return bits 0-15 of a number
++ * @n: the number we're accessing
++ */
++#define lower_16_bits(n) ((u16)((n) & 0xffff))
++
+ /**
+  * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+  * @x: value to repeat

+ 206 - 0
target/linux/generic/backport-6.6/310-v6.7-mips-kexec-fix-the-incorrect-ifdeffery-and-dependenc.patch

@@ -0,0 +1,206 @@
+From 8cd2accb71f5eb8e92d775fc1978d3779875c2e5 Mon Sep 17 00:00:00 2001
+From: Baoquan He <[email protected]>
+Date: Fri, 8 Dec 2023 15:30:34 +0800
+Subject: [PATCH] mips, kexec: fix the incorrect ifdeffery and dependency of
+ CONFIG_KEXEC
+
+The select of KEXEC for CRASH_DUMP in kernel/Kconfig.kexec will be
+dropped, then compiling errors will be triggered if below config items are
+set:
+
+===
+CONFIG_CRASH_CORE=y
+CONFIG_KEXEC_CORE=y
+CONFIG_CRASH_DUMP=y
+===
+
+--------------------------------------------------------------------
+mipsel-linux-ld: kernel/kexec_core.o: in function `kimage_free':
+kernel/kexec_core.c:(.text+0x2200): undefined reference to `machine_kexec_cleanup'
+mipsel-linux-ld: kernel/kexec_core.o: in function `__crash_kexec':
+kernel/kexec_core.c:(.text+0x2480): undefined reference to `machine_crash_shutdown'
+mipsel-linux-ld: kernel/kexec_core.c:(.text+0x2488): undefined reference to `machine_kexec'
+mipsel-linux-ld: kernel/kexec_core.o: in function `kernel_kexec':
+kernel/kexec_core.c:(.text+0x29b8): undefined reference to `machine_shutdown'
+mipsel-linux-ld: kernel/kexec_core.c:(.text+0x29c0): undefined reference to `machine_kexec'
+--------------------------------------------------------------------
+
+Here, change the dependency of building kexec_core related object files,
+and the ifdeffery in mips from CONFIG_KEXEC to CONFIG_KEXEC_CORE.
+
+Link: https://lkml.kernel.org/r/[email protected]
+Signed-off-by: Baoquan He <[email protected]>
+Reported-by: kernel test robot <[email protected]>
+Closes: https://lore.kernel.org/oe-kbuild-all/[email protected]/
+Cc: Eric DeVolder <[email protected]>
+Cc: Ignat Korchagin <[email protected]>
+Cc: Stephen Rothwell <[email protected]>
+Signed-off-by: Andrew Morton <[email protected]>
+---
+ arch/mips/cavium-octeon/smp.c   |  4 ++--
+ arch/mips/include/asm/kexec.h   |  2 +-
+ arch/mips/include/asm/smp-ops.h |  2 +-
+ arch/mips/include/asm/smp.h     |  2 +-
+ arch/mips/kernel/Makefile       |  2 +-
+ arch/mips/kernel/smp-bmips.c    |  4 ++--
+ arch/mips/kernel/smp-cps.c      | 10 +++++-----
+ arch/mips/loongson64/reset.c    |  4 ++--
+ arch/mips/loongson64/smp.c      |  2 +-
+ 9 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/arch/mips/cavium-octeon/smp.c
++++ b/arch/mips/cavium-octeon/smp.c
+@@ -422,7 +422,7 @@ static const struct plat_smp_ops octeon_
+ 	.cpu_disable		= octeon_cpu_disable,
+ 	.cpu_die		= octeon_cpu_die,
+ #endif
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
+ #endif
+ };
+@@ -502,7 +502,7 @@ static const struct plat_smp_ops octeon_
+ 	.cpu_disable		= octeon_cpu_disable,
+ 	.cpu_die		= octeon_cpu_die,
+ #endif
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
+ #endif
+ };
+--- a/arch/mips/include/asm/kexec.h
++++ b/arch/mips/include/asm/kexec.h
+@@ -31,7 +31,7 @@ static inline void crash_setup_regs(stru
+ 		prepare_frametrace(newregs);
+ }
+ 
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ struct kimage;
+ extern unsigned long kexec_args[4];
+ extern int (*_machine_kexec_prepare)(struct kimage *);
+--- a/arch/mips/include/asm/smp-ops.h
++++ b/arch/mips/include/asm/smp-ops.h
+@@ -35,7 +35,7 @@ struct plat_smp_ops {
+ 	void (*cpu_die)(unsigned int cpu);
+ 	void (*cleanup_dead_cpu)(unsigned cpu);
+ #endif
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 	void (*kexec_nonboot_cpu)(void);
+ #endif
+ };
+--- a/arch/mips/include/asm/smp.h
++++ b/arch/mips/include/asm/smp.h
+@@ -93,7 +93,7 @@ static inline void __cpu_die(unsigned in
+ extern void __noreturn play_dead(void);
+ #endif
+ 
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ static inline void kexec_nonboot_cpu(void)
+ {
+ 	extern const struct plat_smp_ops *mp_ops;	/* private */
+--- a/arch/mips/kernel/Makefile
++++ b/arch/mips/kernel/Makefile
+@@ -90,7 +90,7 @@ obj-$(CONFIG_GPIO_TXX9)		+= gpio_txx9.o
+ 
+ obj-$(CONFIG_RELOCATABLE)	+= relocate.o
+ 
+-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o crash.o
++obj-$(CONFIG_KEXEC_CORE)	+= machine_kexec.o relocate_kernel.o crash.o
+ obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
+ obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+ obj-$(CONFIG_EARLY_PRINTK_8250)	+= early_printk_8250.o
+--- a/arch/mips/kernel/smp-bmips.c
++++ b/arch/mips/kernel/smp-bmips.c
+@@ -434,7 +434,7 @@ const struct plat_smp_ops bmips43xx_smp_
+ 	.cpu_disable		= bmips_cpu_disable,
+ 	.cpu_die		= bmips_cpu_die,
+ #endif
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
+ #endif
+ };
+@@ -451,7 +451,7 @@ const struct plat_smp_ops bmips5000_smp_
+ 	.cpu_disable		= bmips_cpu_disable,
+ 	.cpu_die		= bmips_cpu_die,
+ #endif
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 	.kexec_nonboot_cpu	= kexec_nonboot_cpu_jump,
+ #endif
+ };
+--- a/arch/mips/kernel/smp-cps.c
++++ b/arch/mips/kernel/smp-cps.c
+@@ -395,7 +395,7 @@ static void cps_smp_finish(void)
+ 	local_irq_enable();
+ }
+ 
+-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
++#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
+ 
+ enum cpu_death {
+ 	CPU_DEATH_HALT,
+@@ -432,7 +432,7 @@ static void cps_shutdown_this_cpu(enum c
+ 	}
+ }
+ 
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 
+ static void cps_kexec_nonboot_cpu(void)
+ {
+@@ -442,9 +442,9 @@ static void cps_kexec_nonboot_cpu(void)
+ 		cps_shutdown_this_cpu(CPU_DEATH_POWER);
+ }
+ 
+-#endif /* CONFIG_KEXEC */
++#endif /* CONFIG_KEXEC_CORE */
+ 
+-#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
++#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+ 
+@@ -613,7 +613,7 @@ static const struct plat_smp_ops cps_smp
+ 	.cpu_die		= cps_cpu_die,
+ 	.cleanup_dead_cpu	= cps_cleanup_dead_cpu,
+ #endif
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
+ #endif
+ };
+--- a/arch/mips/loongson64/reset.c
++++ b/arch/mips/loongson64/reset.c
+@@ -39,7 +39,7 @@ static int firmware_poweroff(struct sys_
+ 	return NOTIFY_DONE;
+ }
+ 
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 
+ /* 0X80000000~0X80200000 is safe */
+ #define MAX_ARGS	64
+@@ -152,7 +152,7 @@ static int __init mips_reboot_setup(void
+ 				 firmware_poweroff, NULL);
+ 	}
+ 
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 	kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
+ 	if (WARN_ON(!kexec_argv))
+ 		return -ENOMEM;
+--- a/arch/mips/loongson64/smp.c
++++ b/arch/mips/loongson64/smp.c
+@@ -883,7 +883,7 @@ const struct plat_smp_ops loongson3_smp_
+ 	.cpu_disable = loongson3_cpu_disable,
+ 	.cpu_die = loongson3_cpu_die,
+ #endif
+-#ifdef CONFIG_KEXEC
++#ifdef CONFIG_KEXEC_CORE
+ 	.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+ #endif
+ };

+ 171 - 0
target/linux/generic/backport-6.6/320-v6.11-mips-bmips-rework-and-cache-CBR-addr-handling.patch

@@ -0,0 +1,171 @@
+From a5c05453a13ab324ad8719e8a23dfb6af01f3652 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 20 Jun 2024 17:26:42 +0200
+Subject: [PATCH 1/4] mips: bmips: rework and cache CBR addr handling
+
+Rework the handling of the CBR address and cache it. This address
+doesn't change and can be cached instead of reading the register every
+time.
+
+This is in preparation of permitting to tweak the CBR address in DT with
+broken SoC or bootloader.
+
+bmips_cbr_addr is defined in setup.c for each arch to keep compatibility
+with legacy brcm47xx/brcm63xx and generic BMIPS target.
+
+Acked-by: Florian Fainelli <[email protected]>
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: Thomas Bogendoerfer <[email protected]>
+---
+ arch/mips/bcm47xx/prom.c      | 3 +++
+ arch/mips/bcm47xx/setup.c     | 4 ++++
+ arch/mips/bcm63xx/prom.c      | 3 +++
+ arch/mips/bcm63xx/setup.c     | 4 ++++
+ arch/mips/bmips/dma.c         | 2 +-
+ arch/mips/bmips/setup.c       | 7 ++++++-
+ arch/mips/include/asm/bmips.h | 1 +
+ arch/mips/kernel/smp-bmips.c  | 4 ++--
+ 8 files changed, 24 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/bcm47xx/prom.c
++++ b/arch/mips/bcm47xx/prom.c
+@@ -32,6 +32,7 @@
+ #include <linux/ssb/ssb_driver_chipcommon.h>
+ #include <linux/ssb/ssb_regs.h>
+ #include <linux/smp.h>
++#include <asm/bmips.h>
+ #include <asm/bootinfo.h>
+ #include <bcm47xx.h>
+ #include <bcm47xx_board.h>
+@@ -109,6 +110,8 @@ static __init void prom_init_mem(void)
+ 
+ void __init prom_init(void)
+ {
++	/* Cache CBR addr before CPU/DMA setup */
++	bmips_cbr_addr = BMIPS_GET_CBR();
+ 	prom_init_mem();
+ 	setup_8250_early_printk_port(CKSEG1ADDR(BCM47XX_SERIAL_ADDR), 0, 0);
+ }
+--- a/arch/mips/bcm47xx/setup.c
++++ b/arch/mips/bcm47xx/setup.c
+@@ -37,6 +37,7 @@
+ #include <linux/ssb/ssb.h>
+ #include <linux/ssb/ssb_embedded.h>
+ #include <linux/bcma/bcma_soc.h>
++#include <asm/bmips.h>
+ #include <asm/bootinfo.h>
+ #include <asm/idle.h>
+ #include <asm/prom.h>
+@@ -45,6 +46,9 @@
+ #include <bcm47xx.h>
+ #include <bcm47xx_board.h>
+ 
++/* CBR addr doesn't change and we can cache it */
++void __iomem *bmips_cbr_addr __read_mostly;
++
+ union bcm47xx_bus bcm47xx_bus;
+ EXPORT_SYMBOL(bcm47xx_bus);
+ 
+--- a/arch/mips/bcm63xx/prom.c
++++ b/arch/mips/bcm63xx/prom.c
+@@ -22,6 +22,9 @@ void __init prom_init(void)
+ {
+ 	u32 reg, mask;
+ 
++	/* Cache CBR addr before CPU/DMA setup */
++	bmips_cbr_addr = BMIPS_GET_CBR();
++
+ 	bcm63xx_cpu_init();
+ 
+ 	/* stop any running watchdog */
+--- a/arch/mips/bcm63xx/setup.c
++++ b/arch/mips/bcm63xx/setup.c
+@@ -12,6 +12,7 @@
+ #include <linux/memblock.h>
+ #include <linux/ioport.h>
+ #include <linux/pm.h>
++#include <asm/bmips.h>
+ #include <asm/bootinfo.h>
+ #include <asm/time.h>
+ #include <asm/reboot.h>
+@@ -22,6 +23,9 @@
+ #include <bcm63xx_io.h>
+ #include <bcm63xx_gpio.h>
+ 
++/* CBR addr doesn't change and we can cache it */
++void __iomem *bmips_cbr_addr __read_mostly;
++
+ void bcm63xx_machine_halt(void)
+ {
+ 	pr_info("System halted\n");
+--- a/arch/mips/bmips/dma.c
++++ b/arch/mips/bmips/dma.c
+@@ -9,7 +9,7 @@ bool bmips_rac_flush_disable;
+ 
+ void arch_sync_dma_for_cpu_all(void)
+ {
+-	void __iomem *cbr = BMIPS_GET_CBR();
++	void __iomem *cbr = bmips_cbr_addr;
+ 	u32 cfg;
+ 
+ 	if (boot_cpu_type() != CPU_BMIPS3300 &&
+--- a/arch/mips/bmips/setup.c
++++ b/arch/mips/bmips/setup.c
+@@ -34,6 +34,9 @@
+ #define REG_BCM6328_OTP		((void __iomem *)CKSEG1ADDR(0x1000062c))
+ #define BCM6328_TP1_DISABLED	BIT(9)
+ 
++/* CBR addr doesn't change and we can cache it */
++void __iomem *bmips_cbr_addr __read_mostly;
++
+ extern bool bmips_rac_flush_disable;
+ 
+ static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
+@@ -111,7 +114,7 @@ static void bcm6358_quirks(void)
+ 	 * because the bootloader is not initializing it properly.
+ 	 */
+ 	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
+-				  !!BMIPS_GET_CBR();
++				  !!bmips_cbr_addr;
+ }
+ 
+ static void bcm6368_quirks(void)
+@@ -144,6 +147,8 @@ static void __init bmips_init_cfe(void)
+ 
+ void __init prom_init(void)
+ {
++	/* Cache CBR addr before CPU/DMA setup */
++	bmips_cbr_addr = BMIPS_GET_CBR();
+ 	bmips_init_cfe();
+ 	bmips_cpu_setup();
+ 	register_bmips_smp_ops();
+--- a/arch/mips/include/asm/bmips.h
++++ b/arch/mips/include/asm/bmips.h
+@@ -81,6 +81,7 @@ extern char bmips_smp_movevec[];
+ extern char bmips_smp_int_vec[];
+ extern char bmips_smp_int_vec_end[];
+ 
++extern void __iomem *bmips_cbr_addr;
+ extern int bmips_smp_enabled;
+ extern int bmips_cpu_offset;
+ extern cpumask_t bmips_booted_mask;
+--- a/arch/mips/kernel/smp-bmips.c
++++ b/arch/mips/kernel/smp-bmips.c
+@@ -518,7 +518,7 @@ static void bmips_set_reset_vec(int cpu,
+ 		info.val = val;
+ 		bmips_set_reset_vec_remote(&info);
+ 	} else {
+-		void __iomem *cbr = BMIPS_GET_CBR();
++		void __iomem *cbr = bmips_cbr_addr;
+ 
+ 		if (cpu == 0)
+ 			__raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
+@@ -591,7 +591,7 @@ asmlinkage void __weak plat_wired_tlb_se
+ 
+ void bmips_cpu_setup(void)
+ {
+-	void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
++	void __iomem __maybe_unused *cbr = bmips_cbr_addr;
+ 	u32 __maybe_unused cfg;
+ 
+ 	switch (current_cpu_type()) {

+ 111 - 0
target/linux/generic/backport-6.6/321-v6.11-mips-bmips-setup-make-CBR-address-configurable.patch

@@ -0,0 +1,111 @@
+From b95b30e50aed225d26e20737873ae2404941901c Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 20 Jun 2024 17:26:44 +0200
+Subject: [PATCH 3/4] mips: bmips: setup: make CBR address configurable
+
+Add support to provide CBR address from DT to handle broken
+SoC/Bootloader that doesn't correctly init it. This permits to use the
+RAC flush even in these condition.
+
+To provide a CBR address from DT, the property "brcm,bmips-cbr-reg"
+needs to be set in the "cpus" node. On DT init, this property presence
+will be checked and will set the bmips_cbr_addr value accordingly. Also
+bmips_rac_flush_disable will be set to false as RAC flush can be
+correctly supported.
+
+The CBR address from DT will overwrite the cached one and the
+one set in the CBR register will be ignored.
+
+Also the DT CBR address is validated on being outside DRAM window.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Acked-by: Florian Fainelli <[email protected]>
+Signed-off-by: Thomas Bogendoerfer <[email protected]>
+---
+ arch/mips/bcm47xx/setup.c |  6 +++++-
+ arch/mips/bcm63xx/setup.c |  6 +++++-
+ arch/mips/bmips/setup.c   | 30 ++++++++++++++++++++++++++++--
+ 3 files changed, 38 insertions(+), 4 deletions(-)
+
+--- a/arch/mips/bcm47xx/setup.c
++++ b/arch/mips/bcm47xx/setup.c
+@@ -46,7 +46,11 @@
+ #include <bcm47xx.h>
+ #include <bcm47xx_board.h>
+ 
+-/* CBR addr doesn't change and we can cache it */
++/*
++ * CBR addr doesn't change and we can cache it.
++ * For broken SoC/Bootloader CBR addr might also be provided via DT
++ * with "brcm,bmips-cbr-reg" in the "cpus" node.
++ */
+ void __iomem *bmips_cbr_addr __read_mostly;
+ 
+ union bcm47xx_bus bcm47xx_bus;
+--- a/arch/mips/bcm63xx/setup.c
++++ b/arch/mips/bcm63xx/setup.c
+@@ -23,7 +23,11 @@
+ #include <bcm63xx_io.h>
+ #include <bcm63xx_gpio.h>
+ 
+-/* CBR addr doesn't change and we can cache it */
++/*
++ * CBR addr doesn't change and we can cache it.
++ * For broken SoC/Bootloader CBR addr might also be provided via DT
++ * with "brcm,bmips-cbr-reg" in the "cpus" node.
++ */
+ void __iomem *bmips_cbr_addr __read_mostly;
+ 
+ void bcm63xx_machine_halt(void)
+--- a/arch/mips/bmips/setup.c
++++ b/arch/mips/bmips/setup.c
+@@ -34,7 +34,11 @@
+ #define REG_BCM6328_OTP		((void __iomem *)CKSEG1ADDR(0x1000062c))
+ #define BCM6328_TP1_DISABLED	BIT(9)
+ 
+-/* CBR addr doesn't change and we can cache it */
++/*
++ * CBR addr doesn't change and we can cache it.
++ * For broken SoC/Bootloader CBR addr might also be provided via DT
++ * with "brcm,bmips-cbr-reg" in the "cpus" node.
++ */
+ void __iomem *bmips_cbr_addr __read_mostly;
+ 
+ extern bool bmips_rac_flush_disable;
+@@ -208,13 +212,35 @@ void __init plat_mem_setup(void)
+ void __init device_tree_init(void)
+ {
+ 	struct device_node *np;
++	u32 addr;
+ 
+ 	unflatten_and_copy_device_tree();
+ 
+ 	/* Disable SMP boot unless both CPUs are listed in DT and !disabled */
+ 	np = of_find_node_by_name(NULL, "cpus");
+-	if (np && of_get_available_child_count(np) <= 1)
++	if (!np)
++		return;
++
++	if (of_get_available_child_count(np) <= 1)
+ 		bmips_smp_enabled = 0;
++
++	/* Check if DT provide a CBR address */
++	if (of_property_read_u32(np, "brcm,bmips-cbr-reg", &addr))
++		goto exit;
++
++	/* Make sure CBR address is outside DRAM window */
++	if (addr >= (u32)memblock_start_of_DRAM() &&
++	    addr < (u32)memblock_end_of_DRAM()) {
++		WARN(1, "DT CBR %x inside DRAM window. Ignoring DT CBR.\n",
++		     addr);
++		goto exit;
++	}
++
++	bmips_cbr_addr = (void __iomem *)addr;
++	/* Since CBR is provided by DT, enable RAC flush */
++	bmips_rac_flush_disable = false;
++
++exit:
+ 	of_node_put(np);
+ }
+ 

+ 57 - 0
target/linux/generic/backport-6.6/322-v6.11-mips-bmips-enable-RAC-on-BMIPS4350.patch

@@ -0,0 +1,57 @@
+From 04f38d1a4db017f17e82442727b91ce03dd72759 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Daniel=20Gonz=C3=A1lez=20Cabanelas?= <[email protected]>
+Date: Thu, 20 Jun 2024 17:26:45 +0200
+Subject: [PATCH 4/4] mips: bmips: enable RAC on BMIPS4350
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The data RAC is left disabled by the bootloader in some SoCs, at least in
+the core it boots from.
+Enabling this feature increases the performance up to +30% depending on the
+task.
+
+Signed-off-by: Daniel González Cabanelas <[email protected]>
+Signed-off-by: Álvaro Fernández Rojas <[email protected]>
+[ rework code and reduce code duplication ]
+Acked-by: Florian Fainelli <[email protected]>
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: Thomas Bogendoerfer <[email protected]>
+---
+ arch/mips/kernel/smp-bmips.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/arch/mips/kernel/smp-bmips.c
++++ b/arch/mips/kernel/smp-bmips.c
+@@ -592,6 +592,7 @@ asmlinkage void __weak plat_wired_tlb_se
+ void bmips_cpu_setup(void)
+ {
+ 	void __iomem __maybe_unused *cbr = bmips_cbr_addr;
++	u32 __maybe_unused rac_addr;
+ 	u32 __maybe_unused cfg;
+ 
+ 	switch (current_cpu_type()) {
+@@ -620,6 +621,23 @@ void bmips_cpu_setup(void)
+ 		__raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+ 		break;
+ 
++	case CPU_BMIPS4350:
++		rac_addr = BMIPS_RAC_CONFIG_1;
++
++		if (!(read_c0_brcm_cmt_local() & (1 << 31)))
++			rac_addr = BMIPS_RAC_CONFIG;
++
++		/* Enable data RAC */
++		cfg = __raw_readl(cbr + rac_addr);
++		__raw_writel(cfg | 0xf, cbr + rac_addr);
++		__raw_readl(cbr + rac_addr);
++
++		/* Flush stale data out of the readahead cache */
++		cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
++		__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
++		__raw_readl(cbr + BMIPS_RAC_CONFIG);
++		break;
++
+ 	case CPU_BMIPS4380:
+ 		/* CBG workaround for early BMIPS4380 CPUs */
+ 		switch (read_c0_prid()) {

+ 36 - 0
target/linux/generic/backport-6.6/400-v6.9-mtd-rawnand-brcmnand-Support-write-protection-settin.patch

@@ -0,0 +1,36 @@
+From 8e7daa85641c9559c113f6b217bdc923397de77c Mon Sep 17 00:00:00 2001
+From: William Zhang <[email protected]>
+Date: Thu, 22 Feb 2024 19:47:58 -0800
+Subject: [PATCH] mtd: rawnand: brcmnand: Support write protection setting from
+ dts
+
+The write protection feature is controlled by the module parameter wp_on
+with default set to enabled. But not all the board use this feature
+especially in BCMBCA broadband board. And module parameter is not
+sufficient as different board can have different option.  Add a device
+tree property and allow this feature to be configured through the board
+dts on per board basis.
+
+Signed-off-by: William Zhang <[email protected]>
+Reviewed-by: Florian Fainelli <[email protected]>
+Reviewed-by: Kamal Dasu <[email protected]>
+Reviewed-by: David Regan <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+Link: https://lore.kernel.org/linux-mtd/[email protected]
+---
+ drivers/mtd/nand/raw/brcmnand/brcmnand.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -3194,6 +3194,10 @@ int brcmnand_probe(struct platform_devic
+ 	/* Disable XOR addressing */
+ 	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+ 
++	/* Check if the board connects the WP pin */
++	if (of_property_read_bool(dn, "brcm,wp-not-connected"))
++		wp_on = 0;
++
+ 	if (ctrl->features & BRCMNAND_HAS_WP) {
+ 		/* Permanently disable write protection */
+ 		if (wp_on == 2)

+ 123 - 0
target/linux/generic/backport-6.6/401-v6.9-dt-bindings-mtd-add-basic-bindings-for-UBI.patch

@@ -0,0 +1,123 @@
+From 25d88bfd35bac3196eafa666e3b05033b46ffa21 Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Tue, 19 Dec 2023 02:32:00 +0000
+Subject: [PATCH 1/8] dt-bindings: mtd: add basic bindings for UBI
+
+Add basic bindings for UBI devices and volumes.
+
+Signed-off-by: Daniel Golle <[email protected]>
+Reviewed-by: Rob Herring <[email protected]>
+Signed-off-by: Richard Weinberger <[email protected]>
+---
+ .../bindings/mtd/partitions/linux,ubi.yaml    | 65 +++++++++++++++++++
+ .../bindings/mtd/partitions/ubi-volume.yaml   | 35 ++++++++++
+ 2 files changed, 100 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
+ create mode 100644 Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
+@@ -0,0 +1,65 @@
++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/mtd/partitions/linux,ubi.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Unsorted Block Images
++
++description: |
++  UBI ("Unsorted Block Images") is a volume management system for raw
++  flash devices which manages multiple logical volumes on a single
++  physical flash device and spreads the I/O load (i.e wear-leveling)
++  across the whole flash chip.
++
++maintainers:
++  - Daniel Golle <[email protected]>
++
++allOf:
++  - $ref: partition.yaml#
++
++properties:
++  compatible:
++    const: linux,ubi
++
++  volumes:
++    type: object
++    description: UBI Volumes
++
++    patternProperties:
++      "^ubi-volume-.*$":
++        $ref: /schemas/mtd/partitions/ubi-volume.yaml#
++
++    unevaluatedProperties: false
++
++required:
++  - compatible
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    partitions {
++        compatible = "fixed-partitions";
++        #address-cells = <1>;
++        #size-cells = <1>;
++
++        partition@0 {
++            reg = <0x0 0x100000>;
++            label = "bootloader";
++            read-only;
++        };
++
++        partition@100000 {
++            reg = <0x100000 0x1ff00000>;
++            label = "ubi";
++            compatible = "linux,ubi";
++
++            volumes {
++                ubi-volume-caldata {
++                    volid = <2>;
++                    volname = "rf";
++                };
++            };
++        };
++    };
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
+@@ -0,0 +1,35 @@
++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/mtd/partitions/ubi-volume.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: UBI volume
++
++description: |
++  This binding describes a single UBI volume. Volumes can be matches either
++  by their ID or their name, or both.
++
++maintainers:
++  - Daniel Golle <[email protected]>
++
++properties:
++  volid:
++    $ref: /schemas/types.yaml#/definitions/uint32
++    description:
++      Match UBI volume ID
++
++  volname:
++    $ref: /schemas/types.yaml#/definitions/string
++    description:
++      Match UBI volume ID
++
++anyOf:
++  - required:
++      - volid
++
++  - required:
++      - volname
++
++# This is a generic file other binding inherit from and extend
++additionalProperties: true

+ 50 - 0
target/linux/generic/backport-6.6/402-v6.9-dt-bindings-mtd-ubi-volume-allow-UBI-volumes-to-prov.patch

@@ -0,0 +1,50 @@
+From 95b113222b5164ac0887eb5c514ff3970a0136f0 Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Tue, 19 Dec 2023 02:32:11 +0000
+Subject: [PATCH 2/8] dt-bindings: mtd: ubi-volume: allow UBI volumes to
+ provide NVMEM
+
+UBI volumes may be used to contain NVMEM bits, typically device MAC
+addresses or wireless radio calibration data.
+
+Signed-off-by: Daniel Golle <[email protected]>
+Reviewed-by: Rob Herring <[email protected]>
+Signed-off-by: Richard Weinberger <[email protected]>
+---
+ .../devicetree/bindings/mtd/partitions/linux,ubi.yaml  | 10 ++++++++++
+ .../devicetree/bindings/mtd/partitions/ubi-volume.yaml |  5 +++++
+ 2 files changed, 15 insertions(+)
+
+--- a/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
++++ b/Documentation/devicetree/bindings/mtd/partitions/linux,ubi.yaml
+@@ -59,6 +59,16 @@ examples:
+                 ubi-volume-caldata {
+                     volid = <2>;
+                     volname = "rf";
++
++                    nvmem-layout {
++                        compatible = "fixed-layout";
++                        #address-cells = <1>;
++                        #size-cells = <1>;
++
++                        eeprom@0 {
++                            reg = <0x0 0x1000>;
++                        };
++                    };
+                 };
+             };
+         };
+--- a/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
++++ b/Documentation/devicetree/bindings/mtd/partitions/ubi-volume.yaml
+@@ -24,6 +24,11 @@ properties:
+     description:
+       Match UBI volume ID
+ 
++  nvmem-layout:
++    $ref: /schemas/nvmem/layouts/nvmem-layout.yaml#
++    description:
++      This container may reference an NVMEM layout parser.
++
+ anyOf:
+   - required:
+       - volid

+ 285 - 0
target/linux/generic/backport-6.6/403-v6.9-mtd-ubi-block-use-notifier-to-create-ubiblock-from-p.patch

@@ -0,0 +1,285 @@
+From 2bba1cdcfcd2907d0696cc0139f1bd078d36ee81 Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Tue, 19 Dec 2023 02:32:35 +0000
+Subject: [PATCH 3/8] mtd: ubi: block: use notifier to create ubiblock from
+ parameter
+
+Use UBI_VOLUME_ADDED notification to create ubiblock device specified
+on kernel cmdline or module parameter.
+This makes thing more simple and has the advantage that ubiblock devices
+on volumes which are not present at the time the ubi module is probed
+will still be created.
+
+Suggested-by: Zhihao Cheng <[email protected]>
+Signed-off-by: Daniel Golle <[email protected]>
+Signed-off-by: Richard Weinberger <[email protected]>
+---
+ drivers/mtd/ubi/block.c | 136 ++++++++++++++++++++--------------------
+ drivers/mtd/ubi/kapi.c  |  54 +++++++++++-----
+ drivers/mtd/ubi/ubi.h   |   1 +
+ 3 files changed, 106 insertions(+), 85 deletions(-)
+
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -65,10 +65,10 @@ struct ubiblock_pdu {
+ };
+ 
+ /* Numbers of elements set in the @ubiblock_param array */
+-static int ubiblock_devs __initdata;
++static int ubiblock_devs;
+ 
+ /* MTD devices specification parameters */
+-static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
++static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
+ 
+ struct ubiblock {
+ 	struct ubi_volume_desc *desc;
+@@ -532,6 +532,70 @@ static int ubiblock_resize(struct ubi_vo
+ 	return 0;
+ }
+ 
++static bool
++match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
++{
++	int err, len, cur_ubi_num, cur_vol_id;
++
++	if (ubi_num == -1) {
++		/* No ubi num, name must be a vol device path */
++		err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
++		if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
++			return false;
++
++		return true;
++	}
++
++	if (vol_id == -1) {
++		/* Got ubi_num, but no vol_id, name must be volume name */
++		if (vi->ubi_num != ubi_num)
++			return false;
++
++		len = strnlen(name, UBI_VOL_NAME_MAX + 1);
++		if (len < 1 || vi->name_len != len)
++			return false;
++
++		if (strcmp(name, vi->name))
++			return false;
++
++		return true;
++	}
++
++	if (vi->ubi_num != ubi_num)
++		return false;
++
++	if (vi->vol_id != vol_id)
++		return false;
++
++	return true;
++}
++
++static void
++ubiblock_create_from_param(struct ubi_volume_info *vi)
++{
++	int i, ret = 0;
++	struct ubiblock_param *p;
++
++	/*
++	 * Iterate over ubiblock cmdline parameters. If a parameter matches the
++	 * newly added volume create the ubiblock device for it.
++	 */
++	for (i = 0; i < ubiblock_devs; i++) {
++		p = &ubiblock_param[i];
++
++		if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
++			continue;
++
++		ret = ubiblock_create(vi);
++		if (ret) {
++			pr_err(
++			       "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
++			       vi->name, p->ubi_num, p->vol_id, ret);
++		}
++		break;
++	}
++}
++
+ static int ubiblock_notify(struct notifier_block *nb,
+ 			 unsigned long notification_type, void *ns_ptr)
+ {
+@@ -539,10 +603,7 @@ static int ubiblock_notify(struct notifi
+ 
+ 	switch (notification_type) {
+ 	case UBI_VOLUME_ADDED:
+-		/*
+-		 * We want to enforce explicit block device creation for
+-		 * volumes, so when a volume is added we do nothing.
+-		 */
++		ubiblock_create_from_param(&nt->vi);
+ 		break;
+ 	case UBI_VOLUME_REMOVED:
+ 		ubiblock_remove(&nt->vi);
+@@ -568,56 +629,6 @@ static struct notifier_block ubiblock_no
+ 	.notifier_call = ubiblock_notify,
+ };
+ 
+-static struct ubi_volume_desc * __init
+-open_volume_desc(const char *name, int ubi_num, int vol_id)
+-{
+-	if (ubi_num == -1)
+-		/* No ubi num, name must be a vol device path */
+-		return ubi_open_volume_path(name, UBI_READONLY);
+-	else if (vol_id == -1)
+-		/* No vol_id, must be vol_name */
+-		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
+-	else
+-		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
+-}
+-
+-static void __init ubiblock_create_from_param(void)
+-{
+-	int i, ret = 0;
+-	struct ubiblock_param *p;
+-	struct ubi_volume_desc *desc;
+-	struct ubi_volume_info vi;
+-
+-	/*
+-	 * If there is an error creating one of the ubiblocks, continue on to
+-	 * create the following ubiblocks. This helps in a circumstance where
+-	 * the kernel command-line specifies multiple block devices and some
+-	 * may be broken, but we still want the working ones to come up.
+-	 */
+-	for (i = 0; i < ubiblock_devs; i++) {
+-		p = &ubiblock_param[i];
+-
+-		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
+-		if (IS_ERR(desc)) {
+-			pr_err(
+-			       "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
+-			       p->ubi_num, p->vol_id, PTR_ERR(desc));
+-			continue;
+-		}
+-
+-		ubi_get_volume_info(desc, &vi);
+-		ubi_close_volume(desc);
+-
+-		ret = ubiblock_create(&vi);
+-		if (ret) {
+-			pr_err(
+-			       "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+-			       vi.name, p->ubi_num, p->vol_id, ret);
+-			continue;
+-		}
+-	}
+-}
+-
+ static void ubiblock_remove_all(void)
+ {
+ 	struct ubiblock *next;
+@@ -643,18 +654,7 @@ int __init ubiblock_init(void)
+ 	if (ubiblock_major < 0)
+ 		return ubiblock_major;
+ 
+-	/*
+-	 * Attach block devices from 'block=' module param.
+-	 * Even if one block device in the param list fails to come up,
+-	 * still allow the module to load and leave any others up.
+-	 */
+-	ubiblock_create_from_param();
+-
+-	/*
+-	 * Block devices are only created upon user requests, so we ignore
+-	 * existing volumes.
+-	 */
+-	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
++	ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
+ 	if (ret)
+ 		goto err_unreg;
+ 	return 0;
+--- a/drivers/mtd/ubi/kapi.c
++++ b/drivers/mtd/ubi/kapi.c
+@@ -280,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_
+ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
+ 
+ /**
++ * ubi_get_num_by_path - get UBI device and volume number from device path
++ * @pathname: volume character device node path
++ * @ubi_num: pointer to UBI device number to be set
++ * @vol_id: pointer to UBI volume ID to be set
++ *
++ * Returns 0 on success and sets ubi_num and vol_id, returns error otherwise.
++ */
++int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
++{
++	int error;
++	struct path path;
++	struct kstat stat;
++
++	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
++	if (error)
++		return error;
++
++	error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
++	path_put(&path);
++	if (error)
++		return error;
++
++	if (!S_ISCHR(stat.mode))
++		return -EINVAL;
++
++	*ubi_num = ubi_major2num(MAJOR(stat.rdev));
++	*vol_id = MINOR(stat.rdev) - 1;
++
++	if (*vol_id < 0 || *ubi_num < 0)
++		return -ENODEV;
++
++	return 0;
++}
++
++/**
+  * ubi_open_volume_path - open UBI volume by its character device node path.
+  * @pathname: volume character device node path
+  * @mode: open mode
+@@ -290,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
+ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
+ {
+ 	int error, ubi_num, vol_id;
+-	struct path path;
+-	struct kstat stat;
+ 
+ 	dbg_gen("open volume %s, mode %d", pathname, mode);
+ 
+ 	if (!pathname || !*pathname)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+-	if (error)
+-		return ERR_PTR(error);
+-
+-	error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+-	path_put(&path);
++	error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
+ 	if (error)
+ 		return ERR_PTR(error);
+ 
+-	if (!S_ISCHR(stat.mode))
+-		return ERR_PTR(-EINVAL);
+-
+-	ubi_num = ubi_major2num(MAJOR(stat.rdev));
+-	vol_id = MINOR(stat.rdev) - 1;
+-
+-	if (vol_id >= 0 && ubi_num >= 0)
+-		return ubi_open_volume(ubi_num, vol_id, mode);
+-	return ERR_PTR(-ENODEV);
++	return ubi_open_volume(ubi_num, vol_id, mode);
+ }
+ EXPORT_SYMBOL_GPL(ubi_open_volume_path);
+ 
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -956,6 +956,7 @@ void ubi_free_internal_volumes(struct ub
+ void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
+ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ 			    struct ubi_volume_info *vi);
++int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
+ /* scan.c */
+ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ 		      int pnum, const struct ubi_vid_hdr *vid_hdr);

+ 205 - 0
target/linux/generic/backport-6.6/404-v6.9-mtd-ubi-attach-from-device-tree.patch

@@ -0,0 +1,205 @@
+From 6e331888643887ce85657527bc03f97d46235e71 Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Tue, 19 Dec 2023 02:33:14 +0000
+Subject: [PATCH 4/8] mtd: ubi: attach from device tree
+
+Introduce device tree compatible 'linux,ubi' and attach compatible MTD
+devices using the MTD add notifier. This is needed for a UBI device to
+be available early at boot (and not only after late_initcall), so
+volumes on them can be used eg. as NVMEM providers for other drivers.
+
+Signed-off-by: Daniel Golle <[email protected]>
+Signed-off-by: Richard Weinberger <[email protected]>
+---
+ drivers/mtd/ubi/build.c | 135 ++++++++++++++++++++++++++++------------
+ 1 file changed, 96 insertions(+), 39 deletions(-)
+
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -27,6 +27,7 @@
+ #include <linux/log2.h>
+ #include <linux/kthread.h>
+ #include <linux/kernel.h>
++#include <linux/of.h>
+ #include <linux/slab.h>
+ #include <linux/major.h>
+ #include "ubi.h"
+@@ -1214,43 +1215,43 @@ static struct mtd_info * __init open_mtd
+ 	return mtd;
+ }
+ 
+-static int __init ubi_init(void)
++static void ubi_notify_add(struct mtd_info *mtd)
+ {
+-	int err, i, k;
++	struct device_node *np = mtd_get_of_node(mtd);
++	int err;
+ 
+-	/* Ensure that EC and VID headers have correct size */
+-	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+-	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
++	if (!of_device_is_compatible(np, "linux,ubi"))
++		return;
+ 
+-	if (mtd_devs > UBI_MAX_DEVICES) {
+-		pr_err("UBI error: too many MTD devices, maximum is %d\n",
+-		       UBI_MAX_DEVICES);
+-		return -EINVAL;
+-	}
++	/*
++	 * we are already holding &mtd_table_mutex, but still need
++	 * to bump refcount
++	 */
++	err = __get_mtd_device(mtd);
++	if (err)
++		return;
+ 
+-	/* Create base sysfs directory and sysfs files */
+-	err = class_register(&ubi_class);
++	/* called while holding mtd_table_mutex */
++	mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
++	err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false);
++	mutex_unlock(&ubi_devices_mutex);
+ 	if (err < 0)
+-		return err;
+-
+-	err = misc_register(&ubi_ctrl_cdev);
+-	if (err) {
+-		pr_err("UBI error: cannot register device\n");
+-		goto out;
+-	}
++		__put_mtd_device(mtd);
++}
+ 
+-	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+-					      sizeof(struct ubi_wl_entry),
+-					      0, 0, NULL);
+-	if (!ubi_wl_entry_slab) {
+-		err = -ENOMEM;
+-		goto out_dev_unreg;
+-	}
++static void ubi_notify_remove(struct mtd_info *mtd)
++{
++	/* do nothing for now */
++}
+ 
+-	err = ubi_debugfs_init();
+-	if (err)
+-		goto out_slab;
++static struct mtd_notifier ubi_mtd_notifier = {
++	.add = ubi_notify_add,
++	.remove = ubi_notify_remove,
++};
+ 
++static int __init ubi_init_attach(void)
++{
++	int err, i, k;
+ 
+ 	/* Attach MTD devices */
+ 	for (i = 0; i < mtd_devs; i++) {
+@@ -1298,25 +1299,79 @@ static int __init ubi_init(void)
+ 		}
+ 	}
+ 
++	return 0;
++
++out_detach:
++	for (k = 0; k < i; k++)
++		if (ubi_devices[k]) {
++			mutex_lock(&ubi_devices_mutex);
++			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
++			mutex_unlock(&ubi_devices_mutex);
++		}
++	return err;
++}
++#ifndef CONFIG_MTD_UBI_MODULE
++late_initcall(ubi_init_attach);
++#endif
++
++static int __init ubi_init(void)
++{
++	int err;
++
++	/* Ensure that EC and VID headers have correct size */
++	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
++	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
++
++	if (mtd_devs > UBI_MAX_DEVICES) {
++		pr_err("UBI error: too many MTD devices, maximum is %d\n",
++		       UBI_MAX_DEVICES);
++		return -EINVAL;
++	}
++
++	/* Create base sysfs directory and sysfs files */
++	err = class_register(&ubi_class);
++	if (err < 0)
++		return err;
++
++	err = misc_register(&ubi_ctrl_cdev);
++	if (err) {
++		pr_err("UBI error: cannot register device\n");
++		goto out;
++	}
++
++	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
++					      sizeof(struct ubi_wl_entry),
++					      0, 0, NULL);
++	if (!ubi_wl_entry_slab) {
++		err = -ENOMEM;
++		goto out_dev_unreg;
++	}
++
++	err = ubi_debugfs_init();
++	if (err)
++		goto out_slab;
++
+ 	err = ubiblock_init();
+ 	if (err) {
+ 		pr_err("UBI error: block: cannot initialize, error %d\n", err);
+ 
+ 		/* See comment above re-ubi_is_module(). */
+ 		if (ubi_is_module())
+-			goto out_detach;
++			goto out_slab;
++	}
++
++	register_mtd_user(&ubi_mtd_notifier);
++
++	if (ubi_is_module()) {
++		err = ubi_init_attach();
++		if (err)
++			goto out_mtd_notifier;
+ 	}
+ 
+ 	return 0;
+ 
+-out_detach:
+-	for (k = 0; k < i; k++)
+-		if (ubi_devices[k]) {
+-			mutex_lock(&ubi_devices_mutex);
+-			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+-			mutex_unlock(&ubi_devices_mutex);
+-		}
+-	ubi_debugfs_exit();
++out_mtd_notifier:
++	unregister_mtd_user(&ubi_mtd_notifier);
+ out_slab:
+ 	kmem_cache_destroy(ubi_wl_entry_slab);
+ out_dev_unreg:
+@@ -1326,13 +1381,15 @@ out:
+ 	pr_err("UBI error: cannot initialize UBI, error %d\n", err);
+ 	return err;
+ }
+-late_initcall(ubi_init);
++device_initcall(ubi_init);
++
+ 
+ static void __exit ubi_exit(void)
+ {
+ 	int i;
+ 
+ 	ubiblock_exit();
++	unregister_mtd_user(&ubi_mtd_notifier);
+ 
+ 	for (i = 0; i < UBI_MAX_DEVICES; i++)
+ 		if (ubi_devices[i]) {

+ 180 - 0
target/linux/generic/backport-6.6/405-v6.9-mtd-ubi-introduce-pre-removal-notification-for-UBI-v.patch

@@ -0,0 +1,180 @@
+From 924731fbed3247e3b82b8ab17db587ee28c2e781 Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Tue, 19 Dec 2023 02:33:24 +0000
+Subject: [PATCH 5/8] mtd: ubi: introduce pre-removal notification for UBI
+ volumes
+
+Introduce a new notification type UBI_VOLUME_SHUTDOWN to inform users
+that a volume is just about to be removed.
+This is needed because users (such as the NVMEM subsystem) expect that
+at the time their removal function is called, the parenting device is
+still available (for removal of sysfs nodes, for example, in case of
+NVMEM which otherwise WARNs on volume removal).
+
+Signed-off-by: Daniel Golle <[email protected]>
+Signed-off-by: Richard Weinberger <[email protected]>
+---
+ drivers/mtd/ubi/build.c | 19 ++++++++++++++-----
+ drivers/mtd/ubi/kapi.c  |  2 +-
+ drivers/mtd/ubi/ubi.h   |  2 ++
+ drivers/mtd/ubi/vmt.c   | 17 +++++++++++++++--
+ include/linux/mtd/ubi.h |  2 ++
+ 5 files changed, 34 insertions(+), 8 deletions(-)
+
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -91,7 +91,7 @@ static struct ubi_device *ubi_devices[UB
+ /* Serializes UBI devices creations and removals */
+ DEFINE_MUTEX(ubi_devices_mutex);
+ 
+-/* Protects @ubi_devices and @ubi->ref_count */
++/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
+ static DEFINE_SPINLOCK(ubi_devices_lock);
+ 
+ /* "Show" method for files in '/<sysfs>/class/ubi/' */
+@@ -259,6 +259,9 @@ struct ubi_device *ubi_get_device(int ub
+ 
+ 	spin_lock(&ubi_devices_lock);
+ 	ubi = ubi_devices[ubi_num];
++	if (ubi && ubi->is_dead)
++		ubi = NULL;
++
+ 	if (ubi) {
+ 		ubi_assert(ubi->ref_count >= 0);
+ 		ubi->ref_count += 1;
+@@ -296,7 +299,7 @@ struct ubi_device *ubi_get_by_major(int
+ 	spin_lock(&ubi_devices_lock);
+ 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ 		ubi = ubi_devices[i];
+-		if (ubi && MAJOR(ubi->cdev.dev) == major) {
++		if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
+ 			ubi_assert(ubi->ref_count >= 0);
+ 			ubi->ref_count += 1;
+ 			get_device(&ubi->dev);
+@@ -325,7 +328,7 @@ int ubi_major2num(int major)
+ 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ 		struct ubi_device *ubi = ubi_devices[i];
+ 
+-		if (ubi && MAJOR(ubi->cdev.dev) == major) {
++		if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
+ 			ubi_num = ubi->ubi_num;
+ 			break;
+ 		}
+@@ -512,7 +515,7 @@ static void ubi_free_volumes_from(struct
+ 	int i;
+ 
+ 	for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+-		if (!ubi->volumes[i])
++		if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
+ 			continue;
+ 		ubi_eba_replace_table(ubi->volumes[i], NULL);
+ 		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
+@@ -1094,7 +1097,6 @@ int ubi_detach_mtd_dev(int ubi_num, int
+ 		return -EINVAL;
+ 
+ 	spin_lock(&ubi_devices_lock);
+-	put_device(&ubi->dev);
+ 	ubi->ref_count -= 1;
+ 	if (ubi->ref_count) {
+ 		if (!anyway) {
+@@ -1105,6 +1107,13 @@ int ubi_detach_mtd_dev(int ubi_num, int
+ 		ubi_err(ubi, "%s reference count %d, destroy anyway",
+ 			ubi->ubi_name, ubi->ref_count);
+ 	}
++	ubi->is_dead = true;
++	spin_unlock(&ubi_devices_lock);
++
++	ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
++
++	spin_lock(&ubi_devices_lock);
++	put_device(&ubi->dev);
+ 	ubi_devices[ubi_num] = NULL;
+ 	spin_unlock(&ubi_devices_lock);
+ 
+--- a/drivers/mtd/ubi/kapi.c
++++ b/drivers/mtd/ubi/kapi.c
+@@ -152,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(
+ 
+ 	spin_lock(&ubi->volumes_lock);
+ 	vol = ubi->volumes[vol_id];
+-	if (!vol)
++	if (!vol || vol->is_dead)
+ 		goto out_unlock;
+ 
+ 	err = -EBUSY;
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -345,6 +345,7 @@ struct ubi_volume {
+ 	int writers;
+ 	int exclusive;
+ 	int metaonly;
++	bool is_dead;
+ 
+ 	int reserved_pebs;
+ 	int vol_type;
+@@ -564,6 +565,7 @@ struct ubi_device {
+ 	spinlock_t volumes_lock;
+ 	int ref_count;
+ 	int image_seq;
++	bool is_dead;
+ 
+ 	int rsvd_pebs;
+ 	int avail_pebs;
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -59,7 +59,7 @@ static ssize_t vol_attribute_show(struct
+ 	struct ubi_device *ubi = vol->ubi;
+ 
+ 	spin_lock(&ubi->volumes_lock);
+-	if (!ubi->volumes[vol->vol_id]) {
++	if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
+ 		spin_unlock(&ubi->volumes_lock);
+ 		return -ENODEV;
+ 	}
+@@ -189,7 +189,7 @@ int ubi_create_volume(struct ubi_device
+ 
+ 	/* Ensure that the name is unique */
+ 	for (i = 0; i < ubi->vtbl_slots; i++)
+-		if (ubi->volumes[i] &&
++		if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
+ 		    ubi->volumes[i]->name_len == req->name_len &&
+ 		    !strcmp(ubi->volumes[i]->name, req->name)) {
+ 			ubi_err(ubi, "volume \"%s\" exists (ID %d)",
+@@ -352,6 +352,19 @@ int ubi_remove_volume(struct ubi_volume_
+ 		err = -EBUSY;
+ 		goto out_unlock;
+ 	}
++
++	/*
++	 * Mark volume as dead at this point to prevent that anyone
++	 * can take a reference to the volume from now on.
++	 * This is necessary as we have to release the spinlock before
++	 * calling ubi_volume_notify.
++	 */
++	vol->is_dead = true;
++	spin_unlock(&ubi->volumes_lock);
++
++	ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
++
++	spin_lock(&ubi->volumes_lock);
+ 	ubi->volumes[vol_id] = NULL;
+ 	spin_unlock(&ubi->volumes_lock);
+ 
+--- a/include/linux/mtd/ubi.h
++++ b/include/linux/mtd/ubi.h
+@@ -192,6 +192,7 @@ struct ubi_device_info {
+  *			or a volume was removed)
+  * @UBI_VOLUME_RESIZED: a volume has been re-sized
+  * @UBI_VOLUME_RENAMED: a volume has been re-named
++ * @UBI_VOLUME_SHUTDOWN: a volume is going to be removed, shut down users
+  * @UBI_VOLUME_UPDATED: data has been written to a volume
+  *
+  * These constants define which type of event has happened when a volume
+@@ -202,6 +203,7 @@ enum {
+ 	UBI_VOLUME_REMOVED,
+ 	UBI_VOLUME_RESIZED,
+ 	UBI_VOLUME_RENAMED,
++	UBI_VOLUME_SHUTDOWN,
+ 	UBI_VOLUME_UPDATED,
+ };
+ 

+ 66 - 0
target/linux/generic/backport-6.6/406-v6.9-mtd-ubi-populate-ubi-volume-fwnode.patch

@@ -0,0 +1,66 @@
+From 1c54542170819e36baa43c17ca55bb3d7da89a53 Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Tue, 19 Dec 2023 02:33:38 +0000
+Subject: [PATCH 6/8] mtd: ubi: populate ubi volume fwnode
+
+Look for the 'volumes' subnode of an MTD partition attached to a UBI
+device and attach matching child nodes to UBI volumes.
+This allows UBI volumes to be referenced in device tree, e.g. for use
+as NVMEM providers.
+
+Signed-off-by: Daniel Golle <[email protected]>
+Signed-off-by: Richard Weinberger <[email protected]>
+---
+ drivers/mtd/ubi/vmt.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -124,6 +124,31 @@ static void vol_release(struct device *d
+ 	kfree(vol);
+ }
+ 
++static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
++{
++	struct fwnode_handle *fw_vols, *fw_vol;
++	const char *volname;
++	u32 volid;
++
++	fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
++	if (!fw_vols)
++		return NULL;
++
++	fwnode_for_each_child_node(fw_vols, fw_vol) {
++		if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
++		    strncmp(volname, vol->name, vol->name_len))
++			continue;
++
++		if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
++		    vol->vol_id != volid)
++			continue;
++
++		return fw_vol;
++	}
++
++	return NULL;
++}
++
+ /**
+  * ubi_create_volume - create volume.
+  * @ubi: UBI device description object
+@@ -223,6 +248,7 @@ int ubi_create_volume(struct ubi_device
+ 	vol->name_len  = req->name_len;
+ 	memcpy(vol->name, req->name, vol->name_len);
+ 	vol->ubi = ubi;
++	device_set_node(&vol->dev, find_volume_fwnode(vol));
+ 
+ 	/*
+ 	 * Finish all pending erases because there may be some LEBs belonging
+@@ -605,6 +631,7 @@ int ubi_add_volume(struct ubi_device *ub
+ 	vol->dev.class = &ubi_class;
+ 	vol->dev.groups = volume_dev_groups;
+ 	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
++	device_set_node(&vol->dev, find_volume_fwnode(vol));
+ 	err = device_register(&vol->dev);
+ 	if (err) {
+ 		cdev_del(&vol->cdev);

+ 244 - 0
target/linux/generic/backport-6.6/407-v6.9-mtd-ubi-provide-NVMEM-layer-over-UBI-volumes.patch

@@ -0,0 +1,244 @@
+From 15fc7dc926c91c871f6c0305b2938dbdeb14203b Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Tue, 19 Dec 2023 02:33:48 +0000
+Subject: [PATCH 7/8] mtd: ubi: provide NVMEM layer over UBI volumes
+
+In an ideal world we would like UBI to be used where ever possible on a
+NAND chip. And with UBI support in ARM Trusted Firmware and U-Boot it
+is possible to achieve an (almost-)all-UBI flash layout. Hence the need
+for a way to also use UBI volumes to store board-level constants, such
+as MAC addresses and calibration data of wireless interfaces.
+
+Add UBI volume NVMEM driver module exposing UBI volumes as NVMEM
+providers. Allow UBI devices to have a "volumes" firmware subnode with
+volumes which may be compatible with "nvmem-cells".
+Access to UBI volumes via the NVMEM interface at this point is
+read-only, and it is slow, opening and closing the UBI volume for each
+access due to limitations of the NVMEM provider API.
+
+Signed-off-by: Daniel Golle <[email protected]>
+Signed-off-by: Richard Weinberger <[email protected]>
+---
+ drivers/mtd/ubi/Kconfig  |  12 +++
+ drivers/mtd/ubi/Makefile |   1 +
+ drivers/mtd/ubi/nvmem.c  | 188 +++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 201 insertions(+)
+ create mode 100644 drivers/mtd/ubi/nvmem.c
+
+--- a/drivers/mtd/ubi/Kconfig
++++ b/drivers/mtd/ubi/Kconfig
+@@ -104,4 +104,16 @@ config MTD_UBI_BLOCK
+ 
+ 	   If in doubt, say "N".
+ 
++config MTD_UBI_NVMEM
++	tristate "UBI virtual NVMEM"
++	default n
++	depends on NVMEM
++	help
++	   This option enables an additional driver exposing UBI volumes as NVMEM
++	   providers, intended for platforms where UBI is part of the firmware
++	   specification and used to store also e.g. MAC addresses or board-
++	   specific Wi-Fi calibration data.
++
++	   If in doubt, say "N".
++
+ endif # MTD_UBI
+--- a/drivers/mtd/ubi/Makefile
++++ b/drivers/mtd/ubi/Makefile
+@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap
+ ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
+ 
+ obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
++obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
+--- /dev/null
++++ b/drivers/mtd/ubi/nvmem.c
+@@ -0,0 +1,188 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright (c) 2023 Daniel Golle <[email protected]>
++ */
++
++/* UBI NVMEM provider */
++#include "ubi.h"
++#include <linux/nvmem-provider.h>
++#include <asm/div64.h>
++
++/* List of all NVMEM devices */
++static LIST_HEAD(nvmem_devices);
++static DEFINE_MUTEX(devices_mutex);
++
++struct ubi_nvmem {
++	struct nvmem_device *nvmem;
++	int ubi_num;
++	int vol_id;
++	int usable_leb_size;
++	struct list_head list;
++};
++
++static int ubi_nvmem_reg_read(void *priv, unsigned int from,
++			      void *val, size_t bytes)
++{
++	int err = 0, lnum = from, offs, bytes_left = bytes, to_read;
++	struct ubi_nvmem *unv = priv;
++	struct ubi_volume_desc *desc;
++
++	desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
++	if (IS_ERR(desc))
++		return PTR_ERR(desc);
++
++	offs = do_div(lnum, unv->usable_leb_size);
++	while (bytes_left) {
++		to_read = unv->usable_leb_size - offs;
++
++		if (to_read > bytes_left)
++			to_read = bytes_left;
++
++		err = ubi_read(desc, lnum, val, offs, to_read);
++		if (err)
++			break;
++
++		lnum += 1;
++		offs = 0;
++		bytes_left -= to_read;
++		val += to_read;
++	}
++	ubi_close_volume(desc);
++
++	if (err)
++		return err;
++
++	return bytes_left == 0 ? 0 : -EIO;
++}
++
++static int ubi_nvmem_add(struct ubi_volume_info *vi)
++{
++	struct device_node *np = dev_of_node(vi->dev);
++	struct nvmem_config config = {};
++	struct ubi_nvmem *unv;
++	int ret;
++
++	if (!np)
++		return 0;
++
++	if (!of_get_child_by_name(np, "nvmem-layout"))
++		return 0;
++
++	if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
++	    WARN_ON_ONCE(vi->size <= 0))
++		return -EINVAL;
++
++	unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
++	if (!unv)
++		return -ENOMEM;
++
++	config.id = NVMEM_DEVID_NONE;
++	config.dev = vi->dev;
++	config.name = dev_name(vi->dev);
++	config.owner = THIS_MODULE;
++	config.priv = unv;
++	config.reg_read = ubi_nvmem_reg_read;
++	config.size = vi->usable_leb_size * vi->size;
++	config.word_size = 1;
++	config.stride = 1;
++	config.read_only = true;
++	config.root_only = true;
++	config.ignore_wp = true;
++	config.of_node = np;
++
++	unv->ubi_num = vi->ubi_num;
++	unv->vol_id = vi->vol_id;
++	unv->usable_leb_size = vi->usable_leb_size;
++	unv->nvmem = nvmem_register(&config);
++	if (IS_ERR(unv->nvmem)) {
++		ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
++				    "Failed to register NVMEM device\n");
++		kfree(unv);
++		return ret;
++	}
++
++	mutex_lock(&devices_mutex);
++	list_add_tail(&unv->list, &nvmem_devices);
++	mutex_unlock(&devices_mutex);
++
++	return 0;
++}
++
++static void ubi_nvmem_remove(struct ubi_volume_info *vi)
++{
++	struct ubi_nvmem *unv_c, *unv = NULL;
++
++	mutex_lock(&devices_mutex);
++	list_for_each_entry(unv_c, &nvmem_devices, list)
++		if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
++			unv = unv_c;
++			break;
++		}
++
++	if (!unv) {
++		mutex_unlock(&devices_mutex);
++		return;
++	}
++
++	list_del(&unv->list);
++	mutex_unlock(&devices_mutex);
++	nvmem_unregister(unv->nvmem);
++	kfree(unv);
++}
++
++/**
++ * nvmem_notify - UBI notification handler.
++ * @nb: registered notifier block
++ * @l: notification type
++ * @ns_ptr: pointer to the &struct ubi_notification object
++ */
++static int nvmem_notify(struct notifier_block *nb, unsigned long l,
++			 void *ns_ptr)
++{
++	struct ubi_notification *nt = ns_ptr;
++
++	switch (l) {
++	case UBI_VOLUME_RESIZED:
++		ubi_nvmem_remove(&nt->vi);
++		fallthrough;
++	case UBI_VOLUME_ADDED:
++		ubi_nvmem_add(&nt->vi);
++		break;
++	case UBI_VOLUME_SHUTDOWN:
++		ubi_nvmem_remove(&nt->vi);
++		break;
++	default:
++		break;
++	}
++	return NOTIFY_OK;
++}
++
++static struct notifier_block nvmem_notifier = {
++	.notifier_call = nvmem_notify,
++};
++
++static int __init ubi_nvmem_init(void)
++{
++	return ubi_register_volume_notifier(&nvmem_notifier, 0);
++}
++
++static void __exit ubi_nvmem_exit(void)
++{
++	struct ubi_nvmem *unv, *tmp;
++
++	mutex_lock(&devices_mutex);
++	list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
++		nvmem_unregister(unv->nvmem);
++		list_del(&unv->list);
++		kfree(unv);
++	}
++	mutex_unlock(&devices_mutex);
++
++	ubi_unregister_volume_notifier(&nvmem_notifier);
++}
++
++module_init(ubi_nvmem_init);
++module_exit(ubi_nvmem_exit);
++MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
++MODULE_AUTHOR("Daniel Golle");
++MODULE_LICENSE("GPL");

+ 34 - 0
target/linux/generic/backport-6.6/408-v6.9-mtd-ubi-fix-NVMEM-over-UBI-volumes-on-32-bit-systems.patch

@@ -0,0 +1,34 @@
+From 04231c61dcd51db0f12061e49bb761b197109f2f Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Thu, 29 Feb 2024 03:47:24 +0000
+Subject: [PATCH 8/8] mtd: ubi: fix NVMEM over UBI volumes on 32-bit systems
+
+A compiler warning related to sizeof(int) != 8 when calling do_div()
+is triggered when building on 32-bit platforms.
+Address this by using integer types having a well-defined size.
+
+Fixes: 3ce485803da1 ("mtd: ubi: provide NVMEM layer over UBI volumes")
+Signed-off-by: Daniel Golle <[email protected]>
+Reviewed-by: Zhihao Cheng <[email protected]>
+Tested-by: Randy Dunlap <[email protected]>
+Signed-off-by: Richard Weinberger <[email protected]>
+---
+ drivers/mtd/ubi/nvmem.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/ubi/nvmem.c
++++ b/drivers/mtd/ubi/nvmem.c
+@@ -23,9 +23,12 @@ struct ubi_nvmem {
+ static int ubi_nvmem_reg_read(void *priv, unsigned int from,
+ 			      void *val, size_t bytes)
+ {
+-	int err = 0, lnum = from, offs, bytes_left = bytes, to_read;
++	size_t to_read, bytes_left = bytes;
+ 	struct ubi_nvmem *unv = priv;
+ 	struct ubi_volume_desc *desc;
++	uint32_t offs;
++	uint64_t lnum = from;
++	int err = 0;
+ 
+ 	desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
+ 	if (IS_ERR(desc))

+ 53 - 0
target/linux/generic/backport-6.6/410-v6.13-01-block-add-support-for-defining-read-only-partitions.patch

@@ -0,0 +1,53 @@
+From 03cb793b26834ddca170ba87057c8f883772dd45 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 3 Oct 2024 00:11:41 +0200
+Subject: [PATCH 1/5] block: add support for defining read-only partitions
+
+Add support for defining read-only partitions and complete support for
+it in the cmdline partition parser as the additional "ro" after a
+partition is scanned but never actually applied.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Christoph Hellwig <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jens Axboe <[email protected]>
+---
+ block/blk.h                | 1 +
+ block/partitions/cmdline.c | 3 +++
+ block/partitions/core.c    | 3 +++
+ 3 files changed, 7 insertions(+)
+
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -424,6 +424,7 @@ void blk_free_ext_minor(unsigned int min
+ #define ADDPART_FLAG_NONE	0
+ #define ADDPART_FLAG_RAID	1
+ #define ADDPART_FLAG_WHOLEDISK	2
++#define ADDPART_FLAG_READONLY	4
+ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
+ 		sector_t length);
+ int bdev_del_partition(struct gendisk *disk, int partno);
+--- a/block/partitions/cmdline.c
++++ b/block/partitions/cmdline.c
+@@ -237,6 +237,9 @@ static int add_part(int slot, struct cmd
+ 	put_partition(state, slot, subpart->from >> 9,
+ 		      subpart->size >> 9);
+ 
++	if (subpart->flags & PF_RDONLY)
++		state->parts[slot].flags |= ADDPART_FLAG_READONLY;
++
+ 	info = &state->parts[slot].info;
+ 
+ 	strscpy(info->volname, subpart->name, sizeof(info->volname));
+--- a/block/partitions/core.c
++++ b/block/partitions/core.c
+@@ -392,6 +392,9 @@ static struct block_device *add_partitio
+ 			goto out_del;
+ 	}
+ 
++	if (flags & ADDPART_FLAG_READONLY)
++		bdev->bd_read_only = true;
++
+ 	/* everything is up and running, commence */
+ 	err = xa_insert(&disk->part_tbl, partno, bdev, GFP_KERNEL);
+ 	if (err)

+ 94 - 0
target/linux/generic/backport-6.6/410-v6.13-03-block-introduce-add_disk_fwnode.patch

@@ -0,0 +1,94 @@
+From e5f587242b6072ffab4f4a084a459a59f3035873 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 3 Oct 2024 00:11:43 +0200
+Subject: [PATCH 3/5] block: introduce add_disk_fwnode()
+
+Introduce add_disk_fwnode() as a replacement of device_add_disk() that
+permits to pass and attach a fwnode to disk dev.
+
+This variant can be useful for eMMC that might have the partition table
+for the disk defined in DT. A parser can later make use of the attached
+fwnode to parse the related table and init the hardcoded partition for
+the disk.
+
+device_add_disk() is converted to a simple wrapper of add_disk_fwnode()
+with the fwnode entry set as NULL.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Christoph Hellwig <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jens Axboe <[email protected]>
+---
+ block/genhd.c          | 28 ++++++++++++++++++++++++----
+ include/linux/blkdev.h |  3 +++
+ 2 files changed, 27 insertions(+), 4 deletions(-)
+
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -383,16 +383,18 @@ int disk_scan_partitions(struct gendisk
+ }
+ 
+ /**
+- * device_add_disk - add disk information to kernel list
++ * add_disk_fwnode - add disk information to kernel list with fwnode
+  * @parent: parent device for the disk
+  * @disk: per-device partitioning information
+  * @groups: Additional per-device sysfs groups
++ * @fwnode: attached disk fwnode
+  *
+  * This function registers the partitioning information in @disk
+- * with the kernel.
++ * with the kernel. Also attach a fwnode to the disk device.
+  */
+-int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+-				 const struct attribute_group **groups)
++int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
++				 const struct attribute_group **groups,
++				 struct fwnode_handle *fwnode)
+ 
+ {
+ 	struct device *ddev = disk_to_dev(disk);
+@@ -451,6 +453,8 @@ int __must_check device_add_disk(struct
+ 	ddev->parent = parent;
+ 	ddev->groups = groups;
+ 	dev_set_name(ddev, "%s", disk->disk_name);
++	if (fwnode)
++		device_set_node(ddev, fwnode);
+ 	if (!(disk->flags & GENHD_FL_HIDDEN))
+ 		ddev->devt = MKDEV(disk->major, disk->first_minor);
+ 	ret = device_add(ddev);
+@@ -552,6 +556,22 @@ out_exit_elevator:
+ 		elevator_exit(disk->queue);
+ 	return ret;
+ }
++EXPORT_SYMBOL_GPL(add_disk_fwnode);
++
++/**
++ * device_add_disk - add disk information to kernel list
++ * @parent: parent device for the disk
++ * @disk: per-device partitioning information
++ * @groups: Additional per-device sysfs groups
++ *
++ * This function registers the partitioning information in @disk
++ * with the kernel.
++ */
++int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
++				 const struct attribute_group **groups)
++{
++	return add_disk_fwnode(parent, disk, groups, NULL);
++}
+ EXPORT_SYMBOL(device_add_disk);
+ 
+ static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -741,6 +741,9 @@ static inline unsigned int blk_queue_dep
+ #define for_each_bio(_bio)		\
+ 	for (; _bio; _bio = _bio->bi_next)
+ 
++int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
++				 const struct attribute_group **groups,
++				 struct fwnode_handle *fwnode);
+ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ 				 const struct attribute_group **groups);
+ static inline int __must_check add_disk(struct gendisk *disk)

+ 104 - 0
target/linux/generic/backport-6.6/410-v6.13-04-mmc-block-attach-partitions-fwnode-if-found-in-mmc-c.patch

@@ -0,0 +1,104 @@
+From 45ff6c340ddfc2dade74d5b7a8962c778ab7042c Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 3 Oct 2024 00:11:44 +0200
+Subject: [PATCH 4/5] mmc: block: attach partitions fwnode if found in mmc-card
+
+Attach partitions fwnode if found in mmc-card and register disk with it.
+
+This permits block partition to reference the node and register a
+partition table defined in DT for the special case for embedded device
+that doesn't have a partition table flashed but have an hardcoded
+partition table passed from the system.
+
+JEDEC BOOT partition boot0/boot1 are supported but in DT we refer with
+the JEDEC name of boot1 and boot2 to better adhere to documentation.
+
+Also JEDEC GP partition gp0/1/2/3 are supported but in DT we refer with
+the JEDEC name of gp1/2/3/4 to better adhere to documentation.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Linus Walleij <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jens Axboe <[email protected]>
+---
+ drivers/mmc/core/block.c | 55 +++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 54 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2455,6 +2455,56 @@ static inline int mmc_blk_readonly(struc
+ 	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+ }
+ 
++/*
++ * Search for a declared partitions node for the disk in mmc-card related node.
++ *
++ * This is to permit support for partition table defined in DT in special case
++ * where a partition table is not written in the disk and is expected to be
++ * passed from the running system.
++ *
++ * For the user disk, "partitions" node is searched.
++ * For the special HW disk, "partitions-" node with the appended name is used
++ * following this conversion table (to adhere to JEDEC naming)
++ * - boot0 -> partitions-boot1
++ * - boot1 -> partitions-boot2
++ * - gp0 -> partitions-gp1
++ * - gp1 -> partitions-gp2
++ * - gp2 -> partitions-gp3
++ * - gp3 -> partitions-gp4
++ */
++static struct fwnode_handle *mmc_blk_get_partitions_node(struct device *mmc_dev,
++							 const char *subname)
++{
++	const char *node_name = "partitions";
++
++	if (subname) {
++		mmc_dev = mmc_dev->parent;
++
++		/*
++		 * Check if we are allocating a BOOT disk boot0/1 disk.
++		 * In DT we use the JEDEC naming boot1/2.
++		 */
++		if (!strcmp(subname, "boot0"))
++			node_name = "partitions-boot1";
++		if (!strcmp(subname, "boot1"))
++			node_name = "partitions-boot2";
++		/*
++		 * Check if we are allocating a GP disk gp0/1/2/3 disk.
++		 * In DT we use the JEDEC naming gp1/2/3/4.
++		 */
++		if (!strcmp(subname, "gp0"))
++			node_name = "partitions-gp1";
++		if (!strcmp(subname, "gp1"))
++			node_name = "partitions-gp2";
++		if (!strcmp(subname, "gp2"))
++			node_name = "partitions-gp3";
++		if (!strcmp(subname, "gp3"))
++			node_name = "partitions-gp4";
++	}
++
++	return device_get_named_child_node(mmc_dev, node_name);
++}
++
+ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ 					      struct device *parent,
+ 					      sector_t size,
+@@ -2463,6 +2513,7 @@ static struct mmc_blk_data *mmc_blk_allo
+ 					      int area_type,
+ 					      unsigned int part_type)
+ {
++	struct fwnode_handle *disk_fwnode;
+ 	struct mmc_blk_data *md;
+ 	int devidx, ret;
+ 	char cap_str[10];
+@@ -2568,7 +2619,9 @@ static struct mmc_blk_data *mmc_blk_allo
+ 	/* used in ->open, must be set before add_disk: */
+ 	if (area_type == MMC_BLK_DATA_AREA_MAIN)
+ 		dev_set_drvdata(&card->dev, md);
+-	ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
++	disk_fwnode = mmc_blk_get_partitions_node(parent, subname);
++	ret = add_disk_fwnode(md->parent, md->disk, mmc_disk_attr_groups,
++			      disk_fwnode);
+ 	if (ret)
+ 		goto err_put_disk;
+ 	return md;

+ 200 - 0
target/linux/generic/backport-6.6/410-v6.13-05-block-add-support-for-partition-table-defined-in-OF.patch

@@ -0,0 +1,200 @@
+From 884555b557e5e6d41c866e2cd8d7b32f50ec974b Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 3 Oct 2024 00:11:45 +0200
+Subject: [PATCH 5/5] block: add support for partition table defined in OF
+
+Add support for partition table defined in Device Tree. Similar to how
+it's done with MTD, add support for defining a fixed partition table in
+device tree.
+
+A common scenario for this is fixed block (eMMC) embedded devices that
+have no MBR or GPT partition table to save storage space. Bootloader
+access the block device with absolute address of data.
+
+This is to complete the functionality with an equivalent implementation
+with providing partition table with bootargs, for the case where the bootargs
+can't be modified and tweaking the Device Tree is the only solution to
+have a usable partition table.
+
+The implementation follow the fixed-partitions parser used on MTD
+devices where a "partitions" node is expected to be declared with
+"fixed-partitions" compatible in the OF node of the disk device
+(mmc-card for eMMC for example) and each child node declare a label
+and a reg with offset and size. If label is not declared, the node name
+is used as fallback. Eventually is also possible to declare the read-only
+property to flag the partition as read-only.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Christoph Hellwig <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jens Axboe <[email protected]>
+---
+ block/partitions/Kconfig  |   9 ++++
+ block/partitions/Makefile |   1 +
+ block/partitions/check.h  |   1 +
+ block/partitions/core.c   |   3 ++
+ block/partitions/of.c     | 110 ++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 124 insertions(+)
+ create mode 100644 block/partitions/of.c
+
+--- a/block/partitions/Kconfig
++++ b/block/partitions/Kconfig
+@@ -270,4 +270,13 @@ config CMDLINE_PARTITION
+ 	  Say Y here if you want to read the partition table from bootargs.
+ 	  The format for the command line is just like mtdparts.
+ 
++config OF_PARTITION
++	bool "Device Tree partition support" if PARTITION_ADVANCED
++	depends on OF
++	help
++	  Say Y here if you want to enable support for partition table
++	  defined in Device Tree. (mainly for eMMC)
++	  The format for the device tree node is just like MTD fixed-partition
++	  schema.
++
+ endmenu
+--- a/block/partitions/Makefile
++++ b/block/partitions/Makefile
+@@ -12,6 +12,7 @@ obj-$(CONFIG_CMDLINE_PARTITION) += cmdli
+ obj-$(CONFIG_MAC_PARTITION) += mac.o
+ obj-$(CONFIG_LDM_PARTITION) += ldm.o
+ obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
++obj-$(CONFIG_OF_PARTITION) += of.o
+ obj-$(CONFIG_OSF_PARTITION) += osf.o
+ obj-$(CONFIG_SGI_PARTITION) += sgi.o
+ obj-$(CONFIG_SUN_PARTITION) += sun.o
+--- a/block/partitions/check.h
++++ b/block/partitions/check.h
+@@ -62,6 +62,7 @@ int karma_partition(struct parsed_partit
+ int ldm_partition(struct parsed_partitions *state);
+ int mac_partition(struct parsed_partitions *state);
+ int msdos_partition(struct parsed_partitions *state);
++int of_partition(struct parsed_partitions *state);
+ int osf_partition(struct parsed_partitions *state);
+ int sgi_partition(struct parsed_partitions *state);
+ int sun_partition(struct parsed_partitions *state);
+--- a/block/partitions/core.c
++++ b/block/partitions/core.c
+@@ -43,6 +43,9 @@ static int (*const check_part[])(struct
+ #ifdef CONFIG_CMDLINE_PARTITION
+ 	cmdline_partition,
+ #endif
++#ifdef CONFIG_OF_PARTITION
++	of_partition,		/* cmdline have priority to OF */
++#endif
+ #ifdef CONFIG_EFI_PARTITION
+ 	efi_partition,		/* this must come before msdos */
+ #endif
+--- /dev/null
++++ b/block/partitions/of.c
+@@ -0,0 +1,110 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/blkdev.h>
++#include <linux/major.h>
++#include <linux/of.h>
++#include <linux/string.h>
++#include "check.h"
++
++static int validate_of_partition(struct device_node *np, int slot)
++{
++	u64 offset, size;
++	int len;
++
++	const __be32 *reg = of_get_property(np, "reg", &len);
++	int a_cells = of_n_addr_cells(np);
++	int s_cells = of_n_size_cells(np);
++
++	/* Make sure reg len match the expected addr and size cells */
++	if (len / sizeof(*reg) != a_cells + s_cells)
++		return -EINVAL;
++
++	/* Validate offset conversion from bytes to sectors */
++	offset = of_read_number(reg, a_cells);
++	if (offset % SECTOR_SIZE)
++		return -EINVAL;
++
++	/* Validate size conversion from bytes to sectors */
++	size = of_read_number(reg + a_cells, s_cells);
++	if (!size || size % SECTOR_SIZE)
++		return -EINVAL;
++
++	return 0;
++}
++
++static void add_of_partition(struct parsed_partitions *state, int slot,
++			     struct device_node *np)
++{
++	struct partition_meta_info *info;
++	char tmp[sizeof(info->volname) + 4];
++	const char *partname;
++	int len;
++
++	const __be32 *reg = of_get_property(np, "reg", &len);
++	int a_cells = of_n_addr_cells(np);
++	int s_cells = of_n_size_cells(np);
++
++	/* Convert bytes to sector size */
++	u64 offset = of_read_number(reg, a_cells) / SECTOR_SIZE;
++	u64 size = of_read_number(reg + a_cells, s_cells) / SECTOR_SIZE;
++
++	put_partition(state, slot, offset, size);
++
++	if (of_property_read_bool(np, "read-only"))
++		state->parts[slot].flags |= ADDPART_FLAG_READONLY;
++
++	/*
++	 * Follow MTD label logic, search for label property,
++	 * fallback to node name if not found.
++	 */
++	info = &state->parts[slot].info;
++	partname = of_get_property(np, "label", &len);
++	if (!partname)
++		partname = of_get_property(np, "name", &len);
++	strscpy(info->volname, partname, sizeof(info->volname));
++
++	snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
++	strlcat(state->pp_buf, tmp, PAGE_SIZE);
++}
++
++int of_partition(struct parsed_partitions *state)
++{
++	struct device *ddev = disk_to_dev(state->disk);
++	struct device_node *np;
++	int slot;
++
++	struct device_node *partitions_np = of_node_get(ddev->of_node);
++
++	if (!partitions_np ||
++	    !of_device_is_compatible(partitions_np, "fixed-partitions"))
++		return 0;
++
++	slot = 1;
++	/* Validate partition offset and size */
++	for_each_child_of_node(partitions_np, np) {
++		if (validate_of_partition(np, slot)) {
++			of_node_put(np);
++			of_node_put(partitions_np);
++
++			return -1;
++		}
++
++		slot++;
++	}
++
++	slot = 1;
++	for_each_child_of_node(partitions_np, np) {
++		if (slot >= state->limit) {
++			of_node_put(np);
++			break;
++		}
++
++		add_of_partition(state, slot, np);
++
++		slot++;
++	}
++
++	strlcat(state->pp_buf, "\n", PAGE_SIZE);
++
++	return 1;
++}

+ 146 - 0
target/linux/generic/backport-6.6/411-v6.7-mtd-spinand-add-support-for-FORESEE-F35SQA002G.patch

@@ -0,0 +1,146 @@
+From 49c8e854869d673df8452f24dfa8989cd0f615a8 Mon Sep 17 00:00:00 2001
+From: Martin Kurbanov <[email protected]>
+Date: Mon, 2 Oct 2023 17:04:58 +0300
+Subject: [PATCH] mtd: spinand: add support for FORESEE F35SQA002G
+
+Add support for FORESEE F35SQA002G SPI NAND.
+Datasheet:
+  https://www.longsys.com/uploads/LM-00006FORESEEF35SQA002GDatasheet_1650183701.pdf
+
+Signed-off-by: Martin Kurbanov <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+Link: https://lore.kernel.org/linux-mtd/[email protected]
+---
+ drivers/mtd/nand/spi/Makefile  |  2 +-
+ drivers/mtd/nand/spi/core.c    |  1 +
+ drivers/mtd/nand/spi/foresee.c | 95 ++++++++++++++++++++++++++++++++++
+ include/linux/mtd/spinand.h    |  1 +
+ 4 files changed, 98 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/mtd/nand/spi/foresee.c
+
+--- a/drivers/mtd/nand/spi/Makefile
++++ b/drivers/mtd/nand/spi/Makefile
+@@ -1,4 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0
+-spinand-objs := core.o alliancememory.o ato.o esmt.o gigadevice.o macronix.o
++spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
+ spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
+ obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -940,6 +940,7 @@ static const struct spinand_manufacturer
+ 	&alliancememory_spinand_manufacturer,
+ 	&ato_spinand_manufacturer,
+ 	&esmt_c8_spinand_manufacturer,
++	&foresee_spinand_manufacturer,
+ 	&gigadevice_spinand_manufacturer,
+ 	&macronix_spinand_manufacturer,
+ 	&micron_spinand_manufacturer,
+--- /dev/null
++++ b/drivers/mtd/nand/spi/foresee.c
+@@ -0,0 +1,95 @@
++// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++/*
++ * Copyright (c) 2023, SberDevices. All Rights Reserved.
++ *
++ * Author: Martin Kurbanov <[email protected]>
++ */
++
++#include <linux/device.h>
++#include <linux/kernel.h>
++#include <linux/mtd/spinand.h>
++
++#define SPINAND_MFR_FORESEE		0xCD
++
++static SPINAND_OP_VARIANTS(read_cache_variants,
++		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
++		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
++		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
++		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
++
++static SPINAND_OP_VARIANTS(write_cache_variants,
++		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
++		SPINAND_PROG_LOAD(true, 0, NULL, 0));
++
++static SPINAND_OP_VARIANTS(update_cache_variants,
++		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
++		SPINAND_PROG_LOAD(false, 0, NULL, 0));
++
++static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
++				    struct mtd_oob_region *region)
++{
++	return -ERANGE;
++}
++
++static int f35sqa002g_ooblayout_free(struct mtd_info *mtd, int section,
++				     struct mtd_oob_region *region)
++{
++	if (section)
++		return -ERANGE;
++
++	/* Reserve 2 bytes for the BBM. */
++	region->offset = 2;
++	region->length = 62;
++
++	return 0;
++}
++
++static const struct mtd_ooblayout_ops f35sqa002g_ooblayout = {
++	.ecc = f35sqa002g_ooblayout_ecc,
++	.free = f35sqa002g_ooblayout_free,
++};
++
++static int f35sqa002g_ecc_get_status(struct spinand_device *spinand, u8 status)
++{
++	struct nand_device *nand = spinand_to_nand(spinand);
++
++	switch (status & STATUS_ECC_MASK) {
++	case STATUS_ECC_NO_BITFLIPS:
++		return 0;
++
++	case STATUS_ECC_HAS_BITFLIPS:
++		return nanddev_get_ecc_conf(nand)->strength;
++
++	default:
++		break;
++	}
++
++	/* More than 1-bit error was detected in one or more sectors and
++	 * cannot be corrected.
++	 */
++	return -EBADMSG;
++}
++
++static const struct spinand_info foresee_spinand_table[] = {
++	SPINAND_INFO("F35SQA002G",
++		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72, 0x72),
++		     NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
++		     NAND_ECCREQ(1, 512),
++		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
++					      &write_cache_variants,
++					      &update_cache_variants),
++		     SPINAND_HAS_QE_BIT,
++		     SPINAND_ECCINFO(&f35sqa002g_ooblayout,
++				     f35sqa002g_ecc_get_status)),
++};
++
++static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
++};
++
++const struct spinand_manufacturer foresee_spinand_manufacturer = {
++	.id = SPINAND_MFR_FORESEE,
++	.name = "FORESEE",
++	.chips = foresee_spinand_table,
++	.nchips = ARRAY_SIZE(foresee_spinand_table),
++	.ops = &foresee_spinand_manuf_ops,
++};
+--- a/include/linux/mtd/spinand.h
++++ b/include/linux/mtd/spinand.h
+@@ -263,6 +263,7 @@ struct spinand_manufacturer {
+ extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
+ extern const struct spinand_manufacturer ato_spinand_manufacturer;
+ extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
++extern const struct spinand_manufacturer foresee_spinand_manufacturer;
+ extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
+ extern const struct spinand_manufacturer macronix_spinand_manufacturer;
+ extern const struct spinand_manufacturer micron_spinand_manufacturer;

+ 38 - 0
target/linux/generic/backport-6.6/412-v6.14-mtd-spinand-add-support-for-FORESEE-F35SQA001G.patch

@@ -0,0 +1,38 @@
+From ae461cde5c559675fc4c0ba351c7c31ace705f56 Mon Sep 17 00:00:00 2001
+From: Bohdan Chubuk <[email protected]>
+Date: Sun, 10 Nov 2024 22:50:47 +0200
+Subject: [PATCH] mtd: spinand: add support for FORESEE F35SQA001G
+
+Add support for FORESEE F35SQA001G SPI NAND.
+
+Similar to F35SQA002G, but differs in capacity.
+Datasheet:
+  -  https://cdn.ozdisan.com/ETicaret_Dosya/704795_871495.pdf
+
+Tested on Xiaomi AX3000T flashed with OpenWRT.
+
+Signed-off-by: Bohdan Chubuk <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+---
+ drivers/mtd/nand/spi/foresee.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/mtd/nand/spi/foresee.c
++++ b/drivers/mtd/nand/spi/foresee.c
+@@ -81,6 +81,16 @@ static const struct spinand_info foresee
+ 		     SPINAND_HAS_QE_BIT,
+ 		     SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ 				     f35sqa002g_ecc_get_status)),
++	SPINAND_INFO("F35SQA001G",
++		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71, 0x71),
++		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
++		     NAND_ECCREQ(1, 512),
++		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
++					      &write_cache_variants,
++					      &update_cache_variants),
++		     SPINAND_HAS_QE_BIT,
++		     SPINAND_ECCINFO(&f35sqa002g_ooblayout,
++				     f35sqa002g_ecc_get_status)),
+ };
+ 
+ static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {

+ 1013 - 0
target/linux/generic/backport-6.6/413-01-v6.14-mtd-rawnand-qcom-cleanup-qcom_nandc-driver.patch

@@ -0,0 +1,1013 @@
+From 8c52932da5e6756fa66f52f0720da283fba13aa6 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <[email protected]>
+Date: Wed, 20 Nov 2024 14:45:00 +0530
+Subject: [PATCH 1/4] mtd: rawnand: qcom: cleanup qcom_nandc driver
+
+Perform a global cleanup of the Qualcomm NAND
+controller driver with the following improvements:
+
+- Remove register value indirection API
+
+- Remove set_reg() API
+
+- Convert read_loc_first & read_loc_last macro to functions
+
+- Rename multiple variables
+
+Signed-off-by: Md Sadre Alam <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 516 ++++++++++++++----------------
+ 1 file changed, 234 insertions(+), 282 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -189,17 +189,6 @@
+ #define	ECC_BCH_4BIT	BIT(2)
+ #define	ECC_BCH_8BIT	BIT(3)
+ 
+-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc)	\
+-nandc_set_reg(chip, reg,			\
+-	      ((cw_offset) << READ_LOCATION_OFFSET) |		\
+-	      ((read_size) << READ_LOCATION_SIZE) |			\
+-	      ((is_last_read_loc) << READ_LOCATION_LAST))
+-
+-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc)	\
+-nandc_set_reg(chip, reg,			\
+-	      ((cw_offset) << READ_LOCATION_OFFSET) |		\
+-	      ((read_size) << READ_LOCATION_SIZE) |			\
+-	      ((is_last_read_loc) << READ_LOCATION_LAST))
+ /*
+  * Returns the actual register address for all NAND_DEV_ registers
+  * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+@@ -257,8 +246,6 @@ nandc_set_reg(chip, reg,			\
+  * @tx_sgl_start - start index in data sgl for tx.
+  * @rx_sgl_pos - current index in data sgl for rx.
+  * @rx_sgl_start - start index in data sgl for rx.
+- * @wait_second_completion - wait for second DMA desc completion before making
+- *			     the NAND transfer completion.
+  */
+ struct bam_transaction {
+ 	struct bam_cmd_element *bam_ce;
+@@ -275,7 +262,6 @@ struct bam_transaction {
+ 	u32 tx_sgl_start;
+ 	u32 rx_sgl_pos;
+ 	u32 rx_sgl_start;
+-	bool wait_second_completion;
+ };
+ 
+ /*
+@@ -471,9 +457,9 @@ struct qcom_op {
+ 	unsigned int data_instr_idx;
+ 	unsigned int rdy_timeout_ms;
+ 	unsigned int rdy_delay_ns;
+-	u32 addr1_reg;
+-	u32 addr2_reg;
+-	u32 cmd_reg;
++	__le32 addr1_reg;
++	__le32 addr2_reg;
++	__le32 cmd_reg;
+ 	u8 flag;
+ };
+ 
+@@ -549,17 +535,17 @@ struct qcom_nand_host {
+  * among different NAND controllers.
+  * @ecc_modes - ecc mode for NAND
+  * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+- * @is_bam - whether NAND controller is using BAM
+- * @is_qpic - whether NAND CTRL is part of qpic IP
+- * @qpic_v2 - flag to indicate QPIC IP version 2
++ * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
++ * @qpic_version2 - flag to indicate QPIC IP version 2
+  * @use_codeword_fixup - whether NAND has different layout for boot partitions
+  */
+ struct qcom_nandc_props {
+ 	u32 ecc_modes;
+ 	u32 dev_cmd_reg_start;
+-	bool is_bam;
+-	bool is_qpic;
+-	bool qpic_v2;
++	bool supports_bam;
++	bool nandc_part_of_qpic;
++	bool qpic_version2;
+ 	bool use_codeword_fixup;
+ };
+ 
+@@ -613,19 +599,11 @@ static void clear_bam_transaction(struct
+ {
+ 	struct bam_transaction *bam_txn = nandc->bam_txn;
+ 
+-	if (!nandc->props->is_bam)
++	if (!nandc->props->supports_bam)
+ 		return;
+ 
+-	bam_txn->bam_ce_pos = 0;
+-	bam_txn->bam_ce_start = 0;
+-	bam_txn->cmd_sgl_pos = 0;
+-	bam_txn->cmd_sgl_start = 0;
+-	bam_txn->tx_sgl_pos = 0;
+-	bam_txn->tx_sgl_start = 0;
+-	bam_txn->rx_sgl_pos = 0;
+-	bam_txn->rx_sgl_start = 0;
++	memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
+ 	bam_txn->last_data_desc = NULL;
+-	bam_txn->wait_second_completion = false;
+ 
+ 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+ 		      QPIC_PER_CW_CMD_SGL);
+@@ -640,46 +618,35 @@ static void qpic_bam_dma_done(void *data
+ {
+ 	struct bam_transaction *bam_txn = data;
+ 
+-	/*
+-	 * In case of data transfer with NAND, 2 callbacks will be generated.
+-	 * One for command channel and another one for data channel.
+-	 * If current transaction has data descriptors
+-	 * (i.e. wait_second_completion is true), then set this to false
+-	 * and wait for second DMA descriptor completion.
+-	 */
+-	if (bam_txn->wait_second_completion)
+-		bam_txn->wait_second_completion = false;
+-	else
+-		complete(&bam_txn->txn_done);
++	complete(&bam_txn->txn_done);
+ }
+ 
+-static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
++static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+ 	return container_of(chip, struct qcom_nand_host, chip);
+ }
+ 
+-static inline struct qcom_nand_controller *
++static struct qcom_nand_controller *
+ get_qcom_nand_controller(struct nand_chip *chip)
+ {
+ 	return container_of(chip->controller, struct qcom_nand_controller,
+ 			    controller);
+ }
+ 
+-static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
++static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+ {
+ 	return ioread32(nandc->base + offset);
+ }
+ 
+-static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+-			       u32 val)
++static void nandc_write(struct qcom_nand_controller *nandc, int offset,
++			u32 val)
+ {
+ 	iowrite32(val, nandc->base + offset);
+ }
+ 
+-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
+-					  bool is_cpu)
++static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+ {
+-	if (!nandc->props->is_bam)
++	if (!nandc->props->supports_bam)
+ 		return;
+ 
+ 	if (is_cpu)
+@@ -694,93 +661,90 @@ static inline void nandc_read_buffer_syn
+ 					   DMA_FROM_DEVICE);
+ }
+ 
+-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+-{
+-	switch (offset) {
+-	case NAND_FLASH_CMD:
+-		return &regs->cmd;
+-	case NAND_ADDR0:
+-		return &regs->addr0;
+-	case NAND_ADDR1:
+-		return &regs->addr1;
+-	case NAND_FLASH_CHIP_SELECT:
+-		return &regs->chip_sel;
+-	case NAND_EXEC_CMD:
+-		return &regs->exec;
+-	case NAND_FLASH_STATUS:
+-		return &regs->clrflashstatus;
+-	case NAND_DEV0_CFG0:
+-		return &regs->cfg0;
+-	case NAND_DEV0_CFG1:
+-		return &regs->cfg1;
+-	case NAND_DEV0_ECC_CFG:
+-		return &regs->ecc_bch_cfg;
+-	case NAND_READ_STATUS:
+-		return &regs->clrreadstatus;
+-	case NAND_DEV_CMD1:
+-		return &regs->cmd1;
+-	case NAND_DEV_CMD1_RESTORE:
+-		return &regs->orig_cmd1;
+-	case NAND_DEV_CMD_VLD:
+-		return &regs->vld;
+-	case NAND_DEV_CMD_VLD_RESTORE:
+-		return &regs->orig_vld;
+-	case NAND_EBI2_ECC_BUF_CFG:
+-		return &regs->ecc_buf_cfg;
+-	case NAND_READ_LOCATION_0:
+-		return &regs->read_location0;
+-	case NAND_READ_LOCATION_1:
+-		return &regs->read_location1;
+-	case NAND_READ_LOCATION_2:
+-		return &regs->read_location2;
+-	case NAND_READ_LOCATION_3:
+-		return &regs->read_location3;
+-	case NAND_READ_LOCATION_LAST_CW_0:
+-		return &regs->read_location_last0;
+-	case NAND_READ_LOCATION_LAST_CW_1:
+-		return &regs->read_location_last1;
+-	case NAND_READ_LOCATION_LAST_CW_2:
+-		return &regs->read_location_last2;
+-	case NAND_READ_LOCATION_LAST_CW_3:
+-		return &regs->read_location_last3;
+-	default:
+-		return NULL;
+-	}
+-}
+-
+-static void nandc_set_reg(struct nand_chip *chip, int offset,
+-			  u32 val)
+-{
+-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+-	struct nandc_regs *regs = nandc->regs;
+-	__le32 *reg;
+-
+-	reg = offset_to_nandc_reg(regs, offset);
+-
+-	if (reg)
+-		*reg = cpu_to_le32(val);
+-}
+-
+-/* Helper to check the code word, whether it is last cw or not */
++/* Helper to check whether this is the last CW or not */
+ static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+ {
+ 	return cw == (ecc->steps - 1);
+ }
+ 
++/**
++ * nandc_set_read_loc_first() - to set read location first register
++ * @chip:		NAND Private Flash Chip Data
++ * @reg_base:		location register base
++ * @cw_offset:		code word offset
++ * @read_size:		code word read length
++ * @is_last_read_loc:	is this the last read location
++ *
++ * This function will set location register value
++ */
++static void nandc_set_read_loc_first(struct nand_chip *chip,
++				     int reg_base, u32 cw_offset,
++				     u32 read_size, u32 is_last_read_loc)
++{
++	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
++	__le32 locreg_val;
++	u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++		  ((read_size) << READ_LOCATION_SIZE) |
++		  ((is_last_read_loc) << READ_LOCATION_LAST));
++
++	locreg_val = cpu_to_le32(val);
++
++	if (reg_base == NAND_READ_LOCATION_0)
++		nandc->regs->read_location0 = locreg_val;
++	else if (reg_base == NAND_READ_LOCATION_1)
++		nandc->regs->read_location1 = locreg_val;
++	else if (reg_base == NAND_READ_LOCATION_2)
++		nandc->regs->read_location2 = locreg_val;
++	else if (reg_base == NAND_READ_LOCATION_3)
++		nandc->regs->read_location3 = locreg_val;
++}
++
++/**
++ * nandc_set_read_loc_last - to set read location last register
++ * @chip:		NAND Private Flash Chip Data
++ * @reg_base:		location register base
++ * @cw_offset:		code word offset
++ * @read_size:		code word read length
++ * @is_last_read_loc:	is this the last read location
++ *
++ * This function will set location last register value
++ */
++static void nandc_set_read_loc_last(struct nand_chip *chip,
++				    int reg_base, u32 cw_offset,
++				    u32 read_size, u32 is_last_read_loc)
++{
++	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
++	__le32 locreg_val;
++	u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++		  ((read_size) << READ_LOCATION_SIZE) |
++		  ((is_last_read_loc) << READ_LOCATION_LAST));
++
++	locreg_val = cpu_to_le32(val);
++
++	if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
++		nandc->regs->read_location_last0 = locreg_val;
++	else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
++		nandc->regs->read_location_last1 = locreg_val;
++	else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
++		nandc->regs->read_location_last2 = locreg_val;
++	else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
++		nandc->regs->read_location_last3 = locreg_val;
++}
++
+ /* helper to configure location register values */
+ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
+-			       int cw_offset, int read_size, int is_last_read_loc)
++			       u32 cw_offset, u32 read_size, u32 is_last_read_loc)
+ {
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 	struct nand_ecc_ctrl *ecc = &chip->ecc;
+ 	int reg_base = NAND_READ_LOCATION_0;
+ 
+-	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
++	if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+ 		reg_base = NAND_READ_LOCATION_LAST_CW_0;
+ 
+ 	reg_base += reg * 4;
+ 
+-	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
++	if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+ 		return nandc_set_read_loc_last(chip, reg_base, cw_offset,
+ 				read_size, is_last_read_loc);
+ 	else
+@@ -792,12 +756,13 @@ static void nandc_set_read_loc(struct na
+ static void set_address(struct qcom_nand_host *host, u16 column, int page)
+ {
+ 	struct nand_chip *chip = &host->chip;
++	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+ 	if (chip->options & NAND_BUSWIDTH_16)
+ 		column >>= 1;
+ 
+-	nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
+-	nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
++	nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
++	nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
+ }
+ 
+ /*
+@@ -811,41 +776,43 @@ static void set_address(struct qcom_nand
+ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
+ {
+ 	struct nand_chip *chip = &host->chip;
+-	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
++	__le32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+ 	if (read) {
+ 		if (host->use_ecc)
+-			cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
++			cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
+ 		else
+-			cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
++			cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
+ 	} else {
+-		cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
++		cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
+ 	}
+ 
+ 	if (host->use_ecc) {
+-		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+-				(num_cw - 1) << CW_PER_PAGE;
++		cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) |
++				(num_cw - 1) << CW_PER_PAGE);
+ 
+-		cfg1 = host->cfg1;
+-		ecc_bch_cfg = host->ecc_bch_cfg;
++		cfg1 = cpu_to_le32(host->cfg1);
++		ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
+ 	} else {
+-		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+-				(num_cw - 1) << CW_PER_PAGE;
++		cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++				(num_cw - 1) << CW_PER_PAGE);
+ 
+-		cfg1 = host->cfg1_raw;
+-		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
++		cfg1 = cpu_to_le32(host->cfg1_raw);
++		ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+ 	}
+ 
+-	nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
+-	nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
+-	nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
+-	nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+-	if (!nandc->props->qpic_v2)
+-		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+-	nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
+-	nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
+-	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++	nandc->regs->cmd = cmd;
++	nandc->regs->cfg0 = cfg0;
++	nandc->regs->cfg1 = cfg1;
++	nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
++
++	if (!nandc->props->qpic_version2)
++		nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
++
++	nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
++	nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
++	nandc->regs->exec = cpu_to_le32(1);
+ 
+ 	if (read)
+ 		nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
+@@ -1121,7 +1088,7 @@ static int read_reg_dma(struct qcom_nand
+ 	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+ 		first = dev_cmd_reg_addr(nandc, first);
+ 
+-	if (nandc->props->is_bam)
++	if (nandc->props->supports_bam)
+ 		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ 					     num_regs, flags);
+ 
+@@ -1136,25 +1103,16 @@ static int read_reg_dma(struct qcom_nand
+  * write_reg_dma:	prepares a descriptor to write a given number of
+  *			contiguous registers
+  *
++ * @vaddr:		contiguous memory from where register value will
++ *			be written
+  * @first:		offset of the first register in the contiguous block
+  * @num_regs:		number of registers to write
+  * @flags:		flags to control DMA descriptor preparation
+  */
+-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+-			 int num_regs, unsigned int flags)
++static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++			 int first, int num_regs, unsigned int flags)
+ {
+ 	bool flow_control = false;
+-	struct nandc_regs *regs = nandc->regs;
+-	void *vaddr;
+-
+-	vaddr = offset_to_nandc_reg(regs, first);
+-
+-	if (first == NAND_ERASED_CW_DETECT_CFG) {
+-		if (flags & NAND_ERASED_CW_SET)
+-			vaddr = &regs->erased_cw_detect_cfg_set;
+-		else
+-			vaddr = &regs->erased_cw_detect_cfg_clr;
+-	}
+ 
+ 	if (first == NAND_EXEC_CMD)
+ 		flags |= NAND_BAM_NWD;
+@@ -1165,7 +1123,7 @@ static int write_reg_dma(struct qcom_nan
+ 	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+ 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+ 
+-	if (nandc->props->is_bam)
++	if (nandc->props->supports_bam)
+ 		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ 					     num_regs, flags);
+ 
+@@ -1188,7 +1146,7 @@ static int write_reg_dma(struct qcom_nan
+ static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ 			 const u8 *vaddr, int size, unsigned int flags)
+ {
+-	if (nandc->props->is_bam)
++	if (nandc->props->supports_bam)
+ 		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+ 
+ 	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+@@ -1206,7 +1164,7 @@ static int read_data_dma(struct qcom_nan
+ static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ 			  const u8 *vaddr, int size, unsigned int flags)
+ {
+-	if (nandc->props->is_bam)
++	if (nandc->props->supports_bam)
+ 		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+ 
+ 	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+@@ -1220,13 +1178,14 @@ static void config_nand_page_read(struct
+ {
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+-	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+-	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+-	if (!nandc->props->qpic_v2)
+-		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+-	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
+-	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
+-		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++	write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	if (!nandc->props->qpic_version2)
++		write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++	write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
++		      NAND_ERASED_CW_DETECT_CFG, 1, 0);
++	write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
++		      NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ }
+ 
+ /*
+@@ -1239,16 +1198,16 @@ config_nand_cw_read(struct nand_chip *ch
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 	struct nand_ecc_ctrl *ecc = &chip->ecc;
+ 
+-	int reg = NAND_READ_LOCATION_0;
++	__le32 *reg = &nandc->regs->read_location0;
+ 
+-	if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+-		reg = NAND_READ_LOCATION_LAST_CW_0;
++	if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
++		reg = &nandc->regs->read_location_last0;
+ 
+-	if (nandc->props->is_bam)
+-		write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
++	if (nandc->props->supports_bam)
++		write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ 
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ 
+ 	if (use_ecc) {
+ 		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+@@ -1279,10 +1238,10 @@ static void config_nand_page_write(struc
+ {
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+-	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
+-	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
+-	if (!nandc->props->qpic_v2)
+-		write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
++	write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++	write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	if (!nandc->props->qpic_version2)
++		write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ 			      NAND_BAM_NEXT_SGL);
+ }
+ 
+@@ -1294,13 +1253,13 @@ static void config_nand_cw_write(struct
+ {
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ 
+ 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ 
+-	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
+-	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++	write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+ }
+ 
+ /* helpers to submit/free our list of dma descriptors */
+@@ -1311,7 +1270,7 @@ static int submit_descs(struct qcom_nand
+ 	struct bam_transaction *bam_txn = nandc->bam_txn;
+ 	int ret = 0;
+ 
+-	if (nandc->props->is_bam) {
++	if (nandc->props->supports_bam) {
+ 		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+ 			ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ 			if (ret)
+@@ -1336,14 +1295,9 @@ static int submit_descs(struct qcom_nand
+ 	list_for_each_entry(desc, &nandc->desc_list, node)
+ 		cookie = dmaengine_submit(desc->dma_desc);
+ 
+-	if (nandc->props->is_bam) {
++	if (nandc->props->supports_bam) {
+ 		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ 		bam_txn->last_cmd_desc->callback_param = bam_txn;
+-		if (bam_txn->last_data_desc) {
+-			bam_txn->last_data_desc->callback = qpic_bam_dma_done;
+-			bam_txn->last_data_desc->callback_param = bam_txn;
+-			bam_txn->wait_second_completion = true;
+-		}
+ 
+ 		dma_async_issue_pending(nandc->tx_chan);
+ 		dma_async_issue_pending(nandc->rx_chan);
+@@ -1365,7 +1319,7 @@ err_unmap_free_desc:
+ 	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ 		list_del(&desc->node);
+ 
+-		if (nandc->props->is_bam)
++		if (nandc->props->supports_bam)
+ 			dma_unmap_sg(nandc->dev, desc->bam_sgl,
+ 				     desc->sgl_cnt, desc->dir);
+ 		else
+@@ -1382,7 +1336,7 @@ err_unmap_free_desc:
+ static void clear_read_regs(struct qcom_nand_controller *nandc)
+ {
+ 	nandc->reg_read_pos = 0;
+-	nandc_read_buffer_sync(nandc, false);
++	nandc_dev_to_mem(nandc, false);
+ }
+ 
+ /*
+@@ -1446,7 +1400,7 @@ static int check_flash_errors(struct qco
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 	int i;
+ 
+-	nandc_read_buffer_sync(nandc, true);
++	nandc_dev_to_mem(nandc, true);
+ 
+ 	for (i = 0; i < cw_cnt; i++) {
+ 		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -1476,7 +1430,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+ 	clear_read_regs(nandc);
+ 	host->use_ecc = false;
+ 
+-	if (nandc->props->qpic_v2)
++	if (nandc->props->qpic_version2)
+ 		raw_cw = ecc->steps - 1;
+ 
+ 	clear_bam_transaction(nandc);
+@@ -1497,7 +1451,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+ 		oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ 	}
+ 
+-	if (nandc->props->is_bam) {
++	if (nandc->props->supports_bam) {
+ 		nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
+ 		read_loc += data_size1;
+ 
+@@ -1621,7 +1575,7 @@ static int parse_read_errors(struct qcom
+ 	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+ 
+ 	buf = (struct read_stats *)nandc->reg_read_buf;
+-	nandc_read_buffer_sync(nandc, true);
++	nandc_dev_to_mem(nandc, true);
+ 
+ 	for (i = 0; i < ecc->steps; i++, buf++) {
+ 		u32 flash, buffer, erased_cw;
+@@ -1734,7 +1688,7 @@ static int read_page_ecc(struct qcom_nan
+ 			oob_size = host->ecc_bytes_hw + host->spare_bytes;
+ 		}
+ 
+-		if (nandc->props->is_bam) {
++		if (nandc->props->supports_bam) {
+ 			if (data_buf && oob_buf) {
+ 				nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
+ 				nandc_set_read_loc(chip, i, 1, data_size,
+@@ -2455,14 +2409,14 @@ static int qcom_nand_attach_chip(struct
+ 
+ 	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+ 	/* Free the initially allocated BAM transaction for reading the ONFI params */
+-	if (nandc->props->is_bam)
++	if (nandc->props->supports_bam)
+ 		free_bam_transaction(nandc);
+ 
+ 	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+ 				     cwperpage);
+ 
+ 	/* Now allocate the BAM transaction based on updated max_cwperpage */
+-	if (nandc->props->is_bam) {
++	if (nandc->props->supports_bam) {
+ 		nandc->bam_txn = alloc_bam_transaction(nandc);
+ 		if (!nandc->bam_txn) {
+ 			dev_err(nandc->dev,
+@@ -2522,7 +2476,7 @@ static int qcom_nand_attach_chip(struct
+ 				| ecc_mode << ECC_MODE
+ 				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+ 
+-	if (!nandc->props->qpic_v2)
++	if (!nandc->props->qpic_version2)
+ 		host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ 
+ 	host->clrflashstatus = FS_READY_BSY_N;
+@@ -2556,7 +2510,7 @@ static int qcom_op_cmd_mapping(struct na
+ 		cmd = OP_FETCH_ID;
+ 		break;
+ 	case NAND_CMD_PARAM:
+-		if (nandc->props->qpic_v2)
++		if (nandc->props->qpic_version2)
+ 			cmd = OP_PAGE_READ_ONFI_READ;
+ 		else
+ 			cmd = OP_PAGE_READ;
+@@ -2609,7 +2563,7 @@ static int qcom_parse_instructions(struc
+ 			if (ret < 0)
+ 				return ret;
+ 
+-			q_op->cmd_reg = ret;
++			q_op->cmd_reg = cpu_to_le32(ret);
+ 			q_op->rdy_delay_ns = instr->delay_ns;
+ 			break;
+ 
+@@ -2619,10 +2573,10 @@ static int qcom_parse_instructions(struc
+ 			addrs = &instr->ctx.addr.addrs[offset];
+ 
+ 			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+-				q_op->addr1_reg |= addrs[i] << (i * 8);
++				q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
+ 
+ 			if (naddrs > 4)
+-				q_op->addr2_reg |= addrs[4];
++				q_op->addr2_reg |= cpu_to_le32(addrs[4]);
+ 
+ 			q_op->rdy_delay_ns = instr->delay_ns;
+ 			break;
+@@ -2663,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
+ 	unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+ 	u32 flash;
+ 
+-	nandc_read_buffer_sync(nandc, true);
++	nandc_dev_to_mem(nandc, true);
+ 
+ 	do {
+ 		flash = le32_to_cpu(nandc->reg_read_buf[0]);
+@@ -2706,11 +2660,11 @@ static int qcom_read_status_exec(struct
+ 	clear_read_regs(nandc);
+ 	clear_bam_transaction(nandc);
+ 
+-	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++	nandc->regs->cmd = q_op.cmd_reg;
++	nandc->regs->exec = cpu_to_le32(1);
+ 
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ 
+ 	ret = submit_descs(nandc);
+@@ -2719,7 +2673,7 @@ static int qcom_read_status_exec(struct
+ 		goto err_out;
+ 	}
+ 
+-	nandc_read_buffer_sync(nandc, true);
++	nandc_dev_to_mem(nandc, true);
+ 
+ 	for (i = 0; i < num_cw; i++) {
+ 		flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -2763,16 +2717,14 @@ static int qcom_read_id_type_exec(struct
+ 	clear_read_regs(nandc);
+ 	clear_bam_transaction(nandc);
+ 
+-	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-	nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+-	nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+-	nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
+-		      nandc->props->is_bam ? 0 : DM_EN);
++	nandc->regs->cmd = q_op.cmd_reg;
++	nandc->regs->addr0 = q_op.addr1_reg;
++	nandc->regs->addr1 = q_op.addr2_reg;
++	nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
++	nandc->regs->exec = cpu_to_le32(1);
+ 
+-	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+-
+-	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ 
+ 	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ 
+@@ -2786,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
+ 	op_id = q_op.data_instr_idx;
+ 	len = nand_subop_get_data_len(subop, op_id);
+ 
+-	nandc_read_buffer_sync(nandc, true);
++	nandc_dev_to_mem(nandc, true);
+ 	memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+ 
+ err_out:
+@@ -2807,15 +2759,14 @@ static int qcom_misc_cmd_type_exec(struc
+ 
+ 	if (q_op.flag == OP_PROGRAM_PAGE) {
+ 		goto wait_rdy;
+-	} else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
+-		q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+-		nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+-		nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+-		nandc_set_reg(chip, NAND_DEV0_CFG0,
+-			      host->cfg0_raw & ~(7 << CW_PER_PAGE));
+-		nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
++	} else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
++		q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
++		nandc->regs->addr0 = q_op.addr1_reg;
++		nandc->regs->addr1 = q_op.addr2_reg;
++		nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE));
++		nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
+ 		instrs = 3;
+-	} else if (q_op.cmd_reg != OP_RESET_DEVICE) {
++	} else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
+ 		return 0;
+ 	}
+ 
+@@ -2826,14 +2777,14 @@ static int qcom_misc_cmd_type_exec(struc
+ 	clear_read_regs(nandc);
+ 	clear_bam_transaction(nandc);
+ 
+-	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+-	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
++	nandc->regs->cmd = q_op.cmd_reg;
++	nandc->regs->exec = cpu_to_le32(1);
+ 
+-	write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+-	if (q_op.cmd_reg == OP_BLOCK_ERASE)
+-		write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
++	if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
++		write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ 
+-	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ 	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ 
+ 	ret = submit_descs(nandc);
+@@ -2864,7 +2815,7 @@ static int qcom_param_page_type_exec(str
+ 	if (ret)
+ 		return ret;
+ 
+-	q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
++	q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+ 
+ 	nandc->buf_count = 0;
+ 	nandc->buf_start = 0;
+@@ -2872,38 +2823,38 @@ static int qcom_param_page_type_exec(str
+ 	clear_read_regs(nandc);
+ 	clear_bam_transaction(nandc);
+ 
+-	nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
++	nandc->regs->cmd = q_op.cmd_reg;
++	nandc->regs->addr0 = 0;
++	nandc->regs->addr1 = 0;
++
++	nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
++					512 << UD_SIZE_BYTES |
++					5 << NUM_ADDR_CYCLES |
++					0 << SPARE_SIZE_BYTES);
++
++	nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
++					0 << CS_ACTIVE_BSY |
++					17 << BAD_BLOCK_BYTE_NUM |
++					1 << BAD_BLOCK_IN_SPARE_AREA |
++					2 << WR_RD_BSY_GAP |
++					0 << WIDE_FLASH |
++					1 << DEV0_CFG1_ECC_DISABLE);
+ 
+-	nandc_set_reg(chip, NAND_ADDR0, 0);
+-	nandc_set_reg(chip, NAND_ADDR1, 0);
+-	nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+-					| 512 << UD_SIZE_BYTES
+-					| 5 << NUM_ADDR_CYCLES
+-					| 0 << SPARE_SIZE_BYTES);
+-	nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+-					| 0 << CS_ACTIVE_BSY
+-					| 17 << BAD_BLOCK_BYTE_NUM
+-					| 1 << BAD_BLOCK_IN_SPARE_AREA
+-					| 2 << WR_RD_BSY_GAP
+-					| 0 << WIDE_FLASH
+-					| 1 << DEV0_CFG1_ECC_DISABLE);
+-	if (!nandc->props->qpic_v2)
+-		nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
++	if (!nandc->props->qpic_version2)
++		nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
+ 
+ 	/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+-	if (!nandc->props->qpic_v2) {
+-		nandc_set_reg(chip, NAND_DEV_CMD_VLD,
+-			      (nandc->vld & ~READ_START_VLD));
+-		nandc_set_reg(chip, NAND_DEV_CMD1,
+-			      (nandc->cmd1 & ~(0xFF << READ_ADDR))
+-			      | NAND_CMD_PARAM << READ_ADDR);
++	if (!nandc->props->qpic_version2) {
++		nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
++		nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR))
++				    | NAND_CMD_PARAM << READ_ADDR);
+ 	}
+ 
+-	nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+-
+-	if (!nandc->props->qpic_v2) {
+-		nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+-		nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
++	nandc->regs->exec = cpu_to_le32(1);
++
++	if (!nandc->props->qpic_version2) {
++		nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
++		nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
+ 	}
+ 
+ 	instr = q_op.data_instr;
+@@ -2912,9 +2863,9 @@ static int qcom_param_page_type_exec(str
+ 
+ 	nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+ 
+-	if (!nandc->props->qpic_v2) {
+-		write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+-		write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
++	if (!nandc->props->qpic_version2) {
++		write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
++		write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ 	}
+ 
+ 	nandc->buf_count = len;
+@@ -2926,9 +2877,10 @@ static int qcom_param_page_type_exec(str
+ 		      nandc->buf_count, 0);
+ 
+ 	/* restore CMD1 and VLD regs */
+-	if (!nandc->props->qpic_v2) {
+-		write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+-		write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
++	if (!nandc->props->qpic_version2) {
++		write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
++		write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
++			      NAND_BAM_NEXT_SGL);
+ 	}
+ 
+ 	ret = submit_descs(nandc);
+@@ -3017,7 +2969,7 @@ static const struct nand_controller_ops
+ 
+ static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+ {
+-	if (nandc->props->is_bam) {
++	if (nandc->props->supports_bam) {
+ 		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ 			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ 					 MAX_REG_RD *
+@@ -3070,7 +3022,7 @@ static int qcom_nandc_alloc(struct qcom_
+ 	if (!nandc->reg_read_buf)
+ 		return -ENOMEM;
+ 
+-	if (nandc->props->is_bam) {
++	if (nandc->props->supports_bam) {
+ 		nandc->reg_read_dma =
+ 			dma_map_single(nandc->dev, nandc->reg_read_buf,
+ 				       MAX_REG_RD *
+@@ -3151,15 +3103,15 @@ static int qcom_nandc_setup(struct qcom_
+ 	u32 nand_ctrl;
+ 
+ 	/* kill onenand */
+-	if (!nandc->props->is_qpic)
++	if (!nandc->props->nandc_part_of_qpic)
+ 		nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+ 
+-	if (!nandc->props->qpic_v2)
++	if (!nandc->props->qpic_version2)
+ 		nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
+ 			    NAND_DEV_CMD_VLD_VAL);
+ 
+ 	/* enable ADM or BAM DMA */
+-	if (nandc->props->is_bam) {
++	if (nandc->props->supports_bam) {
+ 		nand_ctrl = nandc_read(nandc, NAND_CTRL);
+ 
+ 		/*
+@@ -3176,7 +3128,7 @@ static int qcom_nandc_setup(struct qcom_
+ 	}
+ 
+ 	/* save the original values of these registers */
+-	if (!nandc->props->qpic_v2) {
++	if (!nandc->props->qpic_version2) {
+ 		nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
+ 		nandc->vld = NAND_DEV_CMD_VLD_VAL;
+ 	}
+@@ -3349,7 +3301,7 @@ static int qcom_nandc_parse_dt(struct pl
+ 	struct device_node *np = nandc->dev->of_node;
+ 	int ret;
+ 
+-	if (!nandc->props->is_bam) {
++	if (!nandc->props->supports_bam) {
+ 		ret = of_property_read_u32(np, "qcom,cmd-crci",
+ 					   &nandc->cmd_crci);
+ 		if (ret) {
+@@ -3474,30 +3426,30 @@ static void qcom_nandc_remove(struct pla
+ 
+ static const struct qcom_nandc_props ipq806x_nandc_props = {
+ 	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
+-	.is_bam = false,
++	.supports_bam = false,
+ 	.use_codeword_fixup = true,
+ 	.dev_cmd_reg_start = 0x0,
+ };
+ 
+ static const struct qcom_nandc_props ipq4019_nandc_props = {
+ 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-	.is_bam = true,
+-	.is_qpic = true,
++	.supports_bam = true,
++	.nandc_part_of_qpic = true,
+ 	.dev_cmd_reg_start = 0x0,
+ };
+ 
+ static const struct qcom_nandc_props ipq8074_nandc_props = {
+ 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-	.is_bam = true,
+-	.is_qpic = true,
++	.supports_bam = true,
++	.nandc_part_of_qpic = true,
+ 	.dev_cmd_reg_start = 0x7000,
+ };
+ 
+ static const struct qcom_nandc_props sdx55_nandc_props = {
+ 	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
+-	.is_bam = true,
+-	.is_qpic = true,
+-	.qpic_v2 = true,
++	.supports_bam = true,
++	.nandc_part_of_qpic = true,
++	.qpic_version2 = true,
+ 	.dev_cmd_reg_start = 0x7000,
+ };
+ 

+ 880 - 0
target/linux/generic/backport-6.6/413-02-v6.14-mtd-rawnand-qcom-Add-qcom-prefix-to-common-api.patch

@@ -0,0 +1,880 @@
+From 1d479f5b345e0c3650fec4dddeef9fc6fab30c8b Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <[email protected]>
+Date: Wed, 20 Nov 2024 14:45:01 +0530
+Subject: [PATCH 2/4] mtd: rawnand: qcom: Add qcom prefix to common api
+
+Add qcom prefix to all the api which will be commonly
+used by spi nand driver and raw nand driver.
+
+Reviewed-by: Konrad Dybcio <[email protected]>
+Signed-off-by: Md Sadre Alam <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
+ 1 file changed, 160 insertions(+), 160 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -53,7 +53,7 @@
+ #define	NAND_READ_LOCATION_LAST_CW_2	0xf48
+ #define	NAND_READ_LOCATION_LAST_CW_3	0xf4c
+ 
+-/* dummy register offsets, used by write_reg_dma */
++/* dummy register offsets, used by qcom_write_reg_dma */
+ #define	NAND_DEV_CMD1_RESTORE		0xdead
+ #define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
+ 
+@@ -211,7 +211,7 @@
+ 
+ /*
+  * Flags used in DMA descriptor preparation helper functions
+- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+  */
+ /* Don't set the EOT in current tx BAM sgl */
+ #define NAND_BAM_NO_EOT			BIT(0)
+@@ -550,7 +550,7 @@ struct qcom_nandc_props {
+ };
+ 
+ /* Frees the BAM transaction memory */
+-static void free_bam_transaction(struct qcom_nand_controller *nandc)
++static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+ 	struct bam_transaction *bam_txn = nandc->bam_txn;
+ 
+@@ -559,7 +559,7 @@ static void free_bam_transaction(struct
+ 
+ /* Allocates and Initializes the BAM transaction */
+ static struct bam_transaction *
+-alloc_bam_transaction(struct qcom_nand_controller *nandc)
++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+ 	struct bam_transaction *bam_txn;
+ 	size_t bam_txn_size;
+@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_c
+ }
+ 
+ /* Clears the BAM transaction indexes */
+-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
++static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+ {
+ 	struct bam_transaction *bam_txn = nandc->bam_txn;
+ 
+@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct
+ }
+ 
+ /* Callback for DMA descriptor completion */
+-static void qpic_bam_dma_done(void *data)
++static void qcom_qpic_bam_dma_done(void *data)
+ {
+ 	struct bam_transaction *bam_txn = data;
+ 
+@@ -644,7 +644,7 @@ static void nandc_write(struct qcom_nand
+ 	iowrite32(val, nandc->base + offset);
+ }
+ 
+-static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
++static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+ {
+ 	if (!nandc->props->supports_bam)
+ 		return;
+@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_n
+  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
+  * which will be submitted to DMA engine.
+  */
+-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+-				  struct dma_chan *chan,
+-				  unsigned long flags)
++static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++				       struct dma_chan *chan,
++				       unsigned long flags)
+ {
+ 	struct desc_info *desc;
+ 	struct scatterlist *sgl;
+@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct
+  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+  * after the current command element.
+  */
+-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+-				 int reg_off, const void *vaddr,
+-				 int size, unsigned int flags)
++static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++				      int reg_off, const void *vaddr,
++				      int size, unsigned int flags)
+ {
+ 	int bam_ce_size;
+ 	int i, ret;
+@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct
+ 		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+ 
+ 		if (flags & NAND_BAM_NWD) {
+-			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-						     DMA_PREP_FENCE |
+-						     DMA_PREP_CMD);
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++							  DMA_PREP_FENCE |
++							  DMA_PREP_CMD);
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct
+  * Prepares the data descriptor for BAM DMA which will be used for NAND
+  * data reads and writes.
+  */
+-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+-				  const void *vaddr,
+-				  int size, unsigned int flags)
++static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++				       const void *vaddr, int size, unsigned int flags)
+ {
+ 	int ret;
+ 	struct bam_transaction *bam_txn = nandc->bam_txn;
+@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct
+ 		 * is not set, form the DMA descriptor
+ 		 */
+ 		if (!(flags & NAND_BAM_NO_EOT)) {
+-			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+-						     DMA_PREP_INTERRUPT);
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++							  DMA_PREP_INTERRUPT);
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct
+ 	return 0;
+ }
+ 
+-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+-			     int reg_off, const void *vaddr, int size,
+-			     bool flow_control)
++static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
++				  int reg_off, const void *vaddr, int size,
++				  bool flow_control)
+ {
+ 	struct desc_info *desc;
+ 	struct dma_async_tx_descriptor *dma_desc;
+@@ -1069,15 +1068,15 @@ err:
+ }
+ 
+ /*
+- * read_reg_dma:	prepares a descriptor to read a given number of
++ * qcom_read_reg_dma:	prepares a descriptor to read a given number of
+  *			contiguous registers to the reg_read_buf pointer
+  *
+  * @first:		offset of the first register in the contiguous block
+  * @num_regs:		number of registers to read
+  * @flags:		flags to control DMA descriptor preparation
+  */
+-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+-			int num_regs, unsigned int flags)
++static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
++			     int num_regs, unsigned int flags)
+ {
+ 	bool flow_control = false;
+ 	void *vaddr;
+@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand
+ 		first = dev_cmd_reg_addr(nandc, first);
+ 
+ 	if (nandc->props->supports_bam)
+-		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ 					     num_regs, flags);
+ 
+ 	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ 		flow_control = true;
+ 
+-	return prep_adm_dma_desc(nandc, true, first, vaddr,
++	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+ 				 num_regs * sizeof(u32), flow_control);
+ }
+ 
+ /*
+- * write_reg_dma:	prepares a descriptor to write a given number of
++ * qcom_write_reg_dma:	prepares a descriptor to write a given number of
+  *			contiguous registers
+  *
+  * @vaddr:		contiguous memory from where register value will
+@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand
+  * @num_regs:		number of registers to write
+  * @flags:		flags to control DMA descriptor preparation
+  */
+-static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+-			 int first, int num_regs, unsigned int flags)
++static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++			      int first, int num_regs, unsigned int flags)
+ {
+ 	bool flow_control = false;
+ 
+@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nan
+ 		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+ 
+ 	if (nandc->props->supports_bam)
+-		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ 					     num_regs, flags);
+ 
+ 	if (first == NAND_FLASH_CMD)
+ 		flow_control = true;
+ 
+-	return prep_adm_dma_desc(nandc, false, first, vaddr,
++	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+ 				 num_regs * sizeof(u32), flow_control);
+ }
+ 
+ /*
+- * read_data_dma:	prepares a DMA descriptor to transfer data from the
++ * qcom_read_data_dma:	prepares a DMA descriptor to transfer data from the
+  *			controller's internal buffer to the buffer 'vaddr'
+  *
+  * @reg_off:		offset within the controller's data buffer
+@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nan
+  * @size:		DMA transaction size in bytes
+  * @flags:		flags to control DMA descriptor preparation
+  */
+-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-			 const u8 *vaddr, int size, unsigned int flags)
++static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++			      const u8 *vaddr, int size, unsigned int flags)
+ {
+ 	if (nandc->props->supports_bam)
+-		return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
++		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+ 
+-	return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
++	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+ }
+ 
+ /*
+- * write_data_dma:	prepares a DMA descriptor to transfer data from
++ * qcom_write_data_dma:	prepares a DMA descriptor to transfer data from
+  *			'vaddr' to the controller's internal buffer
+  *
+  * @reg_off:		offset within the controller's data buffer
+@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nan
+  * @size:		DMA transaction size in bytes
+  * @flags:		flags to control DMA descriptor preparation
+  */
+-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-			  const u8 *vaddr, int size, unsigned int flags)
++static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++			       const u8 *vaddr, int size, unsigned int flags)
+ {
+ 	if (nandc->props->supports_bam)
+-		return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
++		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+ 
+-	return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
++	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+ }
+ 
+ /*
+@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct
+ {
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+-	write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+-	write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++	qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ 	if (!nandc->props->qpic_version2)
+-		write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+-	write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+-		      NAND_ERASED_CW_DETECT_CFG, 1, 0);
+-	write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+-		      NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++		qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++	qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
++			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
++	qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
++			   NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ }
+ 
+ /*
+@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *ch
+ 		reg = &nandc->regs->read_location_last0;
+ 
+ 	if (nandc->props->supports_bam)
+-		write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
++		qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ 
+-	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ 
+ 	if (use_ecc) {
+-		read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+-		read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+-			     NAND_BAM_NEXT_SGL);
++		qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
++		qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
++				  NAND_BAM_NEXT_SGL);
+ 	} else {
+-		read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++		qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ 	}
+ }
+ 
+@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struc
+ {
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+-	write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+-	write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
++	qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ 	if (!nandc->props->qpic_version2)
+-		write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+-			      NAND_BAM_NEXT_SGL);
++		qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
++				   NAND_BAM_NEXT_SGL);
+ }
+ 
+ /*
+@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct
+ {
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 
+-	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ 
+-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ 
+-	write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+-	write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++	qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
++			   NAND_BAM_NEXT_SGL);
+ }
+ 
+ /* helpers to submit/free our list of dma descriptors */
+-static int submit_descs(struct qcom_nand_controller *nandc)
++static int qcom_submit_descs(struct qcom_nand_controller *nandc)
+ {
+ 	struct desc_info *desc, *n;
+ 	dma_cookie_t cookie = 0;
+@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand
+ 
+ 	if (nandc->props->supports_bam) {
+ 		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+-			ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ 			if (ret)
+ 				goto err_unmap_free_desc;
+ 		}
+ 
+ 		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+-			ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
+-						   DMA_PREP_INTERRUPT);
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++							  DMA_PREP_INTERRUPT);
+ 			if (ret)
+ 				goto err_unmap_free_desc;
+ 		}
+ 
+ 		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+-			ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-						   DMA_PREP_CMD);
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++							  DMA_PREP_CMD);
+ 			if (ret)
+ 				goto err_unmap_free_desc;
+ 		}
+@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand
+ 		cookie = dmaengine_submit(desc->dma_desc);
+ 
+ 	if (nandc->props->supports_bam) {
+-		bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
++		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+ 		bam_txn->last_cmd_desc->callback_param = bam_txn;
+ 
+ 		dma_async_issue_pending(nandc->tx_chan);
+@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand
+ err_unmap_free_desc:
+ 	/*
+ 	 * Unmap the dma sg_list and free the desc allocated by both
+-	 * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
++	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+ 	 */
+ 	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ 		list_del(&desc->node);
+@@ -1333,10 +1333,10 @@ err_unmap_free_desc:
+ }
+ 
+ /* reset the register read buffer for next NAND operation */
+-static void clear_read_regs(struct qcom_nand_controller *nandc)
++static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+ {
+ 	nandc->reg_read_pos = 0;
+-	nandc_dev_to_mem(nandc, false);
++	qcom_nandc_dev_to_mem(nandc, false);
+ }
+ 
+ /*
+@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qco
+ 	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ 	int i;
+ 
+-	nandc_dev_to_mem(nandc, true);
++	qcom_nandc_dev_to_mem(nandc, true);
+ 
+ 	for (i = 0; i < cw_cnt; i++) {
+ 		u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+ 	nand_read_page_op(chip, page, 0, NULL, 0);
+ 	nandc->buf_count = 0;
+ 	nandc->buf_start = 0;
+-	clear_read_regs(nandc);
++	qcom_clear_read_regs(nandc);
+ 	host->use_ecc = false;
+ 
+ 	if (nandc->props->qpic_version2)
+ 		raw_cw = ecc->steps - 1;
+ 
+-	clear_bam_transaction(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 	set_address(host, host->cw_size * cw, page);
+ 	update_rw_regs(host, 1, true, raw_cw);
+ 	config_nand_page_read(chip);
+@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *
+ 
+ 	config_nand_cw_read(chip, false, raw_cw);
+ 
+-	read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
++	qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ 	reg_off += data_size1;
+ 
+-	read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
++	qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ 	reg_off += oob_size1;
+ 
+-	read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
++	qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ 	reg_off += data_size2;
+ 
+-	read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
++	qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
+ 		return ret;
+@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom
+ 	u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
+ 
+ 	buf = (struct read_stats *)nandc->reg_read_buf;
+-	nandc_dev_to_mem(nandc, true);
++	qcom_nandc_dev_to_mem(nandc, true);
+ 
+ 	for (i = 0; i < ecc->steps; i++, buf++) {
+ 		u32 flash, buffer, erased_cw;
+@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nan
+ 		config_nand_cw_read(chip, true, i);
+ 
+ 		if (data_buf)
+-			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+-				      data_size, 0);
++			qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
++					   data_size, 0);
+ 
+ 		/*
+ 		 * when ecc is enabled, the controller doesn't read the real
+@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nan
+ 			for (j = 0; j < host->bbm_size; j++)
+ 				*oob_buf++ = 0xff;
+ 
+-			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+-				      oob_buf, oob_size, 0);
++			qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
++					   oob_buf, oob_size, 0);
+ 		}
+ 
+ 		if (data_buf)
+@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nan
+ 			oob_buf += oob_size;
+ 	}
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure to read page/oob\n");
+ 		return ret;
+@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand
+ 	int size;
+ 	int ret;
+ 
+-	clear_read_regs(nandc);
++	qcom_clear_read_regs(nandc);
+ 
+ 	size = host->use_ecc ? host->cw_data : host->cw_size;
+ 
+@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand
+ 
+ 	config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
+ 
+-	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
++	qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret)
+ 		dev_err(nandc->dev, "failed to copy last codeword\n");
+ 
+@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct n
+ 	nandc->buf_count = 0;
+ 	nandc->buf_start = 0;
+ 	host->use_ecc = true;
+-	clear_read_regs(nandc);
++	qcom_clear_read_regs(nandc);
+ 	set_address(host, 0, page);
+ 	update_rw_regs(host, ecc->steps, true, 0);
+ 
+ 	data_buf = buf;
+ 	oob_buf = oob_required ? chip->oob_poi : NULL;
+ 
+-	clear_bam_transaction(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	return read_page_ecc(host, data_buf, oob_buf, page);
+ }
+@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct na
+ 	if (host->nr_boot_partitions)
+ 		qcom_nandc_codeword_fixup(host, page);
+ 
+-	clear_read_regs(nandc);
+-	clear_bam_transaction(nandc);
++	qcom_clear_read_regs(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	host->use_ecc = true;
+ 	set_address(host, 0, page);
+@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct
+ 	set_address(host, 0, page);
+ 	nandc->buf_count = 0;
+ 	nandc->buf_start = 0;
+-	clear_read_regs(nandc);
+-	clear_bam_transaction(nandc);
++	qcom_clear_read_regs(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	data_buf = (u8 *)buf;
+ 	oob_buf = chip->oob_poi;
+@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct
+ 			oob_size = ecc->bytes;
+ 		}
+ 
+-		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+-			       i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
++		qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
++				    i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+ 
+ 		/*
+ 		 * when ECC is enabled, we don't really need to write anything
+@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct
+ 		if (qcom_nandc_is_last_cw(ecc, i)) {
+ 			oob_buf += host->bbm_size;
+ 
+-			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+-				       oob_buf, oob_size, 0);
++			qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
++					    oob_buf, oob_size, 0);
+ 		}
+ 
+ 		config_nand_cw_write(chip);
+@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct
+ 		oob_buf += oob_size;
+ 	}
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure to write page\n");
+ 		return ret;
+@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(str
+ 		qcom_nandc_codeword_fixup(host, page);
+ 
+ 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+-	clear_read_regs(nandc);
+-	clear_bam_transaction(nandc);
++	qcom_clear_read_regs(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	data_buf = (u8 *)buf;
+ 	oob_buf = chip->oob_poi;
+@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(str
+ 			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ 		}
+ 
+-		write_data_dma(nandc, reg_off, data_buf, data_size1,
+-			       NAND_BAM_NO_EOT);
++		qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
++				    NAND_BAM_NO_EOT);
+ 		reg_off += data_size1;
+ 		data_buf += data_size1;
+ 
+-		write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+-			       NAND_BAM_NO_EOT);
++		qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
++				    NAND_BAM_NO_EOT);
+ 		reg_off += oob_size1;
+ 		oob_buf += oob_size1;
+ 
+-		write_data_dma(nandc, reg_off, data_buf, data_size2,
+-			       NAND_BAM_NO_EOT);
++		qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
++				    NAND_BAM_NO_EOT);
+ 		reg_off += data_size2;
+ 		data_buf += data_size2;
+ 
+-		write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
++		qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ 		oob_buf += oob_size2;
+ 
+ 		config_nand_cw_write(chip);
+ 	}
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure to write raw page\n");
+ 		return ret;
+@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct n
+ 		qcom_nandc_codeword_fixup(host, page);
+ 
+ 	host->use_ecc = true;
+-	clear_bam_transaction(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	/* calculate the data and oob size for the last codeword/step */
+ 	data_size = ecc->size - ((ecc->steps - 1) << 2);
+@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct n
+ 	update_rw_regs(host, 1, false, 0);
+ 
+ 	config_nand_page_write(chip);
+-	write_data_dma(nandc, FLASH_BUF_ACC,
+-		       nandc->data_buffer, data_size + oob_size, 0);
++	qcom_write_data_dma(nandc, FLASH_BUF_ACC,
++			    nandc->data_buffer, data_size + oob_size, 0);
+ 	config_nand_cw_write(chip);
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure to write oob\n");
+ 		return ret;
+@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct n
+ 	 */
+ 	host->use_ecc = false;
+ 
+-	clear_bam_transaction(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 	ret = copy_last_cw(host, page);
+ 	if (ret)
+ 		goto err;
+@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(stru
+ 	struct nand_ecc_ctrl *ecc = &chip->ecc;
+ 	int page, ret;
+ 
+-	clear_read_regs(nandc);
+-	clear_bam_transaction(nandc);
++	qcom_clear_read_regs(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	/*
+ 	 * to mark the BBM as bad, we flash the entire last codeword with 0s.
+@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(stru
+ 	update_rw_regs(host, 1, false, ecc->steps - 1);
+ 
+ 	config_nand_page_write(chip);
+-	write_data_dma(nandc, FLASH_BUF_ACC,
+-		       nandc->data_buffer, host->cw_size, 0);
++	qcom_write_data_dma(nandc, FLASH_BUF_ACC,
++			    nandc->data_buffer, host->cw_size, 0);
+ 	config_nand_cw_write(chip);
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure to update BBM\n");
+ 		return ret;
+@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct
+ 	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+ 	/* Free the initially allocated BAM transaction for reading the ONFI params */
+ 	if (nandc->props->supports_bam)
+-		free_bam_transaction(nandc);
++		qcom_free_bam_transaction(nandc);
+ 
+ 	nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
+ 				     cwperpage);
+ 
+ 	/* Now allocate the BAM transaction based on updated max_cwperpage */
+ 	if (nandc->props->supports_bam) {
+-		nandc->bam_txn = alloc_bam_transaction(nandc);
++		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+ 		if (!nandc->bam_txn) {
+ 			dev_err(nandc->dev,
+ 				"failed to allocate bam transaction\n");
+@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
+ 	unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+ 	u32 flash;
+ 
+-	nandc_dev_to_mem(nandc, true);
++	qcom_nandc_dev_to_mem(nandc, true);
+ 
+ 	do {
+ 		flash = le32_to_cpu(nandc->reg_read_buf[0]);
+@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct
+ 	nandc->buf_start = 0;
+ 	host->use_ecc = false;
+ 
+-	clear_read_regs(nandc);
+-	clear_bam_transaction(nandc);
++	qcom_clear_read_regs(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	nandc->regs->cmd = q_op.cmd_reg;
+ 	nandc->regs->exec = cpu_to_le32(1);
+ 
+-	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+-	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure in submitting status descriptor\n");
+ 		goto err_out;
+ 	}
+ 
+-	nandc_dev_to_mem(nandc, true);
++	qcom_nandc_dev_to_mem(nandc, true);
+ 
+ 	for (i = 0; i < num_cw; i++) {
+ 		flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct
+ 	nandc->buf_start = 0;
+ 	host->use_ecc = false;
+ 
+-	clear_read_regs(nandc);
+-	clear_bam_transaction(nandc);
++	qcom_clear_read_regs(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	nandc->regs->cmd = q_op.cmd_reg;
+ 	nandc->regs->addr0 = q_op.addr1_reg;
+@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct
+ 	nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
+ 	nandc->regs->exec = cpu_to_le32(1);
+ 
+-	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+-	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ 
+-	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
++	qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure in submitting read id descriptor\n");
+ 		goto err_out;
+@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
+ 	op_id = q_op.data_instr_idx;
+ 	len = nand_subop_get_data_len(subop, op_id);
+ 
+-	nandc_dev_to_mem(nandc, true);
++	qcom_nandc_dev_to_mem(nandc, true);
+ 	memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+ 
+ err_out:
+@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struc
+ 	nandc->buf_start = 0;
+ 	host->use_ecc = false;
+ 
+-	clear_read_regs(nandc);
+-	clear_bam_transaction(nandc);
++	qcom_clear_read_regs(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	nandc->regs->cmd = q_op.cmd_reg;
+ 	nandc->regs->exec = cpu_to_le32(1);
+ 
+-	write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ 	if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
+-		write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++		qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ 
+-	write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure in submitting misc descriptor\n");
+ 		goto err_out;
+@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(str
+ 	nandc->buf_count = 0;
+ 	nandc->buf_start = 0;
+ 	host->use_ecc = false;
+-	clear_read_regs(nandc);
+-	clear_bam_transaction(nandc);
++	qcom_clear_read_regs(nandc);
++	qcom_clear_bam_transaction(nandc);
+ 
+ 	nandc->regs->cmd = q_op.cmd_reg;
+ 	nandc->regs->addr0 = 0;
+@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(str
+ 	nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+ 
+ 	if (!nandc->props->qpic_version2) {
+-		write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+-		write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
++		qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
++		qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ 	}
+ 
+ 	nandc->buf_count = len;
+@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(str
+ 
+ 	config_nand_single_cw_page_read(chip, false, 0);
+ 
+-	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+-		      nandc->buf_count, 0);
++	qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
++			   nandc->buf_count, 0);
+ 
+ 	/* restore CMD1 and VLD regs */
+ 	if (!nandc->props->qpic_version2) {
+-		write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+-		write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+-			      NAND_BAM_NEXT_SGL);
++		qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
++		qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
++				   NAND_BAM_NEXT_SGL);
+ 	}
+ 
+-	ret = submit_descs(nandc);
++	ret = qcom_submit_descs(nandc);
+ 	if (ret) {
+ 		dev_err(nandc->dev, "failure in submitting param page descriptor\n");
+ 		goto err_out;
+@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_
+ 		 * maximum codeword size
+ 		 */
+ 		nandc->max_cwperpage = 1;
+-		nandc->bam_txn = alloc_bam_transaction(nandc);
++		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+ 		if (!nandc->bam_txn) {
+ 			dev_err(nandc->dev,
+ 				"failed to allocate bam transaction\n");

+ 2436 - 0
target/linux/generic/backport-6.6/413-03-v6.14-mtd-nand-Add-qpic_common-API-file.patch

@@ -0,0 +1,2436 @@
+From fdf3ee5c6e5278dab4f60b998b47ed2d510bf80f Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <[email protected]>
+Date: Wed, 20 Nov 2024 14:45:02 +0530
+Subject: [PATCH 3/4] mtd: nand: Add qpic_common API file
+
+Add qpic_common.c file which hold all the common
+qpic APIs which will be used by both qpic raw nand
+driver and qpic spi nand driver.
+
+Signed-off-by: Md Sadre Alam <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+---
+ drivers/mtd/nand/Makefile            |    2 +-
+ drivers/mtd/nand/qpic_common.c       |  759 ++++++++++++++++++
+ drivers/mtd/nand/raw/qcom_nandc.c    | 1092 +-------------------------
+ include/linux/mtd/nand-qpic-common.h |  468 +++++++++++
+ 4 files changed, 1240 insertions(+), 1081 deletions(-)
+ create mode 100644 drivers/mtd/nand/qpic_common.c
+ create mode 100644 include/linux/mtd/nand-qpic-common.h
+
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -3,7 +3,7 @@
+ nandcore-objs := core.o bbt.o
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+ obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
+-
++obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
+ obj-y	+= onenand/
+ obj-y	+= raw/
+ obj-y	+= spi/
+--- /dev/null
++++ b/drivers/mtd/nand/qpic_common.c
+@@ -0,0 +1,759 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
++ */
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dma/qcom_adm.h>
++#include <linux/dma/qcom_bam_dma.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/mtd/nand-qpic-common.h>
++
++/**
++ * qcom_free_bam_transaction() - Frees the BAM transaction memory
++ * @nandc: qpic nand controller
++ *
++ * This function frees the bam transaction memory
++ */
++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
++{
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++
++	kfree(bam_txn);
++}
++EXPORT_SYMBOL(qcom_free_bam_transaction);
++
++/**
++ * qcom_alloc_bam_transaction() - allocate BAM transaction
++ * @nandc: qpic nand controller
++ *
++ * This function will allocate and initialize the BAM transaction structure
++ */
++struct bam_transaction *
++qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
++{
++	struct bam_transaction *bam_txn;
++	size_t bam_txn_size;
++	unsigned int num_cw = nandc->max_cwperpage;
++	void *bam_txn_buf;
++
++	bam_txn_size =
++		sizeof(*bam_txn) + num_cw *
++		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
++		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
++		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
++
++	bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
++	if (!bam_txn_buf)
++		return NULL;
++
++	bam_txn = bam_txn_buf;
++	bam_txn_buf += sizeof(*bam_txn);
++
++	bam_txn->bam_ce = bam_txn_buf;
++	bam_txn_buf +=
++		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
++
++	bam_txn->cmd_sgl = bam_txn_buf;
++	bam_txn_buf +=
++		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
++
++	bam_txn->data_sgl = bam_txn_buf;
++
++	init_completion(&bam_txn->txn_done);
++
++	return bam_txn;
++}
++EXPORT_SYMBOL(qcom_alloc_bam_transaction);
++
++/**
++ * qcom_clear_bam_transaction() - Clears the BAM transaction
++ * @nandc: qpic nand controller
++ *
++ * This function will clear the BAM transaction indexes.
++ */
++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
++{
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++
++	if (!nandc->props->supports_bam)
++		return;
++
++	memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
++	bam_txn->last_data_desc = NULL;
++
++	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
++		      QPIC_PER_CW_CMD_SGL);
++	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
++		      QPIC_PER_CW_DATA_SGL);
++
++	reinit_completion(&bam_txn->txn_done);
++}
++EXPORT_SYMBOL(qcom_clear_bam_transaction);
++
++/**
++ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
++ * @data: data pointer
++ *
++ * This function is a callback for DMA descriptor completion
++ */
++void qcom_qpic_bam_dma_done(void *data)
++{
++	struct bam_transaction *bam_txn = data;
++
++	complete(&bam_txn->txn_done);
++}
++EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
++
++/**
++ * qcom_nandc_dev_to_mem() - Check for dma sync for cpu or device
++ * @nandc: qpic nand controller
++ * @is_cpu: cpu or Device
++ *
++ * This function will check for dma sync for cpu or device
++ */
++inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
++{
++	if (!nandc->props->supports_bam)
++		return;
++
++	if (is_cpu)
++		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
++					MAX_REG_RD *
++					sizeof(*nandc->reg_read_buf),
++					DMA_FROM_DEVICE);
++	else
++		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
++					   MAX_REG_RD *
++					   sizeof(*nandc->reg_read_buf),
++					   DMA_FROM_DEVICE);
++}
++EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
++
++/**
++ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
++ * @nandc: qpic nand controller
++ * @chan: dma channel
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function maps the scatter gather list for DMA transfer and forms the
++ * DMA descriptor for BAM.This descriptor will be added in the NAND DMA
++ * descriptor queue which will be submitted to DMA engine.
++ */
++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++				struct dma_chan *chan, unsigned long flags)
++{
++	struct desc_info *desc;
++	struct scatterlist *sgl;
++	unsigned int sgl_cnt;
++	int ret;
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++	enum dma_transfer_direction dir_eng;
++	struct dma_async_tx_descriptor *dma_desc;
++
++	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++	if (!desc)
++		return -ENOMEM;
++
++	if (chan == nandc->cmd_chan) {
++		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
++		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
++		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
++		dir_eng = DMA_MEM_TO_DEV;
++		desc->dir = DMA_TO_DEVICE;
++	} else if (chan == nandc->tx_chan) {
++		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
++		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
++		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
++		dir_eng = DMA_MEM_TO_DEV;
++		desc->dir = DMA_TO_DEVICE;
++	} else {
++		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
++		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
++		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
++		dir_eng = DMA_DEV_TO_MEM;
++		desc->dir = DMA_FROM_DEVICE;
++	}
++
++	sg_mark_end(sgl + sgl_cnt - 1);
++	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
++	if (ret == 0) {
++		dev_err(nandc->dev, "failure in mapping desc\n");
++		kfree(desc);
++		return -ENOMEM;
++	}
++
++	desc->sgl_cnt = sgl_cnt;
++	desc->bam_sgl = sgl;
++
++	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
++					   flags);
++
++	if (!dma_desc) {
++		dev_err(nandc->dev, "failure in prep desc\n");
++		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
++		kfree(desc);
++		return -EINVAL;
++	}
++
++	desc->dma_desc = dma_desc;
++
++	/* update last data/command descriptor */
++	if (chan == nandc->cmd_chan)
++		bam_txn->last_cmd_desc = dma_desc;
++	else
++		bam_txn->last_data_desc = dma_desc;
++
++	list_add_tail(&desc->node, &nandc->desc_list);
++
++	return 0;
++}
++EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
++
++/**
++ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepares the command descriptor for BAM DMA
++ * which will be used for NAND register reads and writes.
++ */
++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++			       int reg_off, const void *vaddr,
++			       int size, unsigned int flags)
++{
++	int bam_ce_size;
++	int i, ret;
++	struct bam_cmd_element *bam_ce_buffer;
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++
++	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
++
++	/* fill the command desc */
++	for (i = 0; i < size; i++) {
++		if (read)
++			bam_prep_ce(&bam_ce_buffer[i],
++				    nandc_reg_phys(nandc, reg_off + 4 * i),
++				    BAM_READ_COMMAND,
++				    reg_buf_dma_addr(nandc,
++						     (__le32 *)vaddr + i));
++		else
++			bam_prep_ce_le32(&bam_ce_buffer[i],
++					 nandc_reg_phys(nandc, reg_off + 4 * i),
++					 BAM_WRITE_COMMAND,
++					 *((__le32 *)vaddr + i));
++	}
++
++	bam_txn->bam_ce_pos += size;
++
++	/* use the separate sgl after this command */
++	if (flags & NAND_BAM_NEXT_SGL) {
++		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
++		bam_ce_size = (bam_txn->bam_ce_pos -
++				bam_txn->bam_ce_start) *
++				sizeof(struct bam_cmd_element);
++		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
++			   bam_ce_buffer, bam_ce_size);
++		bam_txn->cmd_sgl_pos++;
++		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
++
++		if (flags & NAND_BAM_NWD) {
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++							  DMA_PREP_FENCE | DMA_PREP_CMD);
++			if (ret)
++				return ret;
++		}
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
++
++/**
++ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepares the data descriptor for BAM DMA which
++ * will be used for NAND data reads and writes.
++ */
++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++				const void *vaddr, int size, unsigned int flags)
++{
++	int ret;
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++
++	if (read) {
++		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
++			   vaddr, size);
++		bam_txn->rx_sgl_pos++;
++	} else {
++		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
++			   vaddr, size);
++		bam_txn->tx_sgl_pos++;
++
++		/*
++		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
++		 * is not set, form the DMA descriptor
++		 */
++		if (!(flags & NAND_BAM_NO_EOT)) {
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++							  DMA_PREP_INTERRUPT);
++			if (ret)
++				return ret;
++		}
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
++
++/**
++ * qcom_prep_adm_dma_desc() - Prepare descriptor for adma
++ * @nandc: qpic nand controller
++ * @read: read or write type
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: adm dma transaction size in bytes
++ * @flow_control: flow controller
++ *
++ * This function will prepare descriptor for adma
++ */
++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
++			   int reg_off, const void *vaddr, int size,
++			   bool flow_control)
++{
++	struct qcom_adm_peripheral_config periph_conf = {};
++	struct dma_async_tx_descriptor *dma_desc;
++	struct dma_slave_config slave_conf = {0};
++	enum dma_transfer_direction dir_eng;
++	struct desc_info *desc;
++	struct scatterlist *sgl;
++	int ret;
++
++	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++	if (!desc)
++		return -ENOMEM;
++
++	sgl = &desc->adm_sgl;
++
++	sg_init_one(sgl, vaddr, size);
++
++	if (read) {
++		dir_eng = DMA_DEV_TO_MEM;
++		desc->dir = DMA_FROM_DEVICE;
++	} else {
++		dir_eng = DMA_MEM_TO_DEV;
++		desc->dir = DMA_TO_DEVICE;
++	}
++
++	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
++	if (!ret) {
++		ret = -ENOMEM;
++		goto err;
++	}
++
++	slave_conf.device_fc = flow_control;
++	if (read) {
++		slave_conf.src_maxburst = 16;
++		slave_conf.src_addr = nandc->base_dma + reg_off;
++		if (nandc->data_crci) {
++			periph_conf.crci = nandc->data_crci;
++			slave_conf.peripheral_config = &periph_conf;
++			slave_conf.peripheral_size = sizeof(periph_conf);
++		}
++	} else {
++		slave_conf.dst_maxburst = 16;
++		slave_conf.dst_addr = nandc->base_dma + reg_off;
++		if (nandc->cmd_crci) {
++			periph_conf.crci = nandc->cmd_crci;
++			slave_conf.peripheral_config = &periph_conf;
++			slave_conf.peripheral_size = sizeof(periph_conf);
++		}
++	}
++
++	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
++	if (ret) {
++		dev_err(nandc->dev, "failed to configure dma channel\n");
++		goto err;
++	}
++
++	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
++	if (!dma_desc) {
++		dev_err(nandc->dev, "failed to prepare desc\n");
++		ret = -EINVAL;
++		goto err;
++	}
++
++	desc->dma_desc = dma_desc;
++
++	list_add_tail(&desc->node, &nandc->desc_list);
++
++	return 0;
++err:
++	kfree(desc);
++
++	return ret;
++}
++EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
++
++/**
++ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
++ * @nandc: qpic nand controller
++ * @first: offset of the first register in the contiguous block
++ * @num_regs: number of registers to read
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepares a descriptor to read a given number of
++ * contiguous registers to the reg_read_buf pointer.
++ */
++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
++		      int num_regs, unsigned int flags)
++{
++	bool flow_control = false;
++	void *vaddr;
++
++	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
++	nandc->reg_read_pos += num_regs;
++
++	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
++		first = dev_cmd_reg_addr(nandc, first);
++
++	if (nandc->props->supports_bam)
++		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
++					     num_regs, flags);
++
++	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
++		flow_control = true;
++
++	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
++				      num_regs * sizeof(u32), flow_control);
++}
++EXPORT_SYMBOL(qcom_read_reg_dma);
++
++/**
++ * qcom_write_reg_dma() - write a given number of registers
++ * @nandc: qpic nand controller
++ * @vaddr: contiguous memory from where register value will
++ *	   be written
++ * @first: offset of the first register in the contiguous block
++ * @num_regs: number of registers to write
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepares a descriptor to write a given number of
++ * contiguous registers
++ */
++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
++		       int first, int num_regs, unsigned int flags)
++{
++	bool flow_control = false;
++
++	if (first == NAND_EXEC_CMD)
++		flags |= NAND_BAM_NWD;
++
++	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
++		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
++
++	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
++		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
++
++	if (nandc->props->supports_bam)
++		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
++						  num_regs, flags);
++
++	if (first == NAND_FLASH_CMD)
++		flow_control = true;
++
++	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
++				      num_regs * sizeof(u32), flow_control);
++}
++EXPORT_SYMBOL(qcom_write_reg_dma);
++
++/**
++ * qcom_read_data_dma() - transfer data
++ * @nandc: qpic nand controller
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to write to
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepares a DMA descriptor to transfer data from the
++ * controller's internal buffer to the buffer 'vaddr'
++ */
++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++		       const u8 *vaddr, int size, unsigned int flags)
++{
++	if (nandc->props->supports_bam)
++		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
++
++	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
++}
++EXPORT_SYMBOL(qcom_read_data_dma);
++
++/**
++ * qcom_write_data_dma() - transfer data
++ * @nandc: qpic nand controller
++ * @reg_off: offset within the controller's data buffer
++ * @vaddr: virtual address of the buffer we want to read from
++ * @size: DMA transaction size in bytes
++ * @flags: flags to control DMA descriptor preparation
++ *
++ * This function will prepares a DMA descriptor to transfer data from
++ * 'vaddr' to the controller's internal buffer
++ */
++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
++			const u8 *vaddr, int size, unsigned int flags)
++{
++	if (nandc->props->supports_bam)
++		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
++
++	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
++}
++EXPORT_SYMBOL(qcom_write_data_dma);
++
++/**
++ * qcom_submit_descs() - submit dma descriptor
++ * @nandc: qpic nand controller
++ *
++ * This function will submit all the prepared dma descriptor
++ * cmd or data descriptor
++ */
++int qcom_submit_descs(struct qcom_nand_controller *nandc)
++{
++	struct desc_info *desc, *n;
++	dma_cookie_t cookie = 0;
++	struct bam_transaction *bam_txn = nandc->bam_txn;
++	int ret = 0;
++
++	if (nandc->props->supports_bam) {
++		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
++			if (ret)
++				goto err_unmap_free_desc;
++		}
++
++		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
++							  DMA_PREP_INTERRUPT);
++			if (ret)
++				goto err_unmap_free_desc;
++		}
++
++		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
++			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
++							  DMA_PREP_CMD);
++			if (ret)
++				goto err_unmap_free_desc;
++		}
++	}
++
++	list_for_each_entry(desc, &nandc->desc_list, node)
++		cookie = dmaengine_submit(desc->dma_desc);
++
++	if (nandc->props->supports_bam) {
++		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
++		bam_txn->last_cmd_desc->callback_param = bam_txn;
++
++		dma_async_issue_pending(nandc->tx_chan);
++		dma_async_issue_pending(nandc->rx_chan);
++		dma_async_issue_pending(nandc->cmd_chan);
++
++		if (!wait_for_completion_timeout(&bam_txn->txn_done,
++						 QPIC_NAND_COMPLETION_TIMEOUT))
++			ret = -ETIMEDOUT;
++	} else {
++		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
++			ret = -ETIMEDOUT;
++	}
++
++err_unmap_free_desc:
++	/*
++	 * Unmap the dma sg_list and free the desc allocated by both
++	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
++	 */
++	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
++		list_del(&desc->node);
++
++		if (nandc->props->supports_bam)
++			dma_unmap_sg(nandc->dev, desc->bam_sgl,
++				     desc->sgl_cnt, desc->dir);
++		else
++			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
++				     desc->dir);
++
++		kfree(desc);
++	}
++
++	return ret;
++}
++EXPORT_SYMBOL(qcom_submit_descs);
++
++/**
++ * qcom_clear_read_regs() - reset the read register buffer
++ * @nandc: qpic nand controller
++ *
++ * This function reset the register read buffer for next NAND operation
++ */
++void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
++{
++	nandc->reg_read_pos = 0;
++	qcom_nandc_dev_to_mem(nandc, false);
++}
++EXPORT_SYMBOL(qcom_clear_read_regs);
++
++/**
++ * qcom_nandc_unalloc() - unallocate qpic nand controller
++ * @nandc: qpic nand controller
++ *
++ * This function will unallocate memory alloacted for qpic nand controller
++ */
++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
++{
++	if (nandc->props->supports_bam) {
++		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
++			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
++					 MAX_REG_RD *
++					 sizeof(*nandc->reg_read_buf),
++					 DMA_FROM_DEVICE);
++
++		if (nandc->tx_chan)
++			dma_release_channel(nandc->tx_chan);
++
++		if (nandc->rx_chan)
++			dma_release_channel(nandc->rx_chan);
++
++		if (nandc->cmd_chan)
++			dma_release_channel(nandc->cmd_chan);
++	} else {
++		if (nandc->chan)
++			dma_release_channel(nandc->chan);
++	}
++}
++EXPORT_SYMBOL(qcom_nandc_unalloc);
++
++/**
++ * qcom_nandc_alloc() - Allocate qpic nand controller
++ * @nandc: qpic nand controller
++ *
++ * This function will allocate memory for qpic nand controller
++ */
++int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
++{
++	int ret;
++
++	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
++	if (ret) {
++		dev_err(nandc->dev, "failed to set DMA mask\n");
++		return ret;
++	}
++
++	/*
++	 * we use the internal buffer for reading ONFI params, reading small
++	 * data like ID and status, and preforming read-copy-write operations
++	 * when writing to a codeword partially. 532 is the maximum possible
++	 * size of a codeword for our nand controller
++	 */
++	nandc->buf_size = 532;
++
++	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
++	if (!nandc->data_buffer)
++		return -ENOMEM;
++
++	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
++	if (!nandc->regs)
++		return -ENOMEM;
++
++	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
++					   sizeof(*nandc->reg_read_buf),
++					   GFP_KERNEL);
++	if (!nandc->reg_read_buf)
++		return -ENOMEM;
++
++	if (nandc->props->supports_bam) {
++		nandc->reg_read_dma =
++			dma_map_single(nandc->dev, nandc->reg_read_buf,
++				       MAX_REG_RD *
++				       sizeof(*nandc->reg_read_buf),
++				       DMA_FROM_DEVICE);
++		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
++			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
++			return -EIO;
++		}
++
++		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
++		if (IS_ERR(nandc->tx_chan)) {
++			ret = PTR_ERR(nandc->tx_chan);
++			nandc->tx_chan = NULL;
++			dev_err_probe(nandc->dev, ret,
++				      "tx DMA channel request failed\n");
++			goto unalloc;
++		}
++
++		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
++		if (IS_ERR(nandc->rx_chan)) {
++			ret = PTR_ERR(nandc->rx_chan);
++			nandc->rx_chan = NULL;
++			dev_err_probe(nandc->dev, ret,
++				      "rx DMA channel request failed\n");
++			goto unalloc;
++		}
++
++		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
++		if (IS_ERR(nandc->cmd_chan)) {
++			ret = PTR_ERR(nandc->cmd_chan);
++			nandc->cmd_chan = NULL;
++			dev_err_probe(nandc->dev, ret,
++				      "cmd DMA channel request failed\n");
++			goto unalloc;
++		}
++
++		/*
++		 * Initially allocate BAM transaction to read ONFI param page.
++		 * After detecting all the devices, this BAM transaction will
++		 * be freed and the next BAM transaction will be allocated with
++		 * maximum codeword size
++		 */
++		nandc->max_cwperpage = 1;
++		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
++		if (!nandc->bam_txn) {
++			dev_err(nandc->dev,
++				"failed to allocate bam transaction\n");
++			ret = -ENOMEM;
++			goto unalloc;
++		}
++	} else {
++		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
++		if (IS_ERR(nandc->chan)) {
++			ret = PTR_ERR(nandc->chan);
++			nandc->chan = NULL;
++			dev_err_probe(nandc->dev, ret,
++				      "rxtx DMA channel request failed\n");
++			return ret;
++		}
++	}
++
++	INIT_LIST_HEAD(&nandc->desc_list);
++	INIT_LIST_HEAD(&nandc->host_list);
++
++	return 0;
++unalloc:
++	qcom_nandc_unalloc(nandc);
++	return ret;
++}
++EXPORT_SYMBOL(qcom_nandc_alloc);
++
++MODULE_DESCRIPTION("QPIC controller common api");
++MODULE_LICENSE("GPL");
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -15,417 +15,7 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+-
+-/* NANDc reg offsets */
+-#define	NAND_FLASH_CMD			0x00
+-#define	NAND_ADDR0			0x04
+-#define	NAND_ADDR1			0x08
+-#define	NAND_FLASH_CHIP_SELECT		0x0c
+-#define	NAND_EXEC_CMD			0x10
+-#define	NAND_FLASH_STATUS		0x14
+-#define	NAND_BUFFER_STATUS		0x18
+-#define	NAND_DEV0_CFG0			0x20
+-#define	NAND_DEV0_CFG1			0x24
+-#define	NAND_DEV0_ECC_CFG		0x28
+-#define	NAND_AUTO_STATUS_EN		0x2c
+-#define	NAND_DEV1_CFG0			0x30
+-#define	NAND_DEV1_CFG1			0x34
+-#define	NAND_READ_ID			0x40
+-#define	NAND_READ_STATUS		0x44
+-#define	NAND_DEV_CMD0			0xa0
+-#define	NAND_DEV_CMD1			0xa4
+-#define	NAND_DEV_CMD2			0xa8
+-#define	NAND_DEV_CMD_VLD		0xac
+-#define	SFLASHC_BURST_CFG		0xe0
+-#define	NAND_ERASED_CW_DETECT_CFG	0xe8
+-#define	NAND_ERASED_CW_DETECT_STATUS	0xec
+-#define	NAND_EBI2_ECC_BUF_CFG		0xf0
+-#define	FLASH_BUF_ACC			0x100
+-
+-#define	NAND_CTRL			0xf00
+-#define	NAND_VERSION			0xf08
+-#define	NAND_READ_LOCATION_0		0xf20
+-#define	NAND_READ_LOCATION_1		0xf24
+-#define	NAND_READ_LOCATION_2		0xf28
+-#define	NAND_READ_LOCATION_3		0xf2c
+-#define	NAND_READ_LOCATION_LAST_CW_0	0xf40
+-#define	NAND_READ_LOCATION_LAST_CW_1	0xf44
+-#define	NAND_READ_LOCATION_LAST_CW_2	0xf48
+-#define	NAND_READ_LOCATION_LAST_CW_3	0xf4c
+-
+-/* dummy register offsets, used by qcom_write_reg_dma */
+-#define	NAND_DEV_CMD1_RESTORE		0xdead
+-#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
+-
+-/* NAND_FLASH_CMD bits */
+-#define	PAGE_ACC			BIT(4)
+-#define	LAST_PAGE			BIT(5)
+-
+-/* NAND_FLASH_CHIP_SELECT bits */
+-#define	NAND_DEV_SEL			0
+-#define	DM_EN				BIT(2)
+-
+-/* NAND_FLASH_STATUS bits */
+-#define	FS_OP_ERR			BIT(4)
+-#define	FS_READY_BSY_N			BIT(5)
+-#define	FS_MPU_ERR			BIT(8)
+-#define	FS_DEVICE_STS_ERR		BIT(16)
+-#define	FS_DEVICE_WP			BIT(23)
+-
+-/* NAND_BUFFER_STATUS bits */
+-#define	BS_UNCORRECTABLE_BIT		BIT(8)
+-#define	BS_CORRECTABLE_ERR_MSK		0x1f
+-
+-/* NAND_DEVn_CFG0 bits */
+-#define	DISABLE_STATUS_AFTER_WRITE	4
+-#define	CW_PER_PAGE			6
+-#define	UD_SIZE_BYTES			9
+-#define	UD_SIZE_BYTES_MASK		GENMASK(18, 9)
+-#define	ECC_PARITY_SIZE_BYTES_RS	19
+-#define	SPARE_SIZE_BYTES		23
+-#define	SPARE_SIZE_BYTES_MASK		GENMASK(26, 23)
+-#define	NUM_ADDR_CYCLES			27
+-#define	STATUS_BFR_READ			30
+-#define	SET_RD_MODE_AFTER_STATUS	31
+-
+-/* NAND_DEVn_CFG0 bits */
+-#define	DEV0_CFG1_ECC_DISABLE		0
+-#define	WIDE_FLASH			1
+-#define	NAND_RECOVERY_CYCLES		2
+-#define	CS_ACTIVE_BSY			5
+-#define	BAD_BLOCK_BYTE_NUM		6
+-#define	BAD_BLOCK_IN_SPARE_AREA		16
+-#define	WR_RD_BSY_GAP			17
+-#define	ENABLE_BCH_ECC			27
+-
+-/* NAND_DEV0_ECC_CFG bits */
+-#define	ECC_CFG_ECC_DISABLE		0
+-#define	ECC_SW_RESET			1
+-#define	ECC_MODE			4
+-#define	ECC_PARITY_SIZE_BYTES_BCH	8
+-#define	ECC_NUM_DATA_BYTES		16
+-#define	ECC_NUM_DATA_BYTES_MASK		GENMASK(25, 16)
+-#define	ECC_FORCE_CLK_OPEN		30
+-
+-/* NAND_DEV_CMD1 bits */
+-#define	READ_ADDR			0
+-
+-/* NAND_DEV_CMD_VLD bits */
+-#define	READ_START_VLD			BIT(0)
+-#define	READ_STOP_VLD			BIT(1)
+-#define	WRITE_START_VLD			BIT(2)
+-#define	ERASE_START_VLD			BIT(3)
+-#define	SEQ_READ_START_VLD		BIT(4)
+-
+-/* NAND_EBI2_ECC_BUF_CFG bits */
+-#define	NUM_STEPS			0
+-
+-/* NAND_ERASED_CW_DETECT_CFG bits */
+-#define	ERASED_CW_ECC_MASK		1
+-#define	AUTO_DETECT_RES			0
+-#define	MASK_ECC			BIT(ERASED_CW_ECC_MASK)
+-#define	RESET_ERASED_DET		BIT(AUTO_DETECT_RES)
+-#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
+-#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
+-#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)
+-
+-/* NAND_ERASED_CW_DETECT_STATUS bits */
+-#define	PAGE_ALL_ERASED			BIT(7)
+-#define	CODEWORD_ALL_ERASED		BIT(6)
+-#define	PAGE_ERASED			BIT(5)
+-#define	CODEWORD_ERASED			BIT(4)
+-#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
+-#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+-
+-/* NAND_READ_LOCATION_n bits */
+-#define READ_LOCATION_OFFSET		0
+-#define READ_LOCATION_SIZE		16
+-#define READ_LOCATION_LAST		31
+-
+-/* Version Mask */
+-#define	NAND_VERSION_MAJOR_MASK		0xf0000000
+-#define	NAND_VERSION_MAJOR_SHIFT	28
+-#define	NAND_VERSION_MINOR_MASK		0x0fff0000
+-#define	NAND_VERSION_MINOR_SHIFT	16
+-
+-/* NAND OP_CMDs */
+-#define	OP_PAGE_READ			0x2
+-#define	OP_PAGE_READ_WITH_ECC		0x3
+-#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
+-#define	OP_PAGE_READ_ONFI_READ		0x5
+-#define	OP_PROGRAM_PAGE			0x6
+-#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
+-#define	OP_PROGRAM_PAGE_SPARE		0x9
+-#define	OP_BLOCK_ERASE			0xa
+-#define	OP_CHECK_STATUS			0xc
+-#define	OP_FETCH_ID			0xb
+-#define	OP_RESET_DEVICE			0xd
+-
+-/* Default Value for NAND_DEV_CMD_VLD */
+-#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
+-					 ERASE_START_VLD | SEQ_READ_START_VLD)
+-
+-/* NAND_CTRL bits */
+-#define	BAM_MODE_EN			BIT(0)
+-
+-/*
+- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+- * the driver calls the chunks 'step' or 'codeword' interchangeably
+- */
+-#define	NANDC_STEP_SIZE			512
+-
+-/*
+- * the largest page size we support is 8K, this will have 16 steps/codewords
+- * of 512 bytes each
+- */
+-#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)
+-
+-/* we read at most 3 registers per codeword scan */
+-#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)
+-
+-/* ECC modes supported by the controller */
+-#define	ECC_NONE	BIT(0)
+-#define	ECC_RS_4BIT	BIT(1)
+-#define	ECC_BCH_4BIT	BIT(2)
+-#define	ECC_BCH_8BIT	BIT(3)
+-
+-/*
+- * Returns the actual register address for all NAND_DEV_ registers
+- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+- */
+-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+-
+-/* Returns the NAND register physical address */
+-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+-
+-/* Returns the dma address for reg read buffer */
+-#define reg_buf_dma_addr(chip, vaddr) \
+-	((chip)->reg_read_dma + \
+-	((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+-
+-#define QPIC_PER_CW_CMD_ELEMENTS	32
+-#define QPIC_PER_CW_CMD_SGL		32
+-#define QPIC_PER_CW_DATA_SGL		8
+-
+-#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
+-
+-/*
+- * Flags used in DMA descriptor preparation helper functions
+- * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+- */
+-/* Don't set the EOT in current tx BAM sgl */
+-#define NAND_BAM_NO_EOT			BIT(0)
+-/* Set the NWD flag in current BAM sgl */
+-#define NAND_BAM_NWD			BIT(1)
+-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+-#define NAND_BAM_NEXT_SGL		BIT(2)
+-/*
+- * Erased codeword status is being used two times in single transfer so this
+- * flag will determine the current value of erased codeword status register
+- */
+-#define NAND_ERASED_CW_SET		BIT(4)
+-
+-#define MAX_ADDRESS_CYCLE		5
+-
+-/*
+- * This data type corresponds to the BAM transaction which will be used for all
+- * NAND transfers.
+- * @bam_ce - the array of BAM command elements
+- * @cmd_sgl - sgl for NAND BAM command pipe
+- * @data_sgl - sgl for NAND BAM consumer/producer pipe
+- * @last_data_desc - last DMA desc in data channel (tx/rx).
+- * @last_cmd_desc - last DMA desc in command channel.
+- * @txn_done - completion for NAND transfer.
+- * @bam_ce_pos - the index in bam_ce which is available for next sgl
+- * @bam_ce_start - the index in bam_ce which marks the start position ce
+- *		   for current sgl. It will be used for size calculation
+- *		   for current sgl
+- * @cmd_sgl_pos - current index in command sgl.
+- * @cmd_sgl_start - start index in command sgl.
+- * @tx_sgl_pos - current index in data sgl for tx.
+- * @tx_sgl_start - start index in data sgl for tx.
+- * @rx_sgl_pos - current index in data sgl for rx.
+- * @rx_sgl_start - start index in data sgl for rx.
+- */
+-struct bam_transaction {
+-	struct bam_cmd_element *bam_ce;
+-	struct scatterlist *cmd_sgl;
+-	struct scatterlist *data_sgl;
+-	struct dma_async_tx_descriptor *last_data_desc;
+-	struct dma_async_tx_descriptor *last_cmd_desc;
+-	struct completion txn_done;
+-	u32 bam_ce_pos;
+-	u32 bam_ce_start;
+-	u32 cmd_sgl_pos;
+-	u32 cmd_sgl_start;
+-	u32 tx_sgl_pos;
+-	u32 tx_sgl_start;
+-	u32 rx_sgl_pos;
+-	u32 rx_sgl_start;
+-};
+-
+-/*
+- * This data type corresponds to the nand dma descriptor
+- * @dma_desc - low level DMA engine descriptor
+- * @list - list for desc_info
+- *
+- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+- *	      ADM
+- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+- * @dir - DMA transfer direction
+- */
+-struct desc_info {
+-	struct dma_async_tx_descriptor *dma_desc;
+-	struct list_head node;
+-
+-	union {
+-		struct scatterlist adm_sgl;
+-		struct {
+-			struct scatterlist *bam_sgl;
+-			int sgl_cnt;
+-		};
+-	};
+-	enum dma_data_direction dir;
+-};
+-
+-/*
+- * holds the current register values that we want to write. acts as a contiguous
+- * chunk of memory which we use to write the controller registers through DMA.
+- */
+-struct nandc_regs {
+-	__le32 cmd;
+-	__le32 addr0;
+-	__le32 addr1;
+-	__le32 chip_sel;
+-	__le32 exec;
+-
+-	__le32 cfg0;
+-	__le32 cfg1;
+-	__le32 ecc_bch_cfg;
+-
+-	__le32 clrflashstatus;
+-	__le32 clrreadstatus;
+-
+-	__le32 cmd1;
+-	__le32 vld;
+-
+-	__le32 orig_cmd1;
+-	__le32 orig_vld;
+-
+-	__le32 ecc_buf_cfg;
+-	__le32 read_location0;
+-	__le32 read_location1;
+-	__le32 read_location2;
+-	__le32 read_location3;
+-	__le32 read_location_last0;
+-	__le32 read_location_last1;
+-	__le32 read_location_last2;
+-	__le32 read_location_last3;
+-
+-	__le32 erased_cw_detect_cfg_clr;
+-	__le32 erased_cw_detect_cfg_set;
+-};
+-
+-/*
+- * NAND controller data struct
+- *
+- * @dev:			parent device
+- *
+- * @base:			MMIO base
+- *
+- * @core_clk:			controller clock
+- * @aon_clk:			another controller clock
+- *
+- * @regs:			a contiguous chunk of memory for DMA register
+- *				writes. contains the register values to be
+- *				written to controller
+- *
+- * @props:			properties of current NAND controller,
+- *				initialized via DT match data
+- *
+- * @controller:			base controller structure
+- * @host_list:			list containing all the chips attached to the
+- *				controller
+- *
+- * @chan:			dma channel
+- * @cmd_crci:			ADM DMA CRCI for command flow control
+- * @data_crci:			ADM DMA CRCI for data flow control
+- *
+- * @desc_list:			DMA descriptor list (list of desc_infos)
+- *
+- * @data_buffer:		our local DMA buffer for page read/writes,
+- *				used when we can't use the buffer provided
+- *				by upper layers directly
+- * @reg_read_buf:		local buffer for reading back registers via DMA
+- *
+- * @base_phys:			physical base address of controller registers
+- * @base_dma:			dma base address of controller registers
+- * @reg_read_dma:		contains dma address for register read buffer
+- *
+- * @buf_size/count/start:	markers for chip->legacy.read_buf/write_buf
+- *				functions
+- * @max_cwperpage:		maximum QPIC codewords required. calculated
+- *				from all connected NAND devices pagesize
+- *
+- * @reg_read_pos:		marker for data read in reg_read_buf
+- *
+- * @cmd1/vld:			some fixed controller register values
+- *
+- * @exec_opwrite:		flag to select correct number of code word
+- *				while reading status
+- */
+-struct qcom_nand_controller {
+-	struct device *dev;
+-
+-	void __iomem *base;
+-
+-	struct clk *core_clk;
+-	struct clk *aon_clk;
+-
+-	struct nandc_regs *regs;
+-	struct bam_transaction *bam_txn;
+-
+-	const struct qcom_nandc_props *props;
+-
+-	struct nand_controller controller;
+-	struct list_head host_list;
+-
+-	union {
+-		/* will be used only by QPIC for BAM DMA */
+-		struct {
+-			struct dma_chan *tx_chan;
+-			struct dma_chan *rx_chan;
+-			struct dma_chan *cmd_chan;
+-		};
+-
+-		/* will be used only by EBI2 for ADM DMA */
+-		struct {
+-			struct dma_chan *chan;
+-			unsigned int cmd_crci;
+-			unsigned int data_crci;
+-		};
+-	};
+-
+-	struct list_head desc_list;
+-
+-	u8		*data_buffer;
+-	__le32		*reg_read_buf;
+-
+-	phys_addr_t base_phys;
+-	dma_addr_t base_dma;
+-	dma_addr_t reg_read_dma;
+-
+-	int		buf_size;
+-	int		buf_count;
+-	int		buf_start;
+-	unsigned int	max_cwperpage;
+-
+-	int reg_read_pos;
+-
+-	u32 cmd1, vld;
+-	bool exec_opwrite;
+-};
++#include <linux/mtd/nand-qpic-common.h>
+ 
+ /*
+  * NAND special boot partitions
+@@ -530,97 +120,6 @@ struct qcom_nand_host {
+ 	bool bch_enabled;
+ };
+ 
+-/*
+- * This data type corresponds to the NAND controller properties which varies
+- * among different NAND controllers.
+- * @ecc_modes - ecc mode for NAND
+- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+- * @supports_bam - whether NAND controller is using Bus Access Manager (BAM)
+- * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
+- * @qpic_version2 - flag to indicate QPIC IP version 2
+- * @use_codeword_fixup - whether NAND has different layout for boot partitions
+- */
+-struct qcom_nandc_props {
+-	u32 ecc_modes;
+-	u32 dev_cmd_reg_start;
+-	bool supports_bam;
+-	bool nandc_part_of_qpic;
+-	bool qpic_version2;
+-	bool use_codeword_fixup;
+-};
+-
+-/* Frees the BAM transaction memory */
+-static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-	struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-	devm_kfree(nandc->dev, bam_txn);
+-}
+-
+-/* Allocates and Initializes the BAM transaction */
+-static struct bam_transaction *
+-qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-	struct bam_transaction *bam_txn;
+-	size_t bam_txn_size;
+-	unsigned int num_cw = nandc->max_cwperpage;
+-	void *bam_txn_buf;
+-
+-	bam_txn_size =
+-		sizeof(*bam_txn) + num_cw *
+-		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+-		(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+-		(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+-
+-	bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
+-	if (!bam_txn_buf)
+-		return NULL;
+-
+-	bam_txn = bam_txn_buf;
+-	bam_txn_buf += sizeof(*bam_txn);
+-
+-	bam_txn->bam_ce = bam_txn_buf;
+-	bam_txn_buf +=
+-		sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+-
+-	bam_txn->cmd_sgl = bam_txn_buf;
+-	bam_txn_buf +=
+-		sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+-
+-	bam_txn->data_sgl = bam_txn_buf;
+-
+-	init_completion(&bam_txn->txn_done);
+-
+-	return bam_txn;
+-}
+-
+-/* Clears the BAM transaction indexes */
+-static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+-{
+-	struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-	if (!nandc->props->supports_bam)
+-		return;
+-
+-	memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
+-	bam_txn->last_data_desc = NULL;
+-
+-	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+-		      QPIC_PER_CW_CMD_SGL);
+-	sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+-		      QPIC_PER_CW_DATA_SGL);
+-
+-	reinit_completion(&bam_txn->txn_done);
+-}
+-
+-/* Callback for DMA descriptor completion */
+-static void qcom_qpic_bam_dma_done(void *data)
+-{
+-	struct bam_transaction *bam_txn = data;
+-
+-	complete(&bam_txn->txn_done);
+-}
+-
+ static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+ {
+ 	return container_of(chip, struct qcom_nand_host, chip);
+@@ -629,8 +128,8 @@ static struct qcom_nand_host *to_qcom_na
+ static struct qcom_nand_controller *
+ get_qcom_nand_controller(struct nand_chip *chip)
+ {
+-	return container_of(chip->controller, struct qcom_nand_controller,
+-			    controller);
++	return (struct qcom_nand_controller *)
++		((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
+ }
+ 
+ static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+@@ -644,23 +143,6 @@ static void nandc_write(struct qcom_nand
+ 	iowrite32(val, nandc->base + offset);
+ }
+ 
+-static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+-{
+-	if (!nandc->props->supports_bam)
+-		return;
+-
+-	if (is_cpu)
+-		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+-					MAX_REG_RD *
+-					sizeof(*nandc->reg_read_buf),
+-					DMA_FROM_DEVICE);
+-	else
+-		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+-					   MAX_REG_RD *
+-					   sizeof(*nandc->reg_read_buf),
+-					   DMA_FROM_DEVICE);
+-}
+-
+ /* Helper to check whether this is the last CW or not */
+ static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
+ {
+@@ -820,356 +302,6 @@ static void update_rw_regs(struct qcom_n
+ }
+ 
+ /*
+- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
+- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
+- * which will be submitted to DMA engine.
+- */
+-static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+-				       struct dma_chan *chan,
+-				       unsigned long flags)
+-{
+-	struct desc_info *desc;
+-	struct scatterlist *sgl;
+-	unsigned int sgl_cnt;
+-	int ret;
+-	struct bam_transaction *bam_txn = nandc->bam_txn;
+-	enum dma_transfer_direction dir_eng;
+-	struct dma_async_tx_descriptor *dma_desc;
+-
+-	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+-	if (!desc)
+-		return -ENOMEM;
+-
+-	if (chan == nandc->cmd_chan) {
+-		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+-		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+-		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+-		dir_eng = DMA_MEM_TO_DEV;
+-		desc->dir = DMA_TO_DEVICE;
+-	} else if (chan == nandc->tx_chan) {
+-		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+-		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+-		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+-		dir_eng = DMA_MEM_TO_DEV;
+-		desc->dir = DMA_TO_DEVICE;
+-	} else {
+-		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+-		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+-		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+-		dir_eng = DMA_DEV_TO_MEM;
+-		desc->dir = DMA_FROM_DEVICE;
+-	}
+-
+-	sg_mark_end(sgl + sgl_cnt - 1);
+-	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+-	if (ret == 0) {
+-		dev_err(nandc->dev, "failure in mapping desc\n");
+-		kfree(desc);
+-		return -ENOMEM;
+-	}
+-
+-	desc->sgl_cnt = sgl_cnt;
+-	desc->bam_sgl = sgl;
+-
+-	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+-					   flags);
+-
+-	if (!dma_desc) {
+-		dev_err(nandc->dev, "failure in prep desc\n");
+-		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+-		kfree(desc);
+-		return -EINVAL;
+-	}
+-
+-	desc->dma_desc = dma_desc;
+-
+-	/* update last data/command descriptor */
+-	if (chan == nandc->cmd_chan)
+-		bam_txn->last_cmd_desc = dma_desc;
+-	else
+-		bam_txn->last_data_desc = dma_desc;
+-
+-	list_add_tail(&desc->node, &nandc->desc_list);
+-
+-	return 0;
+-}
+-
+-/*
+- * Prepares the command descriptor for BAM DMA which will be used for NAND
+- * register reads and writes. The command descriptor requires the command
+- * to be formed in command element type so this function uses the command
+- * element from bam transaction ce array and fills the same with required
+- * data. A single SGL can contain multiple command elements so
+- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
+- * after the current command element.
+- */
+-static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+-				      int reg_off, const void *vaddr,
+-				      int size, unsigned int flags)
+-{
+-	int bam_ce_size;
+-	int i, ret;
+-	struct bam_cmd_element *bam_ce_buffer;
+-	struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+-
+-	/* fill the command desc */
+-	for (i = 0; i < size; i++) {
+-		if (read)
+-			bam_prep_ce(&bam_ce_buffer[i],
+-				    nandc_reg_phys(nandc, reg_off + 4 * i),
+-				    BAM_READ_COMMAND,
+-				    reg_buf_dma_addr(nandc,
+-						     (__le32 *)vaddr + i));
+-		else
+-			bam_prep_ce_le32(&bam_ce_buffer[i],
+-					 nandc_reg_phys(nandc, reg_off + 4 * i),
+-					 BAM_WRITE_COMMAND,
+-					 *((__le32 *)vaddr + i));
+-	}
+-
+-	bam_txn->bam_ce_pos += size;
+-
+-	/* use the separate sgl after this command */
+-	if (flags & NAND_BAM_NEXT_SGL) {
+-		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+-		bam_ce_size = (bam_txn->bam_ce_pos -
+-				bam_txn->bam_ce_start) *
+-				sizeof(struct bam_cmd_element);
+-		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+-			   bam_ce_buffer, bam_ce_size);
+-		bam_txn->cmd_sgl_pos++;
+-		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+-
+-		if (flags & NAND_BAM_NWD) {
+-			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-							  DMA_PREP_FENCE |
+-							  DMA_PREP_CMD);
+-			if (ret)
+-				return ret;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-/*
+- * Prepares the data descriptor for BAM DMA which will be used for NAND
+- * data reads and writes.
+- */
+-static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+-				       const void *vaddr, int size, unsigned int flags)
+-{
+-	int ret;
+-	struct bam_transaction *bam_txn = nandc->bam_txn;
+-
+-	if (read) {
+-		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+-			   vaddr, size);
+-		bam_txn->rx_sgl_pos++;
+-	} else {
+-		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+-			   vaddr, size);
+-		bam_txn->tx_sgl_pos++;
+-
+-		/*
+-		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
+-		 * is not set, form the DMA descriptor
+-		 */
+-		if (!(flags & NAND_BAM_NO_EOT)) {
+-			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+-							  DMA_PREP_INTERRUPT);
+-			if (ret)
+-				return ret;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+-				  int reg_off, const void *vaddr, int size,
+-				  bool flow_control)
+-{
+-	struct desc_info *desc;
+-	struct dma_async_tx_descriptor *dma_desc;
+-	struct scatterlist *sgl;
+-	struct dma_slave_config slave_conf;
+-	struct qcom_adm_peripheral_config periph_conf = {};
+-	enum dma_transfer_direction dir_eng;
+-	int ret;
+-
+-	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+-	if (!desc)
+-		return -ENOMEM;
+-
+-	sgl = &desc->adm_sgl;
+-
+-	sg_init_one(sgl, vaddr, size);
+-
+-	if (read) {
+-		dir_eng = DMA_DEV_TO_MEM;
+-		desc->dir = DMA_FROM_DEVICE;
+-	} else {
+-		dir_eng = DMA_MEM_TO_DEV;
+-		desc->dir = DMA_TO_DEVICE;
+-	}
+-
+-	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+-	if (ret == 0) {
+-		ret = -ENOMEM;
+-		goto err;
+-	}
+-
+-	memset(&slave_conf, 0x00, sizeof(slave_conf));
+-
+-	slave_conf.device_fc = flow_control;
+-	if (read) {
+-		slave_conf.src_maxburst = 16;
+-		slave_conf.src_addr = nandc->base_dma + reg_off;
+-		if (nandc->data_crci) {
+-			periph_conf.crci = nandc->data_crci;
+-			slave_conf.peripheral_config = &periph_conf;
+-			slave_conf.peripheral_size = sizeof(periph_conf);
+-		}
+-	} else {
+-		slave_conf.dst_maxburst = 16;
+-		slave_conf.dst_addr = nandc->base_dma + reg_off;
+-		if (nandc->cmd_crci) {
+-			periph_conf.crci = nandc->cmd_crci;
+-			slave_conf.peripheral_config = &periph_conf;
+-			slave_conf.peripheral_size = sizeof(periph_conf);
+-		}
+-	}
+-
+-	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+-	if (ret) {
+-		dev_err(nandc->dev, "failed to configure dma channel\n");
+-		goto err;
+-	}
+-
+-	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+-	if (!dma_desc) {
+-		dev_err(nandc->dev, "failed to prepare desc\n");
+-		ret = -EINVAL;
+-		goto err;
+-	}
+-
+-	desc->dma_desc = dma_desc;
+-
+-	list_add_tail(&desc->node, &nandc->desc_list);
+-
+-	return 0;
+-err:
+-	kfree(desc);
+-
+-	return ret;
+-}
+-
+-/*
+- * qcom_read_reg_dma:	prepares a descriptor to read a given number of
+- *			contiguous registers to the reg_read_buf pointer
+- *
+- * @first:		offset of the first register in the contiguous block
+- * @num_regs:		number of registers to read
+- * @flags:		flags to control DMA descriptor preparation
+- */
+-static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+-			     int num_regs, unsigned int flags)
+-{
+-	bool flow_control = false;
+-	void *vaddr;
+-
+-	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+-	nandc->reg_read_pos += num_regs;
+-
+-	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+-		first = dev_cmd_reg_addr(nandc, first);
+-
+-	if (nandc->props->supports_bam)
+-		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+-					     num_regs, flags);
+-
+-	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+-		flow_control = true;
+-
+-	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+-				 num_regs * sizeof(u32), flow_control);
+-}
+-
+-/*
+- * qcom_write_reg_dma:	prepares a descriptor to write a given number of
+- *			contiguous registers
+- *
+- * @vaddr:		contiguous memory from where register value will
+- *			be written
+- * @first:		offset of the first register in the contiguous block
+- * @num_regs:		number of registers to write
+- * @flags:		flags to control DMA descriptor preparation
+- */
+-static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+-			      int first, int num_regs, unsigned int flags)
+-{
+-	bool flow_control = false;
+-
+-	if (first == NAND_EXEC_CMD)
+-		flags |= NAND_BAM_NWD;
+-
+-	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+-		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+-
+-	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+-		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+-
+-	if (nandc->props->supports_bam)
+-		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+-					     num_regs, flags);
+-
+-	if (first == NAND_FLASH_CMD)
+-		flow_control = true;
+-
+-	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+-				 num_regs * sizeof(u32), flow_control);
+-}
+-
+-/*
+- * qcom_read_data_dma:	prepares a DMA descriptor to transfer data from the
+- *			controller's internal buffer to the buffer 'vaddr'
+- *
+- * @reg_off:		offset within the controller's data buffer
+- * @vaddr:		virtual address of the buffer we want to write to
+- * @size:		DMA transaction size in bytes
+- * @flags:		flags to control DMA descriptor preparation
+- */
+-static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-			      const u8 *vaddr, int size, unsigned int flags)
+-{
+-	if (nandc->props->supports_bam)
+-		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+-
+-	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+-}
+-
+-/*
+- * qcom_write_data_dma:	prepares a DMA descriptor to transfer data from
+- *			'vaddr' to the controller's internal buffer
+- *
+- * @reg_off:		offset within the controller's data buffer
+- * @vaddr:		virtual address of the buffer we want to read from
+- * @size:		DMA transaction size in bytes
+- * @flags:		flags to control DMA descriptor preparation
+- */
+-static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+-			       const u8 *vaddr, int size, unsigned int flags)
+-{
+-	if (nandc->props->supports_bam)
+-		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+-
+-	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+-}
+-
+-/*
+  * Helper to prepare DMA descriptors for configuring registers
+  * before reading a NAND page.
+  */
+@@ -1262,83 +394,6 @@ static void config_nand_cw_write(struct
+ 			   NAND_BAM_NEXT_SGL);
+ }
+ 
+-/* helpers to submit/free our list of dma descriptors */
+-static int qcom_submit_descs(struct qcom_nand_controller *nandc)
+-{
+-	struct desc_info *desc, *n;
+-	dma_cookie_t cookie = 0;
+-	struct bam_transaction *bam_txn = nandc->bam_txn;
+-	int ret = 0;
+-
+-	if (nandc->props->supports_bam) {
+-		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+-			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+-			if (ret)
+-				goto err_unmap_free_desc;
+-		}
+-
+-		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+-			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+-							  DMA_PREP_INTERRUPT);
+-			if (ret)
+-				goto err_unmap_free_desc;
+-		}
+-
+-		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+-			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+-							  DMA_PREP_CMD);
+-			if (ret)
+-				goto err_unmap_free_desc;
+-		}
+-	}
+-
+-	list_for_each_entry(desc, &nandc->desc_list, node)
+-		cookie = dmaengine_submit(desc->dma_desc);
+-
+-	if (nandc->props->supports_bam) {
+-		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+-		bam_txn->last_cmd_desc->callback_param = bam_txn;
+-
+-		dma_async_issue_pending(nandc->tx_chan);
+-		dma_async_issue_pending(nandc->rx_chan);
+-		dma_async_issue_pending(nandc->cmd_chan);
+-
+-		if (!wait_for_completion_timeout(&bam_txn->txn_done,
+-						 QPIC_NAND_COMPLETION_TIMEOUT))
+-			ret = -ETIMEDOUT;
+-	} else {
+-		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+-			ret = -ETIMEDOUT;
+-	}
+-
+-err_unmap_free_desc:
+-	/*
+-	 * Unmap the dma sg_list and free the desc allocated by both
+-	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+-	 */
+-	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+-		list_del(&desc->node);
+-
+-		if (nandc->props->supports_bam)
+-			dma_unmap_sg(nandc->dev, desc->bam_sgl,
+-				     desc->sgl_cnt, desc->dir);
+-		else
+-			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+-				     desc->dir);
+-
+-		kfree(desc);
+-	}
+-
+-	return ret;
+-}
+-
+-/* reset the register read buffer for next NAND operation */
+-static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+-{
+-	nandc->reg_read_pos = 0;
+-	qcom_nandc_dev_to_mem(nandc, false);
+-}
+-
+ /*
+  * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+  * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+@@ -2967,141 +2022,14 @@ static const struct nand_controller_ops
+ 	.exec_op = qcom_nand_exec_op,
+ };
+ 
+-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+-{
+-	if (nandc->props->supports_bam) {
+-		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+-			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+-					 MAX_REG_RD *
+-					 sizeof(*nandc->reg_read_buf),
+-					 DMA_FROM_DEVICE);
+-
+-		if (nandc->tx_chan)
+-			dma_release_channel(nandc->tx_chan);
+-
+-		if (nandc->rx_chan)
+-			dma_release_channel(nandc->rx_chan);
+-
+-		if (nandc->cmd_chan)
+-			dma_release_channel(nandc->cmd_chan);
+-	} else {
+-		if (nandc->chan)
+-			dma_release_channel(nandc->chan);
+-	}
+-}
+-
+-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+-{
+-	int ret;
+-
+-	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+-	if (ret) {
+-		dev_err(nandc->dev, "failed to set DMA mask\n");
+-		return ret;
+-	}
+-
+-	/*
+-	 * we use the internal buffer for reading ONFI params, reading small
+-	 * data like ID and status, and preforming read-copy-write operations
+-	 * when writing to a codeword partially. 532 is the maximum possible
+-	 * size of a codeword for our nand controller
+-	 */
+-	nandc->buf_size = 532;
+-
+-	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+-	if (!nandc->data_buffer)
+-		return -ENOMEM;
+-
+-	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+-	if (!nandc->regs)
+-		return -ENOMEM;
+-
+-	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+-					   sizeof(*nandc->reg_read_buf),
+-					   GFP_KERNEL);
+-	if (!nandc->reg_read_buf)
+-		return -ENOMEM;
+-
+-	if (nandc->props->supports_bam) {
+-		nandc->reg_read_dma =
+-			dma_map_single(nandc->dev, nandc->reg_read_buf,
+-				       MAX_REG_RD *
+-				       sizeof(*nandc->reg_read_buf),
+-				       DMA_FROM_DEVICE);
+-		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+-			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+-			return -EIO;
+-		}
+-
+-		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+-		if (IS_ERR(nandc->tx_chan)) {
+-			ret = PTR_ERR(nandc->tx_chan);
+-			nandc->tx_chan = NULL;
+-			dev_err_probe(nandc->dev, ret,
+-				      "tx DMA channel request failed\n");
+-			goto unalloc;
+-		}
+-
+-		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+-		if (IS_ERR(nandc->rx_chan)) {
+-			ret = PTR_ERR(nandc->rx_chan);
+-			nandc->rx_chan = NULL;
+-			dev_err_probe(nandc->dev, ret,
+-				      "rx DMA channel request failed\n");
+-			goto unalloc;
+-		}
+-
+-		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+-		if (IS_ERR(nandc->cmd_chan)) {
+-			ret = PTR_ERR(nandc->cmd_chan);
+-			nandc->cmd_chan = NULL;
+-			dev_err_probe(nandc->dev, ret,
+-				      "cmd DMA channel request failed\n");
+-			goto unalloc;
+-		}
+-
+-		/*
+-		 * Initially allocate BAM transaction to read ONFI param page.
+-		 * After detecting all the devices, this BAM transaction will
+-		 * be freed and the next BAM transaction will be allocated with
+-		 * maximum codeword size
+-		 */
+-		nandc->max_cwperpage = 1;
+-		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+-		if (!nandc->bam_txn) {
+-			dev_err(nandc->dev,
+-				"failed to allocate bam transaction\n");
+-			ret = -ENOMEM;
+-			goto unalloc;
+-		}
+-	} else {
+-		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+-		if (IS_ERR(nandc->chan)) {
+-			ret = PTR_ERR(nandc->chan);
+-			nandc->chan = NULL;
+-			dev_err_probe(nandc->dev, ret,
+-				      "rxtx DMA channel request failed\n");
+-			return ret;
+-		}
+-	}
+-
+-	INIT_LIST_HEAD(&nandc->desc_list);
+-	INIT_LIST_HEAD(&nandc->host_list);
+-
+-	nand_controller_init(&nandc->controller);
+-	nandc->controller.ops = &qcom_nandc_ops;
+-
+-	return 0;
+-unalloc:
+-	qcom_nandc_unalloc(nandc);
+-	return ret;
+-}
+-
+ /* one time setup of a few nand controller registers */
+ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+ {
+ 	u32 nand_ctrl;
+ 
++	nand_controller_init(nandc->controller);
++	nandc->controller->ops = &qcom_nandc_ops;
++
+ 	/* kill onenand */
+ 	if (!nandc->props->nandc_part_of_qpic)
+ 		nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+@@ -3240,7 +2168,7 @@ static int qcom_nand_host_init_and_regis
+ 	chip->legacy.block_bad		= qcom_nandc_block_bad;
+ 	chip->legacy.block_markbad	= qcom_nandc_block_markbad;
+ 
+-	chip->controller = &nandc->controller;
++	chip->controller = nandc->controller;
+ 	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
+ 			 NAND_SKIP_BBTSCAN;
+ 
+@@ -3323,17 +2251,21 @@ static int qcom_nandc_parse_dt(struct pl
+ static int qcom_nandc_probe(struct platform_device *pdev)
+ {
+ 	struct qcom_nand_controller *nandc;
++	struct nand_controller *controller;
+ 	const void *dev_data;
+ 	struct device *dev = &pdev->dev;
+ 	struct resource *res;
+ 	int ret;
+ 
+-	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
++	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
++			     GFP_KERNEL);
+ 	if (!nandc)
+ 		return -ENOMEM;
++	controller = (struct nand_controller *)&nandc[1];
+ 
+ 	platform_set_drvdata(pdev, nandc);
+ 	nandc->dev = dev;
++	nandc->controller = controller;
+ 
+ 	dev_data = of_device_get_match_data(dev);
+ 	if (!dev_data) {
+--- /dev/null
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -0,0 +1,468 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * QCOM QPIC common APIs header file
++ *
++ * Copyright (c) 2023 Qualcomm Inc.
++ * Authors:	Md sadre Alam	<[email protected]>
++ *
++ */
++#ifndef __MTD_NAND_QPIC_COMMON_H__
++#define __MTD_NAND_QPIC_COMMON_H__
++
++/* NANDc reg offsets */
++#define	NAND_FLASH_CMD			0x00
++#define	NAND_ADDR0			0x04
++#define	NAND_ADDR1			0x08
++#define	NAND_FLASH_CHIP_SELECT		0x0c
++#define	NAND_EXEC_CMD			0x10
++#define	NAND_FLASH_STATUS		0x14
++#define	NAND_BUFFER_STATUS		0x18
++#define	NAND_DEV0_CFG0			0x20
++#define	NAND_DEV0_CFG1			0x24
++#define	NAND_DEV0_ECC_CFG		0x28
++#define	NAND_AUTO_STATUS_EN		0x2c
++#define	NAND_DEV1_CFG0			0x30
++#define	NAND_DEV1_CFG1			0x34
++#define	NAND_READ_ID			0x40
++#define	NAND_READ_STATUS		0x44
++#define	NAND_DEV_CMD0			0xa0
++#define	NAND_DEV_CMD1			0xa4
++#define	NAND_DEV_CMD2			0xa8
++#define	NAND_DEV_CMD_VLD		0xac
++#define	SFLASHC_BURST_CFG		0xe0
++#define	NAND_ERASED_CW_DETECT_CFG	0xe8
++#define	NAND_ERASED_CW_DETECT_STATUS	0xec
++#define	NAND_EBI2_ECC_BUF_CFG		0xf0
++#define	FLASH_BUF_ACC			0x100
++
++#define	NAND_CTRL			0xf00
++#define	NAND_VERSION			0xf08
++#define	NAND_READ_LOCATION_0		0xf20
++#define	NAND_READ_LOCATION_1		0xf24
++#define	NAND_READ_LOCATION_2		0xf28
++#define	NAND_READ_LOCATION_3		0xf2c
++#define	NAND_READ_LOCATION_LAST_CW_0	0xf40
++#define	NAND_READ_LOCATION_LAST_CW_1	0xf44
++#define	NAND_READ_LOCATION_LAST_CW_2	0xf48
++#define	NAND_READ_LOCATION_LAST_CW_3	0xf4c
++
++/* dummy register offsets, used by qcom_write_reg_dma */
++#define	NAND_DEV_CMD1_RESTORE		0xdead
++#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
++
++/* NAND_FLASH_CMD bits */
++#define	PAGE_ACC			BIT(4)
++#define	LAST_PAGE			BIT(5)
++
++/* NAND_FLASH_CHIP_SELECT bits */
++#define	NAND_DEV_SEL			0
++#define	DM_EN				BIT(2)
++
++/* NAND_FLASH_STATUS bits */
++#define	FS_OP_ERR			BIT(4)
++#define	FS_READY_BSY_N			BIT(5)
++#define	FS_MPU_ERR			BIT(8)
++#define	FS_DEVICE_STS_ERR		BIT(16)
++#define	FS_DEVICE_WP			BIT(23)
++
++/* NAND_BUFFER_STATUS bits */
++#define	BS_UNCORRECTABLE_BIT		BIT(8)
++#define	BS_CORRECTABLE_ERR_MSK		0x1f
++
++/* NAND_DEVn_CFG0 bits */
++#define	DISABLE_STATUS_AFTER_WRITE	4
++#define	CW_PER_PAGE			6
++#define	UD_SIZE_BYTES			9
++#define	UD_SIZE_BYTES_MASK		GENMASK(18, 9)
++#define	ECC_PARITY_SIZE_BYTES_RS	19
++#define	SPARE_SIZE_BYTES		23
++#define	SPARE_SIZE_BYTES_MASK		GENMASK(26, 23)
++#define	NUM_ADDR_CYCLES			27
++#define	STATUS_BFR_READ			30
++#define	SET_RD_MODE_AFTER_STATUS	31
++
++/* NAND_DEVn_CFG0 bits */
++#define	DEV0_CFG1_ECC_DISABLE		0
++#define	WIDE_FLASH			1
++#define	NAND_RECOVERY_CYCLES		2
++#define	CS_ACTIVE_BSY			5
++#define	BAD_BLOCK_BYTE_NUM		6
++#define	BAD_BLOCK_IN_SPARE_AREA		16
++#define	WR_RD_BSY_GAP			17
++#define	ENABLE_BCH_ECC			27
++
++/* NAND_DEV0_ECC_CFG bits */
++#define	ECC_CFG_ECC_DISABLE		0
++#define	ECC_SW_RESET			1
++#define	ECC_MODE			4
++#define	ECC_PARITY_SIZE_BYTES_BCH	8
++#define	ECC_NUM_DATA_BYTES		16
++#define	ECC_NUM_DATA_BYTES_MASK		GENMASK(25, 16)
++#define	ECC_FORCE_CLK_OPEN		30
++
++/* NAND_DEV_CMD1 bits */
++#define	READ_ADDR			0
++
++/* NAND_DEV_CMD_VLD bits */
++#define	READ_START_VLD			BIT(0)
++#define	READ_STOP_VLD			BIT(1)
++#define	WRITE_START_VLD			BIT(2)
++#define	ERASE_START_VLD			BIT(3)
++#define	SEQ_READ_START_VLD		BIT(4)
++
++/* NAND_EBI2_ECC_BUF_CFG bits */
++#define	NUM_STEPS			0
++
++/* NAND_ERASED_CW_DETECT_CFG bits */
++#define	ERASED_CW_ECC_MASK		1
++#define	AUTO_DETECT_RES			0
++#define	MASK_ECC			BIT(ERASED_CW_ECC_MASK)
++#define	RESET_ERASED_DET		BIT(AUTO_DETECT_RES)
++#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
++#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
++#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)
++
++/* NAND_ERASED_CW_DETECT_STATUS bits */
++#define	PAGE_ALL_ERASED			BIT(7)
++#define	CODEWORD_ALL_ERASED		BIT(6)
++#define	PAGE_ERASED			BIT(5)
++#define	CODEWORD_ERASED			BIT(4)
++#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
++#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
++
++/* NAND_READ_LOCATION_n bits */
++#define READ_LOCATION_OFFSET		0
++#define READ_LOCATION_SIZE		16
++#define READ_LOCATION_LAST		31
++
++/* Version Mask */
++#define	NAND_VERSION_MAJOR_MASK		0xf0000000
++#define	NAND_VERSION_MAJOR_SHIFT	28
++#define	NAND_VERSION_MINOR_MASK		0x0fff0000
++#define	NAND_VERSION_MINOR_SHIFT	16
++
++/* NAND OP_CMDs */
++#define	OP_PAGE_READ			0x2
++#define	OP_PAGE_READ_WITH_ECC		0x3
++#define	OP_PAGE_READ_WITH_ECC_SPARE	0x4
++#define	OP_PAGE_READ_ONFI_READ		0x5
++#define	OP_PROGRAM_PAGE			0x6
++#define	OP_PAGE_PROGRAM_WITH_ECC	0x7
++#define	OP_PROGRAM_PAGE_SPARE		0x9
++#define	OP_BLOCK_ERASE			0xa
++#define	OP_CHECK_STATUS			0xc
++#define	OP_FETCH_ID			0xb
++#define	OP_RESET_DEVICE			0xd
++
++/* Default Value for NAND_DEV_CMD_VLD */
++#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
++					 ERASE_START_VLD | SEQ_READ_START_VLD)
++
++/* NAND_CTRL bits */
++#define	BAM_MODE_EN			BIT(0)
++
++/*
++ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
++ * the driver calls the chunks 'step' or 'codeword' interchangeably
++ */
++#define	NANDC_STEP_SIZE			512
++
++/*
++ * the largest page size we support is 8K, this will have 16 steps/codewords
++ * of 512 bytes each
++ */
++#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)
++
++/* we read at most 3 registers per codeword scan */
++#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)
++
++/* ECC modes supported by the controller */
++#define	ECC_NONE	BIT(0)
++#define	ECC_RS_4BIT	BIT(1)
++#define	ECC_BCH_4BIT	BIT(2)
++#define	ECC_BCH_8BIT	BIT(3)
++
++/*
++ * Returns the actual register address for all NAND_DEV_ registers
++ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
++ */
++#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
++
++/* Returns the NAND register physical address */
++#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
++
++/* Returns the dma address for reg read buffer */
++#define reg_buf_dma_addr(chip, vaddr) \
++	((chip)->reg_read_dma + \
++	((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
++
++#define QPIC_PER_CW_CMD_ELEMENTS	32
++#define QPIC_PER_CW_CMD_SGL		32
++#define QPIC_PER_CW_DATA_SGL		8
++
++#define QPIC_NAND_COMPLETION_TIMEOUT	msecs_to_jiffies(2000)
++
++/*
++ * Flags used in DMA descriptor preparation helper functions
++ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
++ */
++/* Don't set the EOT in current tx BAM sgl */
++#define NAND_BAM_NO_EOT			BIT(0)
++/* Set the NWD flag in current BAM sgl */
++#define NAND_BAM_NWD			BIT(1)
++/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
++#define NAND_BAM_NEXT_SGL		BIT(2)
++/*
++ * Erased codeword status is being used two times in single transfer so this
++ * flag will determine the current value of erased codeword status register
++ */
++#define NAND_ERASED_CW_SET		BIT(4)
++
++#define MAX_ADDRESS_CYCLE		5
++
++/*
++ * This data type corresponds to the BAM transaction which will be used for all
++ * NAND transfers.
++ * @bam_ce - the array of BAM command elements
++ * @cmd_sgl - sgl for NAND BAM command pipe
++ * @data_sgl - sgl for NAND BAM consumer/producer pipe
++ * @last_data_desc - last DMA desc in data channel (tx/rx).
++ * @last_cmd_desc - last DMA desc in command channel.
++ * @txn_done - completion for NAND transfer.
++ * @bam_ce_pos - the index in bam_ce which is available for next sgl
++ * @bam_ce_start - the index in bam_ce which marks the start position ce
++ *		   for current sgl. It will be used for size calculation
++ *		   for current sgl
++ * @cmd_sgl_pos - current index in command sgl.
++ * @cmd_sgl_start - start index in command sgl.
++ * @tx_sgl_pos - current index in data sgl for tx.
++ * @tx_sgl_start - start index in data sgl for tx.
++ * @rx_sgl_pos - current index in data sgl for rx.
++ * @rx_sgl_start - start index in data sgl for rx.
++ */
++struct bam_transaction {
++	struct bam_cmd_element *bam_ce;
++	struct scatterlist *cmd_sgl;
++	struct scatterlist *data_sgl;
++	struct dma_async_tx_descriptor *last_data_desc;
++	struct dma_async_tx_descriptor *last_cmd_desc;
++	struct completion txn_done;
++	u32 bam_ce_pos;
++	u32 bam_ce_start;
++	u32 cmd_sgl_pos;
++	u32 cmd_sgl_start;
++	u32 tx_sgl_pos;
++	u32 tx_sgl_start;
++	u32 rx_sgl_pos;
++	u32 rx_sgl_start;
++};
++
++/*
++ * This data type corresponds to the nand dma descriptor
++ * @dma_desc - low level DMA engine descriptor
++ * @list - list for desc_info
++ *
++ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
++ *	      ADM
++ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
++ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
++ * @dir - DMA transfer direction
++ */
++struct desc_info {
++	struct dma_async_tx_descriptor *dma_desc;
++	struct list_head node;
++
++	union {
++		struct scatterlist adm_sgl;
++		struct {
++			struct scatterlist *bam_sgl;
++			int sgl_cnt;
++		};
++	};
++	enum dma_data_direction dir;
++};
++
++/*
++ * holds the current register values that we want to write. acts as a contiguous
++ * chunk of memory which we use to write the controller registers through DMA.
++ */
++struct nandc_regs {
++	__le32 cmd;
++	__le32 addr0;
++	__le32 addr1;
++	__le32 chip_sel;
++	__le32 exec;
++
++	__le32 cfg0;
++	__le32 cfg1;
++	__le32 ecc_bch_cfg;
++
++	__le32 clrflashstatus;
++	__le32 clrreadstatus;
++
++	__le32 cmd1;
++	__le32 vld;
++
++	__le32 orig_cmd1;
++	__le32 orig_vld;
++
++	__le32 ecc_buf_cfg;
++	__le32 read_location0;
++	__le32 read_location1;
++	__le32 read_location2;
++	__le32 read_location3;
++	__le32 read_location_last0;
++	__le32 read_location_last1;
++	__le32 read_location_last2;
++	__le32 read_location_last3;
++
++	__le32 erased_cw_detect_cfg_clr;
++	__le32 erased_cw_detect_cfg_set;
++};
++
++/*
++ * NAND controller data struct
++ *
++ * @dev:			parent device
++ *
++ * @base:			MMIO base
++ *
++ * @core_clk:			controller clock
++ * @aon_clk:			another controller clock
++ *
++ * @regs:			a contiguous chunk of memory for DMA register
++ *				writes. contains the register values to be
++ *				written to controller
++ *
++ * @props:			properties of current NAND controller,
++ *				initialized via DT match data
++ *
++ * @controller:			base controller structure
++ * @host_list:			list containing all the chips attached to the
++ *				controller
++ *
++ * @chan:			dma channel
++ * @cmd_crci:			ADM DMA CRCI for command flow control
++ * @data_crci:			ADM DMA CRCI for data flow control
++ *
++ * @desc_list:			DMA descriptor list (list of desc_infos)
++ *
++ * @data_buffer:		our local DMA buffer for page read/writes,
++ *				used when we can't use the buffer provided
++ *				by upper layers directly
++ * @reg_read_buf:		local buffer for reading back registers via DMA
++ *
++ * @base_phys:			physical base address of controller registers
++ * @base_dma:			dma base address of controller registers
++ * @reg_read_dma:		contains dma address for register read buffer
++ *
++ * @buf_size/count/start:	markers for chip->legacy.read_buf/write_buf
++ *				functions
++ * @max_cwperpage:		maximum QPIC codewords required. calculated
++ *				from all connected NAND devices pagesize
++ *
++ * @reg_read_pos:		marker for data read in reg_read_buf
++ *
++ * @cmd1/vld:			some fixed controller register values
++ *
++ * @exec_opwrite:		flag to select correct number of code word
++ *				while reading status
++ */
++struct qcom_nand_controller {
++	struct device *dev;
++
++	void __iomem *base;
++
++	struct clk *core_clk;
++	struct clk *aon_clk;
++
++	struct nandc_regs *regs;
++	struct bam_transaction *bam_txn;
++
++	const struct qcom_nandc_props *props;
++
++	struct nand_controller *controller;
++	struct list_head host_list;
++
++	union {
++		/* will be used only by QPIC for BAM DMA */
++		struct {
++			struct dma_chan *tx_chan;
++			struct dma_chan *rx_chan;
++			struct dma_chan *cmd_chan;
++		};
++
++		/* will be used only by EBI2 for ADM DMA */
++		struct {
++			struct dma_chan *chan;
++			unsigned int cmd_crci;
++			unsigned int data_crci;
++		};
++	};
++
++	struct list_head desc_list;
++
++	u8		*data_buffer;
++	__le32		*reg_read_buf;
++
++	phys_addr_t base_phys;
++	dma_addr_t base_dma;
++	dma_addr_t reg_read_dma;
++
++	int		buf_size;
++	int		buf_count;
++	int		buf_start;
++	unsigned int	max_cwperpage;
++
++	int reg_read_pos;
++
++	u32 cmd1, vld;
++	bool exec_opwrite;
++};
++
++/*
++ * This data type corresponds to the NAND controller properties which varies
++ * among different NAND controllers.
++ * @ecc_modes - ecc mode for NAND
++ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
++ * @supports_bam - whether NAND controller is using BAM
++ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
++ * @qpic_version2 - flag to indicate QPIC IP version 2
++ * @use_codeword_fixup - whether NAND has different layout for boot partitions
++ */
++struct qcom_nandc_props {
++	u32 ecc_modes;
++	u32 dev_cmd_reg_start;
++	bool supports_bam;
++	bool nandc_part_of_qpic;
++	bool qpic_version2;
++	bool use_codeword_fixup;
++};
++
++void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
++struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
++void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
++void qcom_qpic_bam_dma_done(void *data);
++void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
++int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
++				struct dma_chan *chan, unsigned long flags);
++int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
++			       int reg_off, const void *vaddr, int size, unsigned int flags);
++int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
++				const void *vaddr, int size, unsigned int flags);
++int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
++			   const void *vaddr, int size, bool flow_control);
++int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
++		      unsigned int flags);
++int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
++		       int num_regs, unsigned int flags);
++int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
++		       int size, unsigned int flags);
++int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
++			int size, unsigned int flags);
++int qcom_submit_descs(struct qcom_nand_controller *nandc);
++void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
++void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
++int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
++#endif
++

+ 198 - 0
target/linux/generic/backport-6.6/413-04-v6.14-mtd-rawnand-qcom-use-FIELD_PREP-and-GENMASK.patch

@@ -0,0 +1,198 @@
+From 0c08080fd71cd5dd59643104b39d3c89d793ab3c Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <[email protected]>
+Date: Wed, 20 Nov 2024 14:45:03 +0530
+Subject: [PATCH 4/4] mtd: rawnand: qcom: use FIELD_PREP and GENMASK
+
+Use the bitfield macro FIELD_PREP, and GENMASK to
+do the shift and mask in one go. This makes the code
+more readable.
+
+Reviewed-by: Konrad Dybcio <[email protected]>
+Signed-off-by: Md Sadre Alam <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c    | 97 ++++++++++++++--------------
+ include/linux/mtd/nand-qpic-common.h | 31 +++++----
+ 2 files changed, 67 insertions(+), 61 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -281,7 +281,7 @@ static void update_rw_regs(struct qcom_n
+ 				(num_cw - 1) << CW_PER_PAGE);
+ 
+ 		cfg1 = cpu_to_le32(host->cfg1_raw);
+-		ecc_bch_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
++		ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+ 	}
+ 
+ 	nandc->regs->cmd = cmd;
+@@ -1494,42 +1494,41 @@ static int qcom_nand_attach_chip(struct
+ 	host->cw_size = host->cw_data + ecc->bytes;
+ 	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
+ 
+-	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+-				| host->cw_data << UD_SIZE_BYTES
+-				| 0 << DISABLE_STATUS_AFTER_WRITE
+-				| 5 << NUM_ADDR_CYCLES
+-				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+-				| 0 << STATUS_BFR_READ
+-				| 1 << SET_RD_MODE_AFTER_STATUS
+-				| host->spare_bytes << SPARE_SIZE_BYTES;
+-
+-	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+-				| 0 <<  CS_ACTIVE_BSY
+-				| bad_block_byte << BAD_BLOCK_BYTE_NUM
+-				| 0 << BAD_BLOCK_IN_SPARE_AREA
+-				| 2 << WR_RD_BSY_GAP
+-				| wide_bus << WIDE_FLASH
+-				| host->bch_enabled << ENABLE_BCH_ECC;
+-
+-	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+-				| host->cw_size << UD_SIZE_BYTES
+-				| 5 << NUM_ADDR_CYCLES
+-				| 0 << SPARE_SIZE_BYTES;
+-
+-	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+-				| 0 << CS_ACTIVE_BSY
+-				| 17 << BAD_BLOCK_BYTE_NUM
+-				| 1 << BAD_BLOCK_IN_SPARE_AREA
+-				| 2 << WR_RD_BSY_GAP
+-				| wide_bus << WIDE_FLASH
+-				| 1 << DEV0_CFG1_ECC_DISABLE;
+-
+-	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
+-				| 0 << ECC_SW_RESET
+-				| host->cw_data << ECC_NUM_DATA_BYTES
+-				| 1 << ECC_FORCE_CLK_OPEN
+-				| ecc_mode << ECC_MODE
+-				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
++	host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++		     FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
++		     FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
++		     FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++		     FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
++		     FIELD_PREP(STATUS_BFR_READ, 0) |
++		     FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
++		     FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
++
++	host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++		     FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
++		     FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
++		     FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++		     FIELD_PREP(WIDE_FLASH, wide_bus) |
++		     FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
++
++	host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++			 FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
++			 FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++			 FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++	host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++			 FIELD_PREP(CS_ACTIVE_BSY, 0) |
++			 FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++			 FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++			 FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++			 FIELD_PREP(WIDE_FLASH, wide_bus) |
++			 FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
++
++	host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
++			    FIELD_PREP(ECC_SW_RESET, 0) |
++			    FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
++			    FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
++			    FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
++			    FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
+ 
+ 	if (!nandc->props->qpic_version2)
+ 		host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+@@ -1882,21 +1881,21 @@ static int qcom_param_page_type_exec(str
+ 	nandc->regs->addr0 = 0;
+ 	nandc->regs->addr1 = 0;
+ 
+-	nandc->regs->cfg0 = cpu_to_le32(0 << CW_PER_PAGE |
+-					512 << UD_SIZE_BYTES |
+-					5 << NUM_ADDR_CYCLES |
+-					0 << SPARE_SIZE_BYTES);
+-
+-	nandc->regs->cfg1 = cpu_to_le32(7 << NAND_RECOVERY_CYCLES |
+-					0 << CS_ACTIVE_BSY |
+-					17 << BAD_BLOCK_BYTE_NUM |
+-					1 << BAD_BLOCK_IN_SPARE_AREA |
+-					2 << WR_RD_BSY_GAP |
+-					0 << WIDE_FLASH |
+-					1 << DEV0_CFG1_ECC_DISABLE);
++	host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
++		     FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
++		     FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++		     FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++	host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++		     FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++		     FIELD_PREP(CS_ACTIVE_BSY, 0) |
++		     FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++		     FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++		     FIELD_PREP(WIDE_FLASH, 0) |
++		     FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+ 
+ 	if (!nandc->props->qpic_version2)
+-		nandc->regs->ecc_buf_cfg = cpu_to_le32(1 << ECC_CFG_ECC_DISABLE);
++		nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
+ 
+ 	/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+ 	if (!nandc->props->qpic_version2) {
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -70,35 +70,42 @@
+ #define	BS_CORRECTABLE_ERR_MSK		0x1f
+ 
+ /* NAND_DEVn_CFG0 bits */
+-#define	DISABLE_STATUS_AFTER_WRITE	4
++#define	DISABLE_STATUS_AFTER_WRITE	BIT(4)
+ #define	CW_PER_PAGE			6
++#define	CW_PER_PAGE_MASK		GENMASK(8, 6)
+ #define	UD_SIZE_BYTES			9
+ #define	UD_SIZE_BYTES_MASK		GENMASK(18, 9)
+-#define	ECC_PARITY_SIZE_BYTES_RS	19
++#define	ECC_PARITY_SIZE_BYTES_RS	GENMASK(22, 19)
+ #define	SPARE_SIZE_BYTES		23
+ #define	SPARE_SIZE_BYTES_MASK		GENMASK(26, 23)
+ #define	NUM_ADDR_CYCLES			27
+-#define	STATUS_BFR_READ			30
+-#define	SET_RD_MODE_AFTER_STATUS	31
++#define	NUM_ADDR_CYCLES_MASK		GENMASK(29, 27)
++#define	STATUS_BFR_READ			BIT(30)
++#define	SET_RD_MODE_AFTER_STATUS	BIT(31)
+ 
+ /* NAND_DEVn_CFG0 bits */
+-#define	DEV0_CFG1_ECC_DISABLE		0
+-#define	WIDE_FLASH			1
++#define	DEV0_CFG1_ECC_DISABLE		BIT(0)
++#define	WIDE_FLASH			BIT(1)
+ #define	NAND_RECOVERY_CYCLES		2
+-#define	CS_ACTIVE_BSY			5
++#define	NAND_RECOVERY_CYCLES_MASK	GENMASK(4, 2)
++#define	CS_ACTIVE_BSY			BIT(5)
+ #define	BAD_BLOCK_BYTE_NUM		6
+-#define	BAD_BLOCK_IN_SPARE_AREA		16
++#define	BAD_BLOCK_BYTE_NUM_MASK		GENMASK(15, 6)
++#define	BAD_BLOCK_IN_SPARE_AREA		BIT(16)
+ #define	WR_RD_BSY_GAP			17
+-#define	ENABLE_BCH_ECC			27
++#define	WR_RD_BSY_GAP_MASK		GENMASK(22, 17)
++#define	ENABLE_BCH_ECC			BIT(27)
+ 
+ /* NAND_DEV0_ECC_CFG bits */
+-#define	ECC_CFG_ECC_DISABLE		0
+-#define	ECC_SW_RESET			1
++#define	ECC_CFG_ECC_DISABLE		BIT(0)
++#define	ECC_SW_RESET			BIT(1)
+ #define	ECC_MODE			4
++#define	ECC_MODE_MASK			GENMASK(5, 4)
+ #define	ECC_PARITY_SIZE_BYTES_BCH	8
++#define	ECC_PARITY_SIZE_BYTES_BCH_MASK	GENMASK(12, 8)
+ #define	ECC_NUM_DATA_BYTES		16
+ #define	ECC_NUM_DATA_BYTES_MASK		GENMASK(25, 16)
+-#define	ECC_FORCE_CLK_OPEN		30
++#define	ECC_FORCE_CLK_OPEN		BIT(30)
+ 
+ /* NAND_DEV_CMD1 bits */
+ #define	READ_ADDR			0

+ 64 - 0
target/linux/generic/backport-6.6/414-v6.14-mtd-rawnand-qcom-fix-broken-config-in-qcom_param_pag.patch

@@ -0,0 +1,64 @@
+From 9d4ffbcfde283f2a87ea45128ddf7e6651facdd9 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 7 Feb 2025 20:42:38 +0100
+Subject: [PATCH] mtd: rawnand: qcom: fix broken config in
+ qcom_param_page_type_exec
+
+Fix broken config in qcom_param_page_type_exec caused by copy-paste error
+from commit 0c08080fd71c ("mtd: rawnand: qcom: use FIELD_PREP and GENMASK")
+
+In qcom_param_page_type_exec the value needs to be set to
+nandc->regs->cfg0 instead of host->cfg0. This wrong configuration caused
+the Qcom NANDC driver to malfunction on any device that makes use of it
+(IPQ806x, IPQ40xx, IPQ807x, IPQ60xx) with the following error:
+
+[    0.885369] nand: device found, Manufacturer ID: 0x2c, Chip ID: 0xaa
+[    0.885909] nand: Micron NAND 256MiB 1,8V 8-bit
+[    0.892499] nand: 256 MiB, SLC, erase size: 128 KiB, page size: 2048, OOB size: 64
+[    0.896823] nand: ECC (step, strength) = (512, 8) does not fit in OOB
+[    0.896836] qcom-nandc 79b0000.nand-controller: No valid ECC settings possible
+[    0.910996] bam-dma-engine 7984000.dma-controller: Cannot free busy channel
+[    0.918070] qcom-nandc: probe of 79b0000.nand-controller failed with error -28
+
+Restore original configuration fix the problem and makes the driver work
+again.
+
+Cc: [email protected]
+Fixes: 0c08080fd71c ("mtd: rawnand: qcom: use FIELD_PREP and GENMASK")
+Signed-off-by: Christian Marangi <[email protected]>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -1881,18 +1881,18 @@ static int qcom_param_page_type_exec(str
+ 	nandc->regs->addr0 = 0;
+ 	nandc->regs->addr1 = 0;
+ 
+-	host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+-		     FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+-		     FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+-		     FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++	nandc->regs->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
++			    FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
++			    FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
++			    FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+ 
+-	host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+-		     FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+-		     FIELD_PREP(CS_ACTIVE_BSY, 0) |
+-		     FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+-		     FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+-		     FIELD_PREP(WIDE_FLASH, 0) |
+-		     FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
++	nandc->regs->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
++			    FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++			    FIELD_PREP(CS_ACTIVE_BSY, 0) |
++			    FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++			    FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
++			    FIELD_PREP(WIDE_FLASH, 0) |
++			    FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+ 
+ 	if (!nandc->props->qpic_version2)
+ 		nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);

+ 77 - 0
target/linux/generic/backport-6.6/415-v6.14-mtd-rawnand-qcom-Fix-build-issue-on-x86-architecture.patch

@@ -0,0 +1,77 @@
+From b9371866799d67a80be0ea9e01bd41987db22f26 Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <[email protected]>
+Date: Mon, 6 Jan 2025 18:45:58 +0530
+Subject: [PATCH] mtd: rawnand: qcom: Fix build issue on x86 architecture
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fix a buffer overflow issue in qcom_clear_bam_transaction by using
+struct_group to group related fields and avoid FORTIFY_SOURCE warnings.
+
+On x86 architecture, the following error occurs due to warnings being
+treated as errors:
+
+In function ‘fortify_memset_chk’,
+    inlined from ‘qcom_clear_bam_transaction’ at
+drivers/mtd/nand/qpic_common.c:88:2:
+./include/linux/fortify-string.h:480:25: error: call to ‘__write_overflow_field’
+declared with attribute warning: detected write beyond size of field
+(1st parameter); maybe use struct_group()? [-Werror=attribute-warning]
+  480 |                         __write_overflow_field(p_size_field, size);
+      |                         ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+  LD [M]  drivers/mtd/nand/nandcore.o
+  CC [M]  drivers/w1/masters/mxc_w1.o
+cc1: all warnings being treated as errors
+
+This patch addresses the issue by grouping the related fields in
+struct bam_transaction using struct_group and updating the memset call
+accordingly.
+
+Fixes: 8c52932da5e6 ("mtd: rawnand: qcom: cleanup qcom_nandc driver")
+Signed-off-by: Md Sadre Alam <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+---
+ drivers/mtd/nand/qpic_common.c       |  2 +-
+ include/linux/mtd/nand-qpic-common.h | 19 +++++++++++--------
+ 2 files changed, 12 insertions(+), 9 deletions(-)
+
+--- a/drivers/mtd/nand/qpic_common.c
++++ b/drivers/mtd/nand/qpic_common.c
+@@ -85,7 +85,7 @@ void qcom_clear_bam_transaction(struct q
+ 	if (!nandc->props->supports_bam)
+ 		return;
+ 
+-	memset(&bam_txn->bam_ce_pos, 0, sizeof(u32) * 8);
++	memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
+ 	bam_txn->last_data_desc = NULL;
+ 
+ 	sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -254,14 +254,17 @@ struct bam_transaction {
+ 	struct dma_async_tx_descriptor *last_data_desc;
+ 	struct dma_async_tx_descriptor *last_cmd_desc;
+ 	struct completion txn_done;
+-	u32 bam_ce_pos;
+-	u32 bam_ce_start;
+-	u32 cmd_sgl_pos;
+-	u32 cmd_sgl_start;
+-	u32 tx_sgl_pos;
+-	u32 tx_sgl_start;
+-	u32 rx_sgl_pos;
+-	u32 rx_sgl_start;
++	struct_group(bam_positions,
++		u32 bam_ce_pos;
++		u32 bam_ce_start;
++		u32 cmd_sgl_pos;
++		u32 cmd_sgl_start;
++		u32 tx_sgl_pos;
++		u32 tx_sgl_start;
++		u32 rx_sgl_pos;
++		u32 rx_sgl_start;
++
++	);
+ };
+ 
+ /*

+ 1737 - 0
target/linux/generic/backport-6.6/416-v6.15-01-spi-spi-qpic-add-driver-for-QCOM-SPI-NAND-flash-Inte.patch

@@ -0,0 +1,1737 @@
+From 7304d1909080ef0c9da703500a97f46c98393fcd Mon Sep 17 00:00:00 2001
+From: Md Sadre Alam <[email protected]>
+Date: Mon, 24 Feb 2025 16:44:14 +0530
+Subject: [PATCH] spi: spi-qpic: add driver for QCOM SPI NAND flash Interface
+
+This driver implements support for the SPI-NAND mode of QCOM NAND Flash
+Interface as a SPI-MEM controller with pipelined ECC capability.
+
+Co-developed-by: Sricharan Ramabadhran <[email protected]>
+Signed-off-by: Sricharan Ramabadhran <[email protected]>
+Co-developed-by: Varadarajan Narayanan <[email protected]>
+Signed-off-by: Varadarajan Narayanan <[email protected]>
+Signed-off-by: Md Sadre Alam <[email protected]>
+Link: https://patch.msgid.link/[email protected]
+Signed-off-by: Mark Brown <[email protected]>
+---
+ drivers/mtd/nand/Makefile            |    4 +
+ drivers/spi/Kconfig                  |    9 +
+ drivers/spi/Makefile                 |    1 +
+ drivers/spi/spi-qpic-snand.c         | 1631 ++++++++++++++++++++++++++
+ include/linux/mtd/nand-qpic-common.h |    7 +
+ 5 files changed, 1652 insertions(+)
+ create mode 100644 drivers/spi/spi-qpic-snand.c
+
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -3,7 +3,11 @@
+ nandcore-objs := core.o bbt.o
+ obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+ obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
++ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
++obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
++else
+ obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
++endif
+ obj-y	+= onenand/
+ obj-y	+= raw/
+ obj-y	+= spi/
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -870,6 +870,15 @@ config SPI_QCOM_QSPI
+ 	help
+ 	  QSPI(Quad SPI) driver for Qualcomm QSPI controller.
+ 
++config SPI_QPIC_SNAND
++	bool "QPIC SNAND controller"
++	depends on ARCH_QCOM || COMPILE_TEST
++	select MTD
++	help
++	  QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
++	  QPIC controller supports both parallel nand and serial nand.
++	  This config will enable serial nand driver for QPIC controller.
++
+ config SPI_QUP
+ 	tristate "Qualcomm SPI controller with QUP interface"
+ 	depends on ARCH_QCOM || COMPILE_TEST
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -110,6 +110,7 @@ obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx-
+ obj-$(CONFIG_SPI_PXA2XX_PCI)		+= spi-pxa2xx-pci.o
+ obj-$(CONFIG_SPI_QCOM_GENI)		+= spi-geni-qcom.o
+ obj-$(CONFIG_SPI_QCOM_QSPI)		+= spi-qcom-qspi.o
++obj-$(CONFIG_SPI_QPIC_SNAND)            += spi-qpic-snand.o
+ obj-$(CONFIG_SPI_QUP)			+= spi-qup.o
+ obj-$(CONFIG_SPI_ROCKCHIP)		+= spi-rockchip.o
+ obj-$(CONFIG_SPI_ROCKCHIP_SFC)		+= spi-rockchip-sfc.o
+--- /dev/null
++++ b/drivers/spi/spi-qpic-snand.c
+@@ -0,0 +1,1631 @@
++/*
++ * SPDX-License-Identifier: GPL-2.0
++ *
++ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
++ *
++ * Authors:
++ *	Md Sadre Alam <[email protected]>
++ *	Sricharan R <[email protected]>
++ *	Varadarajan Narayanan <[email protected]>
++ */
++#include <linux/bitops.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/dma/qcom_adm.h>
++#include <linux/dma/qcom_bam_dma.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/mtd/nand-qpic-common.h>
++#include <linux/mtd/spinand.h>
++#include <linux/bitfield.h>
++
++#define NAND_FLASH_SPI_CFG		0xc0
++#define NAND_NUM_ADDR_CYCLES		0xc4
++#define NAND_BUSY_CHECK_WAIT_CNT	0xc8
++#define NAND_FLASH_FEATURES		0xf64
++
++/* QSPI NAND config reg bits */
++#define LOAD_CLK_CNTR_INIT_EN		BIT(28)
++#define CLK_CNTR_INIT_VAL_VEC		0x924
++#define CLK_CNTR_INIT_VAL_VEC_MASK	GENMASK(27, 16)
++#define FEA_STATUS_DEV_ADDR		0xc0
++#define FEA_STATUS_DEV_ADDR_MASK	GENMASK(15, 8)
++#define SPI_CFG				BIT(0)
++#define SPI_NUM_ADDR			0xDA4DB
++#define SPI_WAIT_CNT			0x10
++#define QPIC_QSPI_NUM_CS		1
++#define SPI_TRANSFER_MODE_x1		BIT(29)
++#define SPI_TRANSFER_MODE_x4		(3 << 29)
++#define SPI_WP				BIT(28)
++#define SPI_HOLD			BIT(27)
++#define QPIC_SET_FEATURE		BIT(31)
++
++#define SPINAND_RESET			0xff
++#define SPINAND_READID			0x9f
++#define SPINAND_GET_FEATURE		0x0f
++#define SPINAND_SET_FEATURE		0x1f
++#define SPINAND_READ			0x13
++#define SPINAND_ERASE			0xd8
++#define SPINAND_WRITE_EN		0x06
++#define SPINAND_PROGRAM_EXECUTE		0x10
++#define SPINAND_PROGRAM_LOAD		0x84
++
++#define ACC_FEATURE			0xe
++#define BAD_BLOCK_MARKER_SIZE		0x2
++#define OOB_BUF_SIZE			128
++#define ecceng_to_qspi(eng)		container_of(eng, struct qpic_spi_nand, ecc_eng)
++
++struct qpic_snand_op {
++	u32 cmd_reg;
++	u32 addr1_reg;
++	u32 addr2_reg;
++};
++
++struct snandc_read_status {
++	__le32 snandc_flash;
++	__le32 snandc_buffer;
++	__le32 snandc_erased_cw;
++};
++
++/*
++ * ECC state struct
++ * @corrected:		ECC corrected
++ * @bitflips:		Max bit flip
++ * @failed:		ECC failed
++ */
++struct qcom_ecc_stats {
++	u32 corrected;
++	u32 bitflips;
++	u32 failed;
++};
++
++struct qpic_ecc {
++	struct device *dev;
++	int ecc_bytes_hw;
++	int spare_bytes;
++	int bbm_size;
++	int ecc_mode;
++	int bytes;
++	int steps;
++	int step_size;
++	int strength;
++	int cw_size;
++	int cw_data;
++	u32 cfg0;
++	u32 cfg1;
++	u32 cfg0_raw;
++	u32 cfg1_raw;
++	u32 ecc_buf_cfg;
++	u32 ecc_bch_cfg;
++	u32 clrflashstatus;
++	u32 clrreadstatus;
++	bool bch_enabled;
++};
++
++struct qpic_spi_nand {
++	struct qcom_nand_controller *snandc;
++	struct spi_controller *ctlr;
++	struct mtd_info *mtd;
++	struct clk *iomacro_clk;
++	struct qpic_ecc *ecc;
++	struct qcom_ecc_stats ecc_stats;
++	struct nand_ecc_engine ecc_eng;
++	u8 *data_buf;
++	u8 *oob_buf;
++	u32 wlen;
++	__le32 addr1;
++	__le32 addr2;
++	__le32 cmd;
++	u32 num_cw;
++	bool oob_rw;
++	bool page_rw;
++	bool raw_rw;
++};
++
++static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
++					int reg, int cw_offset, int read_size,
++					int is_last_read_loc)
++{
++	__le32 locreg_val;
++	u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++		  ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
++		  << READ_LOCATION_LAST));
++
++	locreg_val = cpu_to_le32(val);
++
++	if (reg == NAND_READ_LOCATION_0)
++		snandc->regs->read_location0 = locreg_val;
++	else if (reg == NAND_READ_LOCATION_1)
++		snandc->regs->read_location1 = locreg_val;
++	else if (reg == NAND_READ_LOCATION_2)
++		snandc->regs->read_location1 = locreg_val;
++	else if (reg == NAND_READ_LOCATION_3)
++		snandc->regs->read_location3 = locreg_val;
++}
++
++static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
++				       int reg, int cw_offset, int read_size,
++				       int is_last_read_loc)
++{
++	__le32 locreg_val;
++	u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
++		  ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
++		  << READ_LOCATION_LAST));
++
++	locreg_val = cpu_to_le32(val);
++
++	if (reg == NAND_READ_LOCATION_LAST_CW_0)
++		snandc->regs->read_location_last0 = locreg_val;
++	else if (reg == NAND_READ_LOCATION_LAST_CW_1)
++		snandc->regs->read_location_last1 = locreg_val;
++	else if (reg == NAND_READ_LOCATION_LAST_CW_2)
++		snandc->regs->read_location_last2 = locreg_val;
++	else if (reg == NAND_READ_LOCATION_LAST_CW_3)
++		snandc->regs->read_location_last3 = locreg_val;
++}
++
++static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
++{
++	struct nand_ecc_engine *eng = nand->ecc.engine;
++	struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);
++
++	return qspi->snandc;
++}
++
++static int qcom_spi_init(struct qcom_nand_controller *snandc)
++{
++	u32 snand_cfg_val = 0x0;
++	int ret;
++
++	snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
++			FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
++			FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
++			FIELD_PREP(SPI_CFG, 0);
++
++	snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
++	snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
++	snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
++
++	snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
++	snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
++			   NAND_BAM_NEXT_SGL);
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
++		return ret;
++	}
++
++	return ret;
++}
++
++static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
++				  struct mtd_oob_region *oobregion)
++{
++	struct nand_device *nand = mtd_to_nanddev(mtd);
++	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++	struct qpic_ecc *qecc = snandc->qspi->ecc;
++
++	if (section > 1)
++		return -ERANGE;
++
++	oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
++	oobregion->offset = mtd->oobsize - oobregion->length;
++
++	return 0;
++}
++
++static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
++				   struct mtd_oob_region *oobregion)
++{
++	struct nand_device *nand = mtd_to_nanddev(mtd);
++	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++	struct qpic_ecc *qecc = snandc->qspi->ecc;
++
++	if (section)
++		return -ERANGE;
++
++	oobregion->length = qecc->steps * 4;
++	oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;
++
++	return 0;
++}
++
++static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
++	.ecc = qcom_spi_ooblayout_ecc,
++	.free = qcom_spi_ooblayout_free,
++};
++
++static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
++{
++	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
++	struct mtd_info *mtd = nanddev_to_mtd(nand);
++	int cwperpage, bad_block_byte;
++	struct qpic_ecc *ecc_cfg;
++
++	cwperpage = mtd->writesize / NANDC_STEP_SIZE;
++	snandc->qspi->num_cw = cwperpage;
++
++	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
++	if (!ecc_cfg)
++		return -ENOMEM;
++	snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
++					GFP_KERNEL);
++	if (!snandc->qspi->oob_buf)
++		return -ENOMEM;
++
++	memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
++
++	nand->ecc.ctx.priv = ecc_cfg;
++	snandc->qspi->mtd = mtd;
++
++	ecc_cfg->ecc_bytes_hw = 7;
++	ecc_cfg->spare_bytes = 4;
++	ecc_cfg->bbm_size = 1;
++	ecc_cfg->bch_enabled = true;
++	ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
++
++	ecc_cfg->steps = 4;
++	ecc_cfg->strength = 4;
++	ecc_cfg->step_size = 512;
++	ecc_cfg->cw_data = 516;
++	ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
++	bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
++
++	mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);
++
++	ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++			FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
++			FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
++			FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
++			FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
++			FIELD_PREP(STATUS_BFR_READ, 0) |
++			FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
++			FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);
++
++	ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
++			FIELD_PREP(CS_ACTIVE_BSY, 0) |
++			FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
++			FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
++			FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
++			FIELD_PREP(WIDE_FLASH, 0) |
++			FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);
++
++	ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
++			    FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
++			    FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
++			    FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
++
++	ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
++			    FIELD_PREP(CS_ACTIVE_BSY, 0) |
++			    FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
++			    FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
++			    FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
++			    FIELD_PREP(WIDE_FLASH, 0) |
++			    FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
++
++	ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
++			       FIELD_PREP(ECC_SW_RESET, 0) |
++			       FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
++			       FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
++			       FIELD_PREP(ECC_MODE_MASK, 0) |
++			       FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
++
++	ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS;
++	ecc_cfg->clrflashstatus = FS_READY_BSY_N;
++	ecc_cfg->clrreadstatus = 0xc0;
++
++	conf->step_size = ecc_cfg->step_size;
++	conf->strength = ecc_cfg->strength;
++
++	snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
++	snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
++
++	dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
++		ecc_cfg->strength, ecc_cfg->step_size);
++
++	return 0;
++}
++
++static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
++{
++	struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
++
++	kfree(ecc_cfg);
++}
++
++static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
++						 struct nand_page_io_req *req)
++{
++	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++	struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
++
++	snandc->qspi->ecc = ecc_cfg;
++	snandc->qspi->raw_rw = false;
++	snandc->qspi->oob_rw = false;
++	snandc->qspi->page_rw = false;
++
++	if (req->datalen)
++		snandc->qspi->page_rw = true;
++
++	if (req->ooblen)
++		snandc->qspi->oob_rw = true;
++
++	if (req->mode == MTD_OPS_RAW)
++		snandc->qspi->raw_rw = true;
++
++	return 0;
++}
++
++static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
++						struct nand_page_io_req *req)
++{
++	struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
++	struct mtd_info *mtd = nanddev_to_mtd(nand);
++
++	if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
++		return 0;
++
++	if (snandc->qspi->ecc_stats.failed)
++		mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
++	else
++		mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;
++
++	if (snandc->qspi->ecc_stats.failed)
++		return -EBADMSG;
++	else
++		return snandc->qspi->ecc_stats.bitflips;
++}
++
++static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
++	.init_ctx = qcom_spi_ecc_init_ctx_pipelined,
++	.cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
++	.prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
++	.finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
++};
++
++/* helper to configure location register values */
++static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
++				  int cw_offset, int read_size, int is_last_read_loc)
++{
++	int reg_base = NAND_READ_LOCATION_0;
++	int num_cw = snandc->qspi->num_cw;
++
++	if (cw == (num_cw - 1))
++		reg_base = NAND_READ_LOCATION_LAST_CW_0;
++
++	reg_base += reg * 4;
++
++	if (cw == (num_cw - 1))
++		return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
++						  read_size, is_last_read_loc);
++	else
++		return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
++						   read_size, is_last_read_loc);
++}
++
++static void
++qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
++{
++	__le32 *reg = &snandc->regs->read_location0;
++	int num_cw = snandc->qspi->num_cw;
++
++	qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
++	if (cw == (num_cw - 1)) {
++		reg = &snandc->regs->read_location_last0;
++		qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
++				   NAND_BAM_NEXT_SGL);
++	}
++
++	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++
++	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
++	qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
++			  NAND_BAM_NEXT_SGL);
++}
++
++static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	int ret;
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++	qcom_clear_bam_transaction(snandc);
++
++	snandc->regs->cmd = snandc->qspi->cmd;
++	snandc->regs->addr0 = snandc->qspi->addr1;
++	snandc->regs->addr1 = snandc->qspi->addr2;
++	snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE));
++	snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
++	snandc->regs->exec = cpu_to_le32(1);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failure to erase block\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
++						bool use_ecc, int cw)
++{
++	__le32 *reg = &snandc->regs->read_location0;
++	int num_cw = snandc->qspi->num_cw;
++
++	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
++			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
++			   NAND_ERASED_CW_DETECT_CFG, 1,
++			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++
++	if (cw == (num_cw - 1)) {
++		reg = &snandc->regs->read_location_last0;
++		qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
++	}
++	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++
++	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
++}
++
++static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
++				 const struct spi_mem_op *op)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	struct mtd_info *mtd = snandc->qspi->mtd;
++	int size, ret = 0;
++	int col,  bbpos;
++	u32 cfg0, cfg1, ecc_bch_cfg;
++	u32 num_cw = snandc->qspi->num_cw;
++
++	qcom_clear_bam_transaction(snandc);
++	qcom_clear_read_regs(snandc);
++
++	size = ecc_cfg->cw_size;
++	col = ecc_cfg->cw_size * (num_cw - 1);
++
++	memset(snandc->data_buffer, 0xff, size);
++	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
++	snandc->regs->addr1 = snandc->qspi->addr2;
++
++	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++		0 << CW_PER_PAGE;
++	cfg1 = ecc_cfg->cfg1_raw;
++	ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
++
++	snandc->regs->cmd = snandc->qspi->cmd;
++	snandc->regs->cfg0 = cpu_to_le32(cfg0);
++	snandc->regs->cfg1 = cpu_to_le32(cfg1);
++	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++	snandc->regs->exec = cpu_to_le32(1);
++
++	qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
++
++	qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);
++
++	qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failed to read last cw\n");
++		return ret;
++	}
++
++	qcom_nandc_dev_to_mem(snandc, true);
++	u32 flash = le32_to_cpu(snandc->reg_read_buf[0]);
++
++	if (flash & (FS_OP_ERR | FS_MPU_ERR))
++		return -EIO;
++
++	bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
++
++	if (snandc->data_buffer[bbpos] == 0xff)
++		snandc->data_buffer[bbpos + 1] = 0xff;
++	if (snandc->data_buffer[bbpos] != 0xff)
++		snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];
++
++	memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);
++
++	return ret;
++}
++
++static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
++{
++	struct snandc_read_status *buf;
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	int i, num_cw = snandc->qspi->num_cw;
++	bool flash_op_err = false, erased;
++	unsigned int max_bitflips = 0;
++	unsigned int uncorrectable_cws = 0;
++
++	snandc->qspi->ecc_stats.failed = 0;
++	snandc->qspi->ecc_stats.corrected = 0;
++
++	qcom_nandc_dev_to_mem(snandc, true);
++	buf = (struct snandc_read_status *)snandc->reg_read_buf;
++
++	for (i = 0; i < num_cw; i++, buf++) {
++		u32 flash, buffer, erased_cw;
++		int data_len, oob_len;
++
++		if (i == (num_cw - 1)) {
++			data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
++			oob_len = num_cw << 2;
++		} else {
++			data_len = ecc_cfg->cw_data;
++			oob_len = 0;
++		}
++
++		flash = le32_to_cpu(buf->snandc_flash);
++		buffer = le32_to_cpu(buf->snandc_buffer);
++		erased_cw = le32_to_cpu(buf->snandc_erased_cw);
++
++		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
++			if (ecc_cfg->bch_enabled)
++				erased = (erased_cw & ERASED_CW) == ERASED_CW;
++			else
++				erased = false;
++
++			if (!erased)
++				uncorrectable_cws |= BIT(i);
++
++		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
++			flash_op_err = true;
++		} else {
++			unsigned int stat;
++
++			stat = buffer & BS_CORRECTABLE_ERR_MSK;
++			snandc->qspi->ecc_stats.corrected += stat;
++			max_bitflips = max(max_bitflips, stat);
++		}
++
++		if (data_buf)
++			data_buf += data_len;
++		if (oob_buf)
++			oob_buf += oob_len + ecc_cfg->bytes;
++	}
++
++	if (flash_op_err)
++		return -EIO;
++
++	if (!uncorrectable_cws)
++		snandc->qspi->ecc_stats.bitflips = max_bitflips;
++	else
++		snandc->qspi->ecc_stats.failed++;
++
++	return 0;
++}
++
++static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
++{
++	int i;
++
++	qcom_nandc_dev_to_mem(snandc, true);
++
++	for (i = 0; i < cw_cnt; i++) {
++		u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
++
++		if (flash & (FS_OP_ERR | FS_MPU_ERR))
++			return -EIO;
++	}
++
++	return 0;
++}
++
++static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
++				u8 *oob_buf, int cw)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	struct mtd_info *mtd = snandc->qspi->mtd;
++	int data_size1, data_size2, oob_size1, oob_size2;
++	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
++	int raw_cw = cw;
++	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
++	int col;
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++	qcom_clear_bam_transaction(snandc);
++	raw_cw = num_cw - 1;
++
++	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++				0 << CW_PER_PAGE;
++	cfg1 = ecc_cfg->cfg1_raw;
++	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
++
++	col = ecc_cfg->cw_size * cw;
++
++	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
++	snandc->regs->addr1 = snandc->qspi->addr2;
++	snandc->regs->cmd = snandc->qspi->cmd;
++	snandc->regs->cfg0 = cpu_to_le32(cfg0);
++	snandc->regs->cfg1 = cpu_to_le32(cfg1);
++	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++	snandc->regs->exec = cpu_to_le32(1);
++
++	qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
++			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
++			   NAND_ERASED_CW_DETECT_CFG, 1,
++			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++
++	data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
++	oob_size1 = ecc_cfg->bbm_size;
++
++	if (cw == (num_cw - 1)) {
++		data_size2 = NANDC_STEP_SIZE - data_size1 -
++			     ((num_cw - 1) * 4);
++		oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
++			    ecc_cfg->spare_bytes;
++	} else {
++		data_size2 = ecc_cfg->cw_data - data_size1;
++		oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
++	}
++
++	qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
++	read_loc += data_size1;
++
++	qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
++	read_loc += oob_size1;
++
++	qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
++	read_loc += data_size2;
++
++	qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);
++
++	qcom_spi_config_cw_read(snandc, false, raw_cw);
++
++	qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
++	reg_off += data_size1;
++
++	qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
++	reg_off += oob_size1;
++
++	qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
++	reg_off += data_size2;
++
++	qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
++		return ret;
++	}
++
++	return qcom_spi_check_raw_flash_errors(snandc, 1);
++}
++
++static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
++				  const struct spi_mem_op *op)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	u8 *data_buf = NULL, *oob_buf = NULL;
++	int ret, cw;
++	u32 num_cw = snandc->qspi->num_cw;
++
++	if (snandc->qspi->page_rw)
++		data_buf = op->data.buf.in;
++
++	oob_buf = snandc->qspi->oob_buf;
++	memset(oob_buf, 0xff, OOB_BUF_SIZE);
++
++	for (cw = 0; cw < num_cw; cw++) {
++		ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
++		if (ret)
++			return ret;
++
++		if (data_buf)
++			data_buf += ecc_cfg->cw_data;
++		if (oob_buf)
++			oob_buf += ecc_cfg->bytes;
++	}
++
++	return 0;
++}
++
++static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
++				  const struct spi_mem_op *op)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
++	int ret, i;
++	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
++
++	data_buf = op->data.buf.in;
++	data_buf_start = data_buf;
++
++	oob_buf = snandc->qspi->oob_buf;
++	oob_buf_start = oob_buf;
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++
++	cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
++				(num_cw - 1) << CW_PER_PAGE;
++	cfg1 = ecc_cfg->cfg1;
++	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
++
++	snandc->regs->addr0 = snandc->qspi->addr1;
++	snandc->regs->addr1 = snandc->qspi->addr2;
++	snandc->regs->cmd = snandc->qspi->cmd;
++	snandc->regs->cfg0 = cpu_to_le32(cfg0);
++	snandc->regs->cfg1 = cpu_to_le32(cfg1);
++	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++	snandc->regs->exec = cpu_to_le32(1);
++
++	qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
++
++	qcom_clear_bam_transaction(snandc);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
++			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
++			   NAND_ERASED_CW_DETECT_CFG, 1,
++			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++
++	for (i = 0; i < num_cw; i++) {
++		int data_size, oob_size;
++
++		if (i == (num_cw - 1)) {
++			data_size = 512 - ((num_cw - 1) << 2);
++			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
++				    ecc_cfg->spare_bytes;
++		} else {
++			data_size = ecc_cfg->cw_data;
++			oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
++		}
++
++		if (data_buf && oob_buf) {
++			qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
++			qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
++		} else if (data_buf) {
++			qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
++		} else {
++			qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
++		}
++
++		qcom_spi_config_cw_read(snandc, true, i);
++
++		if (data_buf)
++			qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
++					   data_size, 0);
++		if (oob_buf) {
++			int j;
++
++			for (j = 0; j < ecc_cfg->bbm_size; j++)
++				*oob_buf++ = 0xff;
++
++			qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
++					   oob_buf, oob_size, 0);
++		}
++
++		if (data_buf)
++			data_buf += data_size;
++		if (oob_buf)
++			oob_buf += oob_size;
++	}
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failure to read page\n");
++		return ret;
++	}
++
++	return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
++}
++
++static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
++				  const struct spi_mem_op *op)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
++	int ret, i;
++	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
++
++	oob_buf = op->data.buf.in;
++	oob_buf_start = oob_buf;
++
++	data_buf_start = data_buf;
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++	qcom_clear_bam_transaction(snandc);
++
++	cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
++				(num_cw - 1) << CW_PER_PAGE;
++	cfg1 = ecc_cfg->cfg1;
++	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
++
++	snandc->regs->addr0 = snandc->qspi->addr1;
++	snandc->regs->addr1 = snandc->qspi->addr2;
++	snandc->regs->cmd = snandc->qspi->cmd;
++	snandc->regs->cfg0 = cpu_to_le32(cfg0);
++	snandc->regs->cfg1 = cpu_to_le32(cfg1);
++	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++	snandc->regs->exec = cpu_to_le32(1);
++
++	qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
++			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
++			   NAND_ERASED_CW_DETECT_CFG, 1,
++			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
++
++	for (i = 0; i < num_cw; i++) {
++		int data_size, oob_size;
++
++		if (i == (num_cw - 1)) {
++			data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
++			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
++				    ecc_cfg->spare_bytes;
++		} else {
++			data_size = ecc_cfg->cw_data;
++			oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
++		}
++
++		qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
++
++		qcom_spi_config_cw_read(snandc, true, i);
++
++		if (oob_buf) {
++			int j;
++
++			for (j = 0; j < ecc_cfg->bbm_size; j++)
++				*oob_buf++ = 0xff;
++
++			qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
++					   oob_buf, oob_size, 0);
++		}
++
++		if (oob_buf)
++			oob_buf += oob_size;
++	}
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failure to read oob\n");
++		return ret;
++	}
++
++	return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
++}
++
++static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
++			      const struct spi_mem_op *op)
++{
++	if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
++		return qcom_spi_read_page_raw(snandc, op);
++
++	if (snandc->qspi->page_rw)
++		return qcom_spi_read_page_ecc(snandc, op);
++
++	if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
++		return qcom_spi_read_last_cw(snandc, op);
++
++	if (snandc->qspi->oob_rw)
++		return qcom_spi_read_page_oob(snandc, op);
++
++	return 0;
++}
++
++static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
++{
++	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
++			   1, NAND_BAM_NEXT_SGL);
++}
++
++static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
++{
++	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++	qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
++	qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
++			   NAND_BAM_NEXT_SGL);
++}
++
++static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
++				const struct spi_mem_op *op)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	struct mtd_info *mtd = snandc->qspi->mtd;
++	u8 *data_buf = NULL, *oob_buf = NULL;
++	int i, ret;
++	int num_cw = snandc->qspi->num_cw;
++	u32 cfg0, cfg1, ecc_bch_cfg;
++
++	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
++			(num_cw - 1) << CW_PER_PAGE;
++	cfg1 = ecc_cfg->cfg1_raw;
++	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
++
++	data_buf = snandc->qspi->data_buf;
++
++	oob_buf = snandc->qspi->oob_buf;
++	memset(oob_buf, 0xff, OOB_BUF_SIZE);
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++	qcom_clear_bam_transaction(snandc);
++
++	snandc->regs->addr0 = snandc->qspi->addr1;
++	snandc->regs->addr1 = snandc->qspi->addr2;
++	snandc->regs->cmd = snandc->qspi->cmd;
++	snandc->regs->cfg0 = cpu_to_le32(cfg0);
++	snandc->regs->cfg1 = cpu_to_le32(cfg1);
++	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
++	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
++	snandc->regs->exec = cpu_to_le32(1);
++
++	qcom_spi_config_page_write(snandc);
++
++	for (i = 0; i < num_cw; i++) {
++		int data_size1, data_size2, oob_size1, oob_size2;
++		int reg_off = FLASH_BUF_ACC;
++
++		data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
++		oob_size1 = ecc_cfg->bbm_size;
++
++		if (i == (num_cw - 1)) {
++			data_size2 = NANDC_STEP_SIZE - data_size1 -
++				     ((num_cw - 1) << 2);
++			oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
++				    ecc_cfg->spare_bytes;
++		} else {
++			data_size2 = ecc_cfg->cw_data - data_size1;
++			oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
++		}
++
++		qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
++				    NAND_BAM_NO_EOT);
++		reg_off += data_size1;
++		data_buf += data_size1;
++
++		qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
++				    NAND_BAM_NO_EOT);
++		oob_buf += oob_size1;
++		reg_off += oob_size1;
++
++		qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
++				    NAND_BAM_NO_EOT);
++		reg_off += data_size2;
++		data_buf += data_size2;
++
++		qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
++		oob_buf += oob_size2;
++
++		qcom_spi_config_cw_write(snandc);
++	}
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failure to write raw page\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
++				const struct spi_mem_op *op)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	u8 *data_buf = NULL, *oob_buf = NULL;
++	int i, ret;
++	int num_cw = snandc->qspi->num_cw;
++	u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
++
++	cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
++				(num_cw - 1) << CW_PER_PAGE;
++	cfg1 = ecc_cfg->cfg1;
++	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
++	ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
++
++	if (snandc->qspi->data_buf)
++		data_buf = snandc->qspi->data_buf;
++
++	oob_buf = snandc->qspi->oob_buf;
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++	qcom_clear_bam_transaction(snandc);
++
++	snandc->regs->addr0 = snandc->qspi->addr1;
++	snandc->regs->addr1 = snandc->qspi->addr2;
++	snandc->regs->cmd = snandc->qspi->cmd;
++	snandc->regs->cfg0 = cpu_to_le32(cfg0);
++	snandc->regs->cfg1 = cpu_to_le32(cfg1);
++	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++	snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
++	snandc->regs->exec = cpu_to_le32(1);
++
++	qcom_spi_config_page_write(snandc);
++
++	for (i = 0; i < num_cw; i++) {
++		int data_size, oob_size;
++
++		if (i == (num_cw - 1)) {
++			data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
++			oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
++				    ecc_cfg->spare_bytes;
++		} else {
++			data_size = ecc_cfg->cw_data;
++			oob_size = ecc_cfg->bytes;
++		}
++
++		if (data_buf)
++			qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
++					    i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);
++
++		if (i == (num_cw - 1)) {
++			if (oob_buf) {
++				oob_buf += ecc_cfg->bbm_size;
++				qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
++						    oob_buf, oob_size, 0);
++			}
++		}
++
++		qcom_spi_config_cw_write(snandc);
++
++		if (data_buf)
++			data_buf += data_size;
++		if (oob_buf)
++			oob_buf += oob_size;
++	}
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failure to write page\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
++				const struct spi_mem_op *op)
++{
++	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
++	u8 *oob_buf = NULL;
++	int ret, col, data_size, oob_size;
++	int num_cw = snandc->qspi->num_cw;
++	u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
++
++	cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
++				(num_cw - 1) << CW_PER_PAGE;
++	cfg1 = ecc_cfg->cfg1;
++	ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
++	ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
++
++	col = ecc_cfg->cw_size * (num_cw - 1);
++
++	oob_buf = snandc->qspi->data_buf;
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++	qcom_clear_bam_transaction(snandc);
++	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
++	snandc->regs->addr1 = snandc->qspi->addr2;
++	snandc->regs->cmd = snandc->qspi->cmd;
++	snandc->regs->cfg0 = cpu_to_le32(cfg0);
++	snandc->regs->cfg1 = cpu_to_le32(cfg1);
++	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
++	snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
++	snandc->regs->exec = cpu_to_le32(1);
++
++	/* calculate the data and oob size for the last codeword/step */
++	data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
++	oob_size = snandc->qspi->mtd->oobavail;
++
++	memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
++	/* override new oob content to last codeword */
++	mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
++				    oob_buf, 0, snandc->qspi->mtd->oobavail);
++	qcom_spi_config_page_write(snandc);
++	qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
++	qcom_spi_config_cw_write(snandc);
++
++	ret = qcom_submit_descs(snandc);
++	if (ret) {
++		dev_err(snandc->dev, "failure to write oob\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
++				    const struct spi_mem_op *op)
++{
++	if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
++		return qcom_spi_program_raw(snandc, op);
++
++	if (snandc->qspi->page_rw)
++		return qcom_spi_program_ecc(snandc, op);
++
++	if (snandc->qspi->oob_rw)
++		return qcom_spi_program_oob(snandc, op);
++
++	return 0;
++}
++
++static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd)
++{
++	switch (opcode) {
++	case SPINAND_RESET:
++		*cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
++		break;
++	case SPINAND_READID:
++		*cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
++		break;
++	case SPINAND_GET_FEATURE:
++		*cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
++		break;
++	case SPINAND_SET_FEATURE:
++		*cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
++			QPIC_SET_FEATURE);
++		break;
++	case SPINAND_READ:
++		if (snandc->qspi->raw_rw) {
++			*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
++					SPI_WP | SPI_HOLD | OP_PAGE_READ);
++		} else {
++			*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
++					SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
++		}
++
++		break;
++	case SPINAND_ERASE:
++		*cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
++			SPI_HOLD | SPI_TRANSFER_MODE_x1;
++		break;
++	case SPINAND_WRITE_EN:
++		*cmd = SPINAND_WRITE_EN;
++		break;
++	case SPINAND_PROGRAM_EXECUTE:
++		*cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
++				SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
++		break;
++	case SPINAND_PROGRAM_LOAD:
++		*cmd = SPINAND_PROGRAM_LOAD;
++		break;
++	default:
++		dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
++		return -EOPNOTSUPP;
++	}
++
++	return 0;
++}
++
++static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
++			       const struct spi_mem_op *op)
++{
++	int ret;
++	u32 cmd;
++
++	ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
++	if (ret < 0)
++		return ret;
++
++	if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
++		snandc->qspi->data_buf = (u8 *)op->data.buf.out;
++
++	return 0;
++}
++
++static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
++				 const struct spi_mem_op *op)
++{
++	struct qpic_snand_op s_op = {};
++	u32 cmd;
++	int ret, opcode;
++
++	ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd);
++	if (ret < 0)
++		return ret;
++
++	s_op.cmd_reg = cmd;
++	s_op.addr1_reg = op->addr.val;
++	s_op.addr2_reg = 0;
++
++	opcode = op->cmd.opcode;
++
++	switch (opcode) {
++	case SPINAND_WRITE_EN:
++		return 0;
++	case SPINAND_PROGRAM_EXECUTE:
++		s_op.addr1_reg = op->addr.val << 16;
++		s_op.addr2_reg = op->addr.val >> 16 & 0xff;
++		snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
++		snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
++		snandc->qspi->cmd = cpu_to_le32(cmd);
++		return qcom_spi_program_execute(snandc, op);
++	case SPINAND_READ:
++		s_op.addr1_reg = (op->addr.val << 16);
++		s_op.addr2_reg = op->addr.val >> 16 & 0xff;
++		snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
++		snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
++		snandc->qspi->cmd = cpu_to_le32(cmd);
++		return 0;
++	case SPINAND_ERASE:
++		s_op.addr2_reg = (op->addr.val >> 16) & 0xffff;
++		s_op.addr1_reg = op->addr.val;
++		snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
++		snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
++		snandc->qspi->cmd = cpu_to_le32(cmd);
++		qcom_spi_block_erase(snandc);
++		return 0;
++	default:
++		break;
++	}
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++	qcom_clear_bam_transaction(snandc);
++
++	snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg);
++	snandc->regs->exec = cpu_to_le32(1);
++	snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg);
++	snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg);
++
++	qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
++	qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
++
++	ret = qcom_submit_descs(snandc);
++	if (ret)
++		dev_err(snandc->dev, "failure in submitting cmd descriptor\n");
++
++	return ret;
++}
++
++static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
++{
++	int ret, val, opcode;
++	bool copy = false, copy_ftr = false;
++
++	ret = qcom_spi_send_cmdaddr(snandc, op);
++	if (ret)
++		return ret;
++
++	snandc->buf_count = 0;
++	snandc->buf_start = 0;
++	qcom_clear_read_regs(snandc);
++	qcom_clear_bam_transaction(snandc);
++	opcode = op->cmd.opcode;
++
++	switch (opcode) {
++	case SPINAND_READID:
++		snandc->buf_count = 4;
++		qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
++		copy = true;
++		break;
++	case SPINAND_GET_FEATURE:
++		snandc->buf_count = 4;
++		qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
++		copy_ftr = true;
++		break;
++	case SPINAND_SET_FEATURE:
++		snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
++		qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
++				   NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
++		break;
++	case SPINAND_PROGRAM_EXECUTE:
++	case SPINAND_WRITE_EN:
++	case SPINAND_RESET:
++	case SPINAND_ERASE:
++	case SPINAND_READ:
++		return 0;
++	default:
++		return -EOPNOTSUPP;
++	}
++
++	ret = qcom_submit_descs(snandc);
++	if (ret)
++		dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode);
++
++	if (copy) {
++		qcom_nandc_dev_to_mem(snandc, true);
++		memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
++	}
++
++	if (copy_ftr) {
++		qcom_nandc_dev_to_mem(snandc, true);
++		val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
++		val >>= 8;
++		memcpy(op->data.buf.in, &val, snandc->buf_count);
++	}
++
++	return ret;
++}
++
++static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
++{
++	if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
++		return false;
++
++	if (op->data.dir == SPI_MEM_DATA_IN) {
++		if (op->addr.buswidth == 4 && op->data.buswidth == 4)
++			return true;
++
++		if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
++			return true;
++
++	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
++		if (op->data.buswidth == 4)
++			return true;
++		if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
++			return true;
++	}
++
++	return false;
++}
++
++static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
++{
++	if (!spi_mem_default_supports_op(mem, op))
++		return false;
++
++	if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
++		return false;
++
++	if (qcom_spi_is_page_op(op))
++		return true;
++
++	return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
++		(!op->dummy.nbytes || op->dummy.buswidth == 1) &&
++		(!op->data.nbytes || op->data.buswidth == 1));
++}
++
++static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
++{
++	struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);
++
++	dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
++		op->addr.val, op->addr.buswidth, op->addr.nbytes,
++		op->data.buswidth, op->data.nbytes);
++
++	if (qcom_spi_is_page_op(op)) {
++		if (op->data.dir == SPI_MEM_DATA_IN)
++			return qcom_spi_read_page(snandc, op);
++		if (op->data.dir == SPI_MEM_DATA_OUT)
++			return qcom_spi_write_page(snandc, op);
++	} else {
++		return qcom_spi_io_op(snandc, op);
++	}
++
++	return 0;
++}
++
++static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
++	.supports_op = qcom_spi_supports_op,
++	.exec_op = qcom_spi_exec_op,
++};
++
++static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
++	.ecc = true,
++};
++
++static int qcom_spi_probe(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct spi_controller *ctlr;
++	struct qcom_nand_controller *snandc;
++	struct qpic_spi_nand *qspi;
++	struct qpic_ecc *ecc;
++	struct resource *res;
++	const void *dev_data;
++	int ret;
++
++	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
++	if (!ecc)
++		return -ENOMEM;
++
++	qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
++	if (!qspi)
++		return -ENOMEM;
++
++	ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
++	if (!ctlr)
++		return -ENOMEM;
++
++	platform_set_drvdata(pdev, ctlr);
++
++	snandc = spi_controller_get_devdata(ctlr);
++	qspi->snandc = snandc;
++
++	snandc->dev = dev;
++	snandc->qspi = qspi;
++	snandc->qspi->ctlr = ctlr;
++	snandc->qspi->ecc = ecc;
++
++	dev_data = of_device_get_match_data(dev);
++	if (!dev_data) {
++		dev_err(&pdev->dev, "failed to get device data\n");
++		return -ENODEV;
++	}
++
++	snandc->props = dev_data;
++	snandc->dev = &pdev->dev;
++
++	snandc->core_clk = devm_clk_get(dev, "core");
++	if (IS_ERR(snandc->core_clk))
++		return PTR_ERR(snandc->core_clk);
++
++	snandc->aon_clk = devm_clk_get(dev, "aon");
++	if (IS_ERR(snandc->aon_clk))
++		return PTR_ERR(snandc->aon_clk);
++
++	snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
++	if (IS_ERR(snandc->qspi->iomacro_clk))
++		return PTR_ERR(snandc->qspi->iomacro_clk);
++
++	snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
++	if (IS_ERR(snandc->base))
++		return PTR_ERR(snandc->base);
++
++	snandc->base_phys = res->start;
++	snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
++					    DMA_BIDIRECTIONAL, 0);
++	if (dma_mapping_error(dev, snandc->base_dma))
++		return -ENXIO;
++
++	ret = clk_prepare_enable(snandc->core_clk);
++	if (ret)
++		goto err_dis_core_clk;
++
++	ret = clk_prepare_enable(snandc->aon_clk);
++	if (ret)
++		goto err_dis_aon_clk;
++
++	ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
++	if (ret)
++		goto err_dis_iom_clk;
++
++	ret = qcom_nandc_alloc(snandc);
++	if (ret)
++		goto err_snand_alloc;
++
++	ret = qcom_spi_init(snandc);
++	if (ret)
++		goto err_spi_init;
++
++	/* setup ECC engine */
++	snandc->qspi->ecc_eng.dev = &pdev->dev;
++	snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
++	snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
++	snandc->qspi->ecc_eng.priv = snandc;
++
++	ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret);
++		goto err_spi_init;
++	}
++
++	ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
++	ctlr->mem_ops = &qcom_spi_mem_ops;
++	ctlr->mem_caps = &qcom_spi_mem_caps;
++	ctlr->dev.of_node = pdev->dev.of_node;
++	ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
++			    SPI_TX_QUAD | SPI_RX_QUAD;
++
++	ret = spi_register_controller(ctlr);
++	if (ret) {
++		dev_err(&pdev->dev, "spi_register_controller failed.\n");
++		goto err_spi_init;
++	}
++
++	return 0;
++
++err_spi_init:
++	qcom_nandc_unalloc(snandc);
++err_snand_alloc:
++	clk_disable_unprepare(snandc->qspi->iomacro_clk);
++err_dis_iom_clk:
++	clk_disable_unprepare(snandc->aon_clk);
++err_dis_aon_clk:
++	clk_disable_unprepare(snandc->core_clk);
++err_dis_core_clk:
++	dma_unmap_resource(dev, res->start, resource_size(res),
++			   DMA_BIDIRECTIONAL, 0);
++	return ret;
++}
++
++static void qcom_spi_remove(struct platform_device *pdev)
++{
++	struct spi_controller *ctlr = platform_get_drvdata(pdev);
++	struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
++	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++	spi_unregister_controller(ctlr);
++
++	qcom_nandc_unalloc(snandc);
++
++	clk_disable_unprepare(snandc->aon_clk);
++	clk_disable_unprepare(snandc->core_clk);
++	clk_disable_unprepare(snandc->qspi->iomacro_clk);
++
++	dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
++			   DMA_BIDIRECTIONAL, 0);
++}
++
++static const struct qcom_nandc_props ipq9574_snandc_props = {
++	.dev_cmd_reg_start = 0x7000,
++	.supports_bam = true,
++};
++
++static const struct of_device_id qcom_snandc_of_match[] = {
++	{
++		.compatible = "qcom,ipq9574-snand",
++		.data = &ipq9574_snandc_props,
++	},
++	{}
++}
++MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
++
++static struct platform_driver qcom_spi_driver = {
++	.driver = {
++		.name		= "qcom_snand",
++		.of_match_table = qcom_snandc_of_match,
++	},
++	.probe = qcom_spi_probe,
++	.remove_new = qcom_spi_remove,
++};
++module_platform_driver(qcom_spi_driver);
++
++MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
++MODULE_AUTHOR("Md Sadre Alam <[email protected]>");
++MODULE_LICENSE("GPL");
++
+--- a/include/linux/mtd/nand-qpic-common.h
++++ b/include/linux/mtd/nand-qpic-common.h
+@@ -325,6 +325,10 @@ struct nandc_regs {
+ 	__le32 read_location_last1;
+ 	__le32 read_location_last2;
+ 	__le32 read_location_last3;
++	__le32 spi_cfg;
++	__le32 num_addr_cycle;
++	__le32 busy_wait_cnt;
++	__le32 flash_feature;
+ 
+ 	__le32 erased_cw_detect_cfg_clr;
+ 	__le32 erased_cw_detect_cfg_set;
+@@ -339,6 +343,7 @@ struct nandc_regs {
+  *
+  * @core_clk:			controller clock
+  * @aon_clk:			another controller clock
++ * @iomacro_clk:		io macro clock
+  *
+  * @regs:			a contiguous chunk of memory for DMA register
+  *				writes. contains the register values to be
+@@ -348,6 +353,7 @@ struct nandc_regs {
+  *				initialized via DT match data
+  *
+  * @controller:			base controller structure
++ * @qspi:			qpic spi structure
+  * @host_list:			list containing all the chips attached to the
+  *				controller
+  *
+@@ -392,6 +398,7 @@ struct qcom_nand_controller {
+ 	const struct qcom_nandc_props *props;
+ 
+ 	struct nand_controller *controller;
++	struct qpic_spi_nand *qspi;
+ 	struct list_head host_list;
+ 
+ 	union {

+ 28 - 0
target/linux/generic/backport-6.6/416-v6.15-02-spi-spi-qpic-snand-Fix-ECC_CFG_ECC_DISABLE-shift-in-.patch

@@ -0,0 +1,28 @@
+From cf1ba3cb245020459f2ca446b7a7b199839f5d83 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <[email protected]>
+Date: Thu, 6 Mar 2025 12:40:01 +0300
+Subject: [PATCH] spi: spi-qpic-snand: Fix ECC_CFG_ECC_DISABLE shift in
+ qcom_spi_read_last_cw()
+
+The ECC_CFG_ECC_DISABLE define is BIT(0).  It's supposed to be used
+directly instead of used as a shifter.
+
+Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
+Signed-off-by: Dan Carpenter <[email protected]>
+Link: https://patch.msgid.link/[email protected]
+Signed-off-by: Mark Brown <[email protected]>
+---
+ drivers/spi/spi-qpic-snand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-qpic-snand.c
++++ b/drivers/spi/spi-qpic-snand.c
+@@ -514,7 +514,7 @@ static int qcom_spi_read_last_cw(struct
+ 	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ 		0 << CW_PER_PAGE;
+ 	cfg1 = ecc_cfg->cfg1_raw;
+-	ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
++	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
+ 
+ 	snandc->regs->cmd = snandc->qspi->cmd;
+ 	snandc->regs->cfg0 = cpu_to_le32(cfg0);

+ 35 - 0
target/linux/generic/backport-6.6/416-v6.15-03-spi-spi-qpic-snand-avoid-memleak-in-qcom_spi_ecc_ini.patch

@@ -0,0 +1,35 @@
+From d450cdd9c4398add1f2aa7200f2c95f1e3b9f9fa Mon Sep 17 00:00:00 2001
+From: Gabor Juhos <[email protected]>
+Date: Thu, 13 Mar 2025 19:31:21 +0100
+Subject: [PATCH] spi: spi-qpic-snand: avoid memleak in
+ qcom_spi_ecc_init_ctx_pipelined()
+
+When the allocation of the OOB buffer fails, the
+qcom_spi_ecc_init_ctx_pipelined() function returns without freeing
+the memory allocated for 'ecc_cfg' thus it can cause a memory leak.
+
+Call kfree() to free 'ecc_cfg' before returning from the function
+to avoid that.
+
+Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
+Signed-off-by: Gabor Juhos <[email protected]>
+Link: https://patch.msgid.link/[email protected]
+Signed-off-by: Mark Brown <[email protected]>
+---
+ drivers/spi/spi-qpic-snand.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-qpic-snand.c
++++ b/drivers/spi/spi-qpic-snand.c
+@@ -263,8 +263,10 @@ static int qcom_spi_ecc_init_ctx_pipelin
+ 		return -ENOMEM;
+ 	snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
+ 					GFP_KERNEL);
+-	if (!snandc->qspi->oob_buf)
++	if (!snandc->qspi->oob_buf) {
++		kfree(ecc_cfg);
+ 		return -ENOMEM;
++	}
+ 
+ 	memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
+ 

+ 49 - 0
target/linux/generic/backport-6.6/416-v6.15-04-spi-SPI_QPIC_SNAND-should-be-tristate-and-depend-on-.patch

@@ -0,0 +1,49 @@
+From d32c4e58545f17caaa854415f854691e32d42075 Mon Sep 17 00:00:00 2001
+From: Geert Uytterhoeven <[email protected]>
+Date: Wed, 26 Mar 2025 15:22:19 +0100
+Subject: [PATCH] spi: SPI_QPIC_SNAND should be tristate and depend on MTD
+
+SPI_QPIC_SNAND is the only driver that selects MTD instead of depending
+on it, which could lead to circular dependencies.  Moreover, as
+SPI_QPIC_SNAND is bool, this forces MTD (and various related symbols) to
+be built-in, as can be seen in an allmodconfig kernel.
+
+Except for a missing semicolon, there is no reason why SPI_QPIC_SNAND
+cannot be tristate; all MODULE_*() boilerplate is already present.
+Hence make SPI_QPIC_SNAND tristate, let it depend on MTD, and add the
+missing semicolon.
+
+Fixes: 7304d1909080ef0c ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
+Signed-off-by: Geert Uytterhoeven <[email protected]>
+Link: https://patch.msgid.link/b63db431cbf35223a4400e44c296293d32c4543c.1742998909.git.geert+renesas@glider.be
+Signed-off-by: Mark Brown <[email protected]>
+---
+ drivers/spi/Kconfig          | 4 ++--
+ drivers/spi/spi-qpic-snand.c | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -871,9 +871,9 @@ config SPI_QCOM_QSPI
+ 	  QSPI(Quad SPI) driver for Qualcomm QSPI controller.
+ 
+ config SPI_QPIC_SNAND
+-	bool "QPIC SNAND controller"
++	tristate "QPIC SNAND controller"
+ 	depends on ARCH_QCOM || COMPILE_TEST
+-	select MTD
++	depends on MTD
+ 	help
+ 	  QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
+ 	  QPIC controller supports both parallel nand and serial nand.
+--- a/drivers/spi/spi-qpic-snand.c
++++ b/drivers/spi/spi-qpic-snand.c
+@@ -1614,7 +1614,7 @@ static const struct of_device_id qcom_sn
+ 		.data = &ipq9574_snandc_props,
+ 	},
+ 	{}
+-}
++};
+ MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
+ 
+ static struct platform_driver qcom_spi_driver = {

+ 29 - 0
target/linux/generic/backport-6.6/416-v6.15-05-spi-spi-qpic-snand-use-kmalloc-for-OOB-buffer-alloca.patch

@@ -0,0 +1,29 @@
+From f48d80503504257682e493dc17408f2f0b47bcfa Mon Sep 17 00:00:00 2001
+From: Gabor Juhos <[email protected]>
+Date: Thu, 20 Mar 2025 19:11:59 +0100
+Subject: [PATCH] spi: spi-qpic-snand: use kmalloc() for OOB buffer allocation
+
+The qcom_spi_ecc_init_ctx_pipelined() function allocates zeroed
+memory for the OOB buffer, then it fills the buffer with '0xff'
+bytes right after the allocation. In this case zeroing the memory
+during allocation is superfluous, so use kmalloc() instead of
+kzalloc() to avoid that.
+
+Signed-off-by: Gabor Juhos <[email protected]>
+Link: https://patch.msgid.link/[email protected]
+Signed-off-by: Mark Brown <[email protected]>
+---
+ drivers/spi/spi-qpic-snand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/spi/spi-qpic-snand.c
++++ b/drivers/spi/spi-qpic-snand.c
+@@ -261,7 +261,7 @@ static int qcom_spi_ecc_init_ctx_pipelin
+ 	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
+ 	if (!ecc_cfg)
+ 		return -ENOMEM;
+-	snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
++	snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize,
+ 					GFP_KERNEL);
+ 	if (!snandc->qspi->oob_buf) {
+ 		kfree(ecc_cfg);

+ 31 - 0
target/linux/generic/backport-6.6/417-v6.12-mtd-spinand-set-bitflip_threshold-to-75-of-ECC-stren.patch

@@ -0,0 +1,31 @@
+From 1824520e7477bedf76bd08c32261c755e6405cd9 Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Mon, 12 Aug 2024 02:56:41 +0100
+Subject: [PATCH] mtd: spinand: set bitflip_threshold to 75% of ECC strength
+
+Reporting an unclean read from SPI-NAND only when the maximum number
+of correctable bitflip errors has been hit seems a bit late.
+UBI LEB scrubbing, which depends on the lower MTD device reporting
+correctable bitflips, then only kicks in when it's almost too late.
+
+Set bitflip_threshold to 75% of the ECC strength, which is also the
+default for raw NAND.
+
+Signed-off-by: Daniel Golle <[email protected]>
+Reviewed-by: Frieder Schrempf <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+Link: https://lore.kernel.org/linux-mtd/2117e387260b0a96f95b8e1652ff79e0e2d71d53.1723427450.git.daniel@makrotopia.org
+---
+ drivers/mtd/nand/spi/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -1287,6 +1287,7 @@ static int spinand_init(struct spinand_d
+ 	/* Propagate ECC information to mtd_info */
+ 	mtd->ecc_strength = nanddev_get_ecc_conf(nand)->strength;
+ 	mtd->ecc_step_size = nanddev_get_ecc_conf(nand)->step_size;
++	mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+ 
+ 	ret = spinand_create_dirmaps(spinand);
+ 	if (ret) {

+ 65 - 0
target/linux/generic/backport-6.6/418-v6.12-mtd-spinand-winbond-add-support-for-W25N01KV.patch

@@ -0,0 +1,65 @@
+From e2a9fcb36e851adb5b25c4acea53a290fd48a636 Mon Sep 17 00:00:00 2001
+From: Robert Marko <[email protected]>
+Date: Mon, 5 Aug 2024 19:51:02 +0200
+Subject: [PATCH] mtd: spinand: winbond: add support for W25N01KV
+
+Add support for Winbond W25N01KV 1Gbit SPI-NAND.
+
+It has 4-bit on-die ECC.
+
+Signed-off-by: Robert Marko <[email protected]>
+Signed-off-by: Miquel Raynal <[email protected]>
+Link: https://lore.kernel.org/linux-mtd/[email protected]
+---
+ drivers/mtd/nand/spi/winbond.c | 26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+--- a/drivers/mtd/nand/spi/winbond.c
++++ b/drivers/mtd/nand/spi/winbond.c
+@@ -74,6 +74,18 @@ static int w25m02gv_select_target(struct
+ 	return spi_mem_exec_op(spinand->spimem, &op);
+ }
+ 
++static int w25n01kv_ooblayout_ecc(struct mtd_info *mtd, int section,
++				  struct mtd_oob_region *region)
++{
++	if (section > 3)
++		return -ERANGE;
++
++	region->offset = 64 + (8 * section);
++	region->length = 7;
++
++	return 0;
++}
++
+ static int w25n02kv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ 				  struct mtd_oob_region *region)
+ {
+@@ -98,6 +110,11 @@ static int w25n02kv_ooblayout_free(struc
+ 	return 0;
+ }
+ 
++static const struct mtd_ooblayout_ops w25n01kv_ooblayout = {
++	.ecc = w25n01kv_ooblayout_ecc,
++	.free = w25n02kv_ooblayout_free,
++};
++
+ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = {
+ 	.ecc = w25n02kv_ooblayout_ecc,
+ 	.free = w25n02kv_ooblayout_free,
+@@ -160,6 +177,15 @@ static const struct spinand_info winbond
+ 					      &update_cache_variants),
+ 		     0,
+ 		     SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
++	SPINAND_INFO("W25N01KV",
++		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xae, 0x21),
++		     NAND_MEMORG(1, 2048, 96, 64, 1024, 20, 1, 1, 1),
++		     NAND_ECCREQ(4, 512),
++		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
++					      &write_cache_variants,
++					      &update_cache_variants),
++		     0,
++		     SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)),
+ 	SPINAND_INFO("W25N02KV",
+ 		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xaa, 0x22),
+ 		     NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),

+ 75 - 0
target/linux/generic/backport-6.6/600-v6.10-net-Remove-conditional-threaded-NAPI-wakeup-based-on.patch

@@ -0,0 +1,75 @@
+From 56364c910691f6d10ba88c964c9041b9ab777bd6 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <[email protected]>
+Date: Mon, 25 Mar 2024 08:40:28 +0100
+Subject: [PATCH 1/4] net: Remove conditional threaded-NAPI wakeup based on
+ task state.
+
+A NAPI thread is scheduled by first setting NAPI_STATE_SCHED bit. If
+successful (the bit was not yet set) then the NAPI_STATE_SCHED_THREADED
+is set but only if thread's state is not TASK_INTERRUPTIBLE (is
+TASK_RUNNING) followed by task wakeup.
+
+If the task is idle (TASK_INTERRUPTIBLE) then the
+NAPI_STATE_SCHED_THREADED bit is not set. The thread is no relying on
+the bit but always leaving the wait-loop after returning from schedule()
+because there must have been a wakeup.
+
+The smpboot-threads implementation for per-CPU threads requires an
+explicit condition and does not support "if we get out of schedule()
+then there must be something to do".
+
+Removing this optimisation simplifies the following integration.
+
+Set NAPI_STATE_SCHED_THREADED unconditionally on wakeup and rely on it
+in the wait path by removing the `woken' condition.
+
+Acked-by: Jakub Kicinski <[email protected]>
+Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ net/core/dev.c | 14 ++------------
+ 1 file changed, 2 insertions(+), 12 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4514,13 +4514,7 @@ static inline void ____napi_schedule(str
+ 		 */
+ 		thread = READ_ONCE(napi->thread);
+ 		if (thread) {
+-			/* Avoid doing set_bit() if the thread is in
+-			 * INTERRUPTIBLE state, cause napi_thread_wait()
+-			 * makes sure to proceed with napi polling
+-			 * if the thread is explicitly woken from here.
+-			 */
+-			if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
+-				set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
++			set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+ 			wake_up_process(thread);
+ 			return;
+ 		}
+@@ -6676,8 +6670,6 @@ static int napi_poll(struct napi_struct
+ 
+ static int napi_thread_wait(struct napi_struct *napi)
+ {
+-	bool woken = false;
+-
+ 	set_current_state(TASK_INTERRUPTIBLE);
+ 
+ 	while (!kthread_should_stop()) {
+@@ -6686,15 +6678,13 @@ static int napi_thread_wait(struct napi_
+ 		 * Testing SCHED bit is not enough because SCHED bit might be
+ 		 * set by some other busy poll thread or by napi_disable().
+ 		 */
+-		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
++		if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state)) {
+ 			WARN_ON(!list_empty(&napi->poll_list));
+ 			__set_current_state(TASK_RUNNING);
+ 			return 0;
+ 		}
+ 
+ 		schedule();
+-		/* woken being true indicates this thread owns this napi. */
+-		woken = true;
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 	}
+ 	__set_current_state(TASK_RUNNING);

+ 330 - 0
target/linux/generic/backport-6.6/601-v6.10-net-Allow-to-use-SMP-threads-for-backlog-NAPI.patch

@@ -0,0 +1,330 @@
+From dad6b97702639fba27a2bd3e986982ad6f0db3a7 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <[email protected]>
+Date: Mon, 25 Mar 2024 08:40:29 +0100
+Subject: [PATCH 2/4] net: Allow to use SMP threads for backlog NAPI.
+
+Backlog NAPI is a per-CPU NAPI struct only (with no device behind it)
+used by drivers which don't do NAPI themselves, RPS and parts of the
+stack which need to avoid recursive deadlocks while processing a packet.
+
+The non-NAPI drivers use the CPU-local backlog NAPI. If RPS is enabled
+then a flow for the skb is computed and based on the flow the skb can be
+enqueued on a remote CPU. Scheduling/ raising the softirq (for backlog's
+NAPI) on the remote CPU isn't trivial because the softirq is only
+scheduled on the local CPU and performed after the hardirq is done.
+In order to schedule a softirq on the remote CPU, an IPI is sent to the
+remote CPU which schedules the backlog-NAPI on the then local CPU.
+
+On PREEMPT_RT interrupts are force-threaded. The soft interrupts are
+raised within the interrupt thread and processed after the interrupt
+handler completed still within the context of the interrupt thread. The
+softirq is handled in the context where it originated.
+
+With force-threaded interrupts enabled, ksoftirqd is woken up if a
+softirq is raised from hardirq context. This is the case if it is raised
+from an IPI. Additionally there is a warning on PREEMPT_RT if the
+softirq is raised from the idle thread.
+This was done for two reasons:
+- With threaded interrupts the processing should happen in thread
+  context (where it originated) and ksoftirqd is the only thread for
+  this context if raised from hardirq. Using the currently running task
+  instead would "punish" a random task.
+- Once ksoftirqd is active it consumes all further softirqs until it
+  stops running. This changed recently and is no longer the case.
+
+Instead of keeping the backlog NAPI in ksoftirqd (in force-threaded/
+PREEMPT_RT setups) I am proposing NAPI-threads for backlog.
+The "proper" setup with threaded-NAPI is not doable because the threads
+are not pinned to an individual CPU and can be modified by the user.
+Additionally a dummy network device would have to be assigned. Also
+CPU-hotplug has to be considered if additional CPUs show up.
+All this can be probably done/ solved but the smpboot-threads already
+provide this infrastructure.
+
+Sending UDP packets over loopback expects that the packet is processed
+within the call. Delaying it by handing it over to the thread hurts
+performance. It is not beneficial to the outcome if the context switch
+happens immediately after enqueue or after a while to process a few
+packets in a batch.
+There is no need to always use the thread if the backlog NAPI is
+requested on the local CPU. This restores the loopback throughput. The
+performance drops mostly to the same value after enabling RPS on the
+loopback when comparing the IPI and the thread results.
+
+Create NAPI-threads for backlog if requested during boot. The thread
+runs the inner loop from napi_threaded_poll(); the wait part differs: it
+checks for NAPI_STATE_SCHED (the backlog NAPI can not be disabled).
+
+The NAPI threads for backlog are optional, it has to be enabled via the boot
+argument "thread_backlog_napi". It is mandatory for PREEMPT_RT to avoid the
+wakeup of ksoftirqd from the IPI.
+
+Acked-by: Jakub Kicinski <[email protected]>
+Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ net/core/dev.c | 148 +++++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 113 insertions(+), 35 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -78,6 +78,7 @@
+ #include <linux/slab.h>
+ #include <linux/sched.h>
+ #include <linux/sched/mm.h>
++#include <linux/smpboot.h>
+ #include <linux/mutex.h>
+ #include <linux/rwsem.h>
+ #include <linux/string.h>
+@@ -217,6 +218,31 @@ static inline struct hlist_head *dev_ind
+ 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
+ }
+ 
++#ifndef CONFIG_PREEMPT_RT
++
++static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
++
++static int __init setup_backlog_napi_threads(char *arg)
++{
++	static_branch_enable(&use_backlog_threads_key);
++	return 0;
++}
++early_param("thread_backlog_napi", setup_backlog_napi_threads);
++
++static bool use_backlog_threads(void)
++{
++	return static_branch_unlikely(&use_backlog_threads_key);
++}
++
++#else
++
++static bool use_backlog_threads(void)
++{
++	return true;
++}
++
++#endif
++
+ static inline void rps_lock_irqsave(struct softnet_data *sd,
+ 				    unsigned long *flags)
+ {
+@@ -4482,6 +4508,7 @@ EXPORT_SYMBOL(__dev_direct_xmit);
+ /*************************************************************************
+  *			Receiver routines
+  *************************************************************************/
++static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
+ 
+ int netdev_max_backlog __read_mostly = 1000;
+ EXPORT_SYMBOL(netdev_max_backlog);
+@@ -4514,12 +4541,16 @@ static inline void ____napi_schedule(str
+ 		 */
+ 		thread = READ_ONCE(napi->thread);
+ 		if (thread) {
++			if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
++				goto use_local_napi;
++
+ 			set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
+ 			wake_up_process(thread);
+ 			return;
+ 		}
+ 	}
+ 
++use_local_napi:
+ 	list_add_tail(&napi->poll_list, &sd->poll_list);
+ 	WRITE_ONCE(napi->list_owner, smp_processor_id());
+ 	/* If not called from net_rx_action()
+@@ -4765,6 +4796,11 @@ static void napi_schedule_rps(struct sof
+ 
+ #ifdef CONFIG_RPS
+ 	if (sd != mysd) {
++		if (use_backlog_threads()) {
++			__napi_schedule_irqoff(&sd->backlog);
++			return;
++		}
++
+ 		sd->rps_ipi_next = mysd->rps_ipi_list;
+ 		mysd->rps_ipi_list = sd;
+ 
+@@ -5988,7 +6024,7 @@ static void net_rps_action_and_irq_enabl
+ #ifdef CONFIG_RPS
+ 	struct softnet_data *remsd = sd->rps_ipi_list;
+ 
+-	if (remsd) {
++	if (!use_backlog_threads() && remsd) {
+ 		sd->rps_ipi_list = NULL;
+ 
+ 		local_irq_enable();
+@@ -6003,7 +6039,7 @@ static void net_rps_action_and_irq_enabl
+ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+-	return sd->rps_ipi_list != NULL;
++	return !use_backlog_threads() && sd->rps_ipi_list;
+ #else
+ 	return false;
+ #endif
+@@ -6047,7 +6083,7 @@ static int process_backlog(struct napi_s
+ 			 * We can use a plain write instead of clear_bit(),
+ 			 * and we dont need an smp_mb() memory barrier.
+ 			 */
+-			napi->state = 0;
++			napi->state &= NAPIF_STATE_THREADED;
+ 			again = false;
+ 		} else {
+ 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
+@@ -6713,43 +6749,48 @@ static void skb_defer_free_flush(struct
+ 	}
+ }
+ 
+-static int napi_threaded_poll(void *data)
++static void napi_threaded_poll_loop(struct napi_struct *napi)
+ {
+-	struct napi_struct *napi = data;
+ 	struct softnet_data *sd;
+-	void *have;
++	unsigned long last_qs = jiffies;
+ 
+-	while (!napi_thread_wait(napi)) {
+-		unsigned long last_qs = jiffies;
++	for (;;) {
++		bool repoll = false;
++		void *have;
+ 
+-		for (;;) {
+-			bool repoll = false;
++		local_bh_disable();
++		sd = this_cpu_ptr(&softnet_data);
++		sd->in_napi_threaded_poll = true;
+ 
+-			local_bh_disable();
+-			sd = this_cpu_ptr(&softnet_data);
+-			sd->in_napi_threaded_poll = true;
+-
+-			have = netpoll_poll_lock(napi);
+-			__napi_poll(napi, &repoll);
+-			netpoll_poll_unlock(have);
+-
+-			sd->in_napi_threaded_poll = false;
+-			barrier();
+-
+-			if (sd_has_rps_ipi_waiting(sd)) {
+-				local_irq_disable();
+-				net_rps_action_and_irq_enable(sd);
+-			}
+-			skb_defer_free_flush(sd);
+-			local_bh_enable();
++		have = netpoll_poll_lock(napi);
++		__napi_poll(napi, &repoll);
++		netpoll_poll_unlock(have);
++
++		sd->in_napi_threaded_poll = false;
++		barrier();
++
++		if (sd_has_rps_ipi_waiting(sd)) {
++			local_irq_disable();
++			net_rps_action_and_irq_enable(sd);
++		}
++		skb_defer_free_flush(sd);
++		local_bh_enable();
+ 
+-			if (!repoll)
+-				break;
++		if (!repoll)
++			break;
+ 
+-			rcu_softirq_qs_periodic(last_qs);
+-			cond_resched();
+-		}
++		rcu_softirq_qs_periodic(last_qs);
++		cond_resched();
+ 	}
++}
++
++static int napi_threaded_poll(void *data)
++{
++	struct napi_struct *napi = data;
++
++	while (!napi_thread_wait(napi))
++		napi_threaded_poll_loop(napi);
++
+ 	return 0;
+ }
+ 
+@@ -11334,7 +11375,7 @@ static int dev_cpu_dead(unsigned int old
+ 
+ 		list_del_init(&napi->poll_list);
+ 		if (napi->poll == process_backlog)
+-			napi->state = 0;
++			napi->state &= NAPIF_STATE_THREADED;
+ 		else
+ 			____napi_schedule(sd, napi);
+ 	}
+@@ -11342,12 +11383,14 @@ static int dev_cpu_dead(unsigned int old
+ 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ 	local_irq_enable();
+ 
++	if (!use_backlog_threads()) {
+ #ifdef CONFIG_RPS
+-	remsd = oldsd->rps_ipi_list;
+-	oldsd->rps_ipi_list = NULL;
++		remsd = oldsd->rps_ipi_list;
++		oldsd->rps_ipi_list = NULL;
+ #endif
+-	/* send out pending IPI's on offline CPU */
+-	net_rps_send_ipi(remsd);
++		/* send out pending IPI's on offline CPU */
++		net_rps_send_ipi(remsd);
++	}
+ 
+ 	/* Process offline CPU's input_pkt_queue */
+ 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
+@@ -11610,6 +11653,38 @@ static struct pernet_operations __net_in
+  *
+  */
+ 
++static int backlog_napi_should_run(unsigned int cpu)
++{
++	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
++	struct napi_struct *napi = &sd->backlog;
++
++	return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
++}
++
++static void run_backlog_napi(unsigned int cpu)
++{
++	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
++
++	napi_threaded_poll_loop(&sd->backlog);
++}
++
++static void backlog_napi_setup(unsigned int cpu)
++{
++	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
++	struct napi_struct *napi = &sd->backlog;
++
++	napi->thread = this_cpu_read(backlog_napi);
++	set_bit(NAPI_STATE_THREADED, &napi->state);
++}
++
++static struct smp_hotplug_thread backlog_threads = {
++	.store			= &backlog_napi,
++	.thread_should_run	= backlog_napi_should_run,
++	.thread_fn		= run_backlog_napi,
++	.thread_comm		= "backlog_napi/%u",
++	.setup			= backlog_napi_setup,
++};
++
+ /*
+  *       This is called single threaded during boot, so no need
+  *       to take the rtnl semaphore.
+@@ -11660,7 +11735,10 @@ static int __init net_dev_init(void)
+ 		init_gro_hash(&sd->backlog);
+ 		sd->backlog.poll = process_backlog;
+ 		sd->backlog.weight = weight_p;
++		INIT_LIST_HEAD(&sd->backlog.poll_list);
+ 	}
++	if (use_backlog_threads())
++		smpboot_register_percpu_thread(&backlog_threads);
+ 
+ 	dev_boot_phase = 0;
+ 

+ 121 - 0
target/linux/generic/backport-6.6/602-v6.10-net-Use-backlog-NAPI-to-clean-up-the-defer_list.patch

@@ -0,0 +1,121 @@
+From 80d2eefcb4c84aa9018b2a997ab3a4c567bc821a Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <[email protected]>
+Date: Mon, 25 Mar 2024 08:40:30 +0100
+Subject: [PATCH 3/4] net: Use backlog-NAPI to clean up the defer_list.
+
+The defer_list is a per-CPU list which is used to free skbs outside of
+the socket lock and on the CPU on which they have been allocated.
+The list is processed during NAPI callbacks so ideally the list is
+cleaned up.
+Should the amount of skbs on the list exceed a certain water mark then
+the softirq is triggered remotely on the target CPU by invoking a remote
+function call. The raise of the softirqs via a remote function call
+leads to waking the ksoftirqd on PREEMPT_RT which is undesired.
+The backlog-NAPI threads already provide the infrastructure which can be
+utilized to perform the cleanup of the defer_list.
+
+The NAPI state is updated with the input_pkt_queue.lock acquired. In
+order not to break the state, it is also necessary to wake the
+backlog-NAPI thread with the lock held. This requires taking the lock in
+rps_lock_irq*() if the backlog-NAPI threads are used, even with RPS
+disabled.
+
+Move the logic of remotely starting softirqs to clean up the defer_list
+into kick_defer_list_purge(). Make sure a lock is held in
+rps_lock_irq*() if backlog-NAPI threads are used. Schedule backlog-NAPI
+for defer_list cleanup if backlog-NAPI is available.
+
+Acked-by: Jakub Kicinski <[email protected]>
+Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ include/linux/netdevice.h |  1 +
+ net/core/dev.c            | 25 +++++++++++++++++++++----
+ net/core/skbuff.c         |  4 ++--
+ 3 files changed, 24 insertions(+), 6 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3308,6 +3308,7 @@ static inline void dev_xmit_recursion_de
+ 	__this_cpu_dec(softnet_data.xmit.recursion);
+ }
+ 
++void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
+ void __netif_schedule(struct Qdisc *q);
+ void netif_schedule_queue(struct netdev_queue *txq);
+ 
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -246,7 +246,7 @@ static bool use_backlog_threads(void)
+ static inline void rps_lock_irqsave(struct softnet_data *sd,
+ 				    unsigned long *flags)
+ {
+-	if (IS_ENABLED(CONFIG_RPS))
++	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
+ 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ 		local_irq_save(*flags);
+@@ -254,7 +254,7 @@ static inline void rps_lock_irqsave(stru
+ 
+ static inline void rps_lock_irq_disable(struct softnet_data *sd)
+ {
+-	if (IS_ENABLED(CONFIG_RPS))
++	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ 		spin_lock_irq(&sd->input_pkt_queue.lock);
+ 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ 		local_irq_disable();
+@@ -263,7 +263,7 @@ static inline void rps_lock_irq_disable(
+ static inline void rps_unlock_irq_restore(struct softnet_data *sd,
+ 					  unsigned long *flags)
+ {
+-	if (IS_ENABLED(CONFIG_RPS))
++	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
+ 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ 		local_irq_restore(*flags);
+@@ -271,7 +271,7 @@ static inline void rps_unlock_irq_restor
+ 
+ static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+ {
+-	if (IS_ENABLED(CONFIG_RPS))
++	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ 		spin_unlock_irq(&sd->input_pkt_queue.lock);
+ 	else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ 		local_irq_enable();
+@@ -4815,6 +4815,23 @@ static void napi_schedule_rps(struct sof
+ 	__napi_schedule_irqoff(&mysd->backlog);
+ }
+ 
++void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu)
++{
++	unsigned long flags;
++
++	if (use_backlog_threads()) {
++		rps_lock_irqsave(sd, &flags);
++
++		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
++			__napi_schedule_irqoff(&sd->backlog);
++
++		rps_unlock_irq_restore(sd, &flags);
++
++	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
++		smp_call_function_single_async(cpu, &sd->defer_csd);
++	}
++}
++
+ #ifdef CONFIG_NET_FLOW_LIMIT
+ int netdev_flow_limit_table_len __read_mostly = (1 << 12);
+ #endif
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -6863,8 +6863,8 @@ nodefer:	__kfree_skb(skb);
+ 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
+ 	 * if we are unlucky enough (this seems very unlikely).
+ 	 */
+-	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
+-		smp_call_function_single_async(cpu, &sd->defer_csd);
++	if (unlikely(kick))
++		kick_defer_list_purge(sd, cpu);
+ }
+ 
+ static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,

+ 164 - 0
target/linux/generic/backport-6.6/603-v6.10-net-Rename-rps_lock-to-backlog_lock.patch

@@ -0,0 +1,164 @@
+From 765b11f8f4e20b7433e4ba4a3e9106a0d59501ed Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <[email protected]>
+Date: Mon, 25 Mar 2024 08:40:31 +0100
+Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.
+
+The rps_lock.*() functions use the inner lock of a sk_buff_head for
+locking. This lock is used if RPS is enabled, otherwise the list is
+accessed lockless and disabling interrupts is enough for the
+synchronisation because it is only accessed CPU-locally. Not only is the
+list protected, but the NAPI state is protected as well.
+With the addition of backlog threads, the lock is also needed because of
+the cross CPU access even without RPS. The clean up of the defer_list
+list is also done via backlog threads (if enabled).
+
+It has been suggested to rename the locking function since it is no
+longer just RPS.
+
+Rename the rps_lock*() functions to backlog_lock*().
+
+Suggested-by: Jakub Kicinski <[email protected]>
+Acked-by: Jakub Kicinski <[email protected]>
+Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ net/core/dev.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -243,8 +243,8 @@ static bool use_backlog_threads(void)
+ 
+ #endif
+ 
+-static inline void rps_lock_irqsave(struct softnet_data *sd,
+-				    unsigned long *flags)
++static inline void backlog_lock_irq_save(struct softnet_data *sd,
++					 unsigned long *flags)
+ {
+ 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
+@@ -252,7 +252,7 @@ static inline void rps_lock_irqsave(stru
+ 		local_irq_save(*flags);
+ }
+ 
+-static inline void rps_lock_irq_disable(struct softnet_data *sd)
++static inline void backlog_lock_irq_disable(struct softnet_data *sd)
+ {
+ 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ 		spin_lock_irq(&sd->input_pkt_queue.lock);
+@@ -260,8 +260,8 @@ static inline void rps_lock_irq_disable(
+ 		local_irq_disable();
+ }
+ 
+-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
+-					  unsigned long *flags)
++static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
++					      unsigned long *flags)
+ {
+ 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
+@@ -269,7 +269,7 @@ static inline void rps_unlock_irq_restor
+ 		local_irq_restore(*flags);
+ }
+ 
+-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
++static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
+ {
+ 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
+ 		spin_unlock_irq(&sd->input_pkt_queue.lock);
+@@ -4820,12 +4820,12 @@ void kick_defer_list_purge(struct softne
+ 	unsigned long flags;
+ 
+ 	if (use_backlog_threads()) {
+-		rps_lock_irqsave(sd, &flags);
++		backlog_lock_irq_save(sd, &flags);
+ 
+ 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
+ 			__napi_schedule_irqoff(&sd->backlog);
+ 
+-		rps_unlock_irq_restore(sd, &flags);
++		backlog_unlock_irq_restore(sd, &flags);
+ 
+ 	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
+ 		smp_call_function_single_async(cpu, &sd->defer_csd);
+@@ -4887,7 +4887,7 @@ static int enqueue_to_backlog(struct sk_
+ 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
+ 	sd = &per_cpu(softnet_data, cpu);
+ 
+-	rps_lock_irqsave(sd, &flags);
++	backlog_lock_irq_save(sd, &flags);
+ 	if (!netif_running(skb->dev))
+ 		goto drop;
+ 	qlen = skb_queue_len(&sd->input_pkt_queue);
+@@ -4896,7 +4896,7 @@ static int enqueue_to_backlog(struct sk_
+ enqueue:
+ 			__skb_queue_tail(&sd->input_pkt_queue, skb);
+ 			input_queue_tail_incr_save(sd, qtail);
+-			rps_unlock_irq_restore(sd, &flags);
++			backlog_unlock_irq_restore(sd, &flags);
+ 			return NET_RX_SUCCESS;
+ 		}
+ 
+@@ -4911,7 +4911,7 @@ enqueue:
+ 
+ drop:
+ 	sd->dropped++;
+-	rps_unlock_irq_restore(sd, &flags);
++	backlog_unlock_irq_restore(sd, &flags);
+ 
+ 	dev_core_stats_rx_dropped_inc(skb->dev);
+ 	kfree_skb_reason(skb, reason);
+@@ -5942,7 +5942,7 @@ static void flush_backlog(struct work_st
+ 	local_bh_disable();
+ 	sd = this_cpu_ptr(&softnet_data);
+ 
+-	rps_lock_irq_disable(sd);
++	backlog_lock_irq_disable(sd);
+ 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+ 			__skb_unlink(skb, &sd->input_pkt_queue);
+@@ -5950,7 +5950,7 @@ static void flush_backlog(struct work_st
+ 			input_queue_head_incr(sd);
+ 		}
+ 	}
+-	rps_unlock_irq_enable(sd);
++	backlog_unlock_irq_enable(sd);
+ 
+ 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
+@@ -5968,14 +5968,14 @@ static bool flush_required(int cpu)
+ 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
+ 	bool do_flush;
+ 
+-	rps_lock_irq_disable(sd);
++	backlog_lock_irq_disable(sd);
+ 
+ 	/* as insertion into process_queue happens with the rps lock held,
+ 	 * process_queue access may race only with dequeue
+ 	 */
+ 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
+ 		   !skb_queue_empty_lockless(&sd->process_queue);
+-	rps_unlock_irq_enable(sd);
++	backlog_unlock_irq_enable(sd);
+ 
+ 	return do_flush;
+ #endif
+@@ -6090,7 +6090,7 @@ static int process_backlog(struct napi_s
+ 
+ 		}
+ 
+-		rps_lock_irq_disable(sd);
++		backlog_lock_irq_disable(sd);
+ 		if (skb_queue_empty(&sd->input_pkt_queue)) {
+ 			/*
+ 			 * Inline a custom version of __napi_complete().
+@@ -6106,7 +6106,7 @@ static int process_backlog(struct napi_s
+ 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
+ 						   &sd->process_queue);
+ 		}
+-		rps_unlock_irq_enable(sd);
++		backlog_unlock_irq_enable(sd);
+ 	}
+ 
+ 	return work;

+ 30 - 0
target/linux/generic/backport-6.6/610-v6.9-net-mdio-add-2.5g-and-5g-related-PMA-speed-constants.patch

@@ -0,0 +1,30 @@
+From 6c06c88fa838fcc1b7e5380facd086f57fd9d1c4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Beh=C3=BAn?= <[email protected]>
+Date: Sun, 4 Feb 2024 15:16:46 +0100
+Subject: [PATCH] net: mdio: add 2.5g and 5g related PMA speed constants
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add constants indicating 2.5g and 5g ability in the MMD PMA speed
+register.
+
+Signed-off-by: Marek Behún <[email protected]>
+Signed-off-by: Heiner Kallweit <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ include/uapi/linux/mdio.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/uapi/linux/mdio.h
++++ b/include/uapi/linux/mdio.h
+@@ -138,6 +138,8 @@
+ #define MDIO_PMA_SPEED_1000		0x0010	/* 1000M capable */
+ #define MDIO_PMA_SPEED_100		0x0020	/* 100M capable */
+ #define MDIO_PMA_SPEED_10		0x0040	/* 10M capable */
++#define MDIO_PMA_SPEED_2_5G		0x2000	/* 2.5G capable */
++#define MDIO_PMA_SPEED_5G		0x4000	/* 5G capable */
+ #define MDIO_PCS_SPEED_10P2B		0x0002	/* 10PASS-TS/2BASE-TL capable */
+ #define MDIO_PCS_SPEED_2_5G		0x0040	/* 2.5G capable */
+ #define MDIO_PCS_SPEED_5G		0x0080	/* 5G capable */

+ 94 - 0
target/linux/generic/backport-6.6/611-01-v6.11-udp-Allow-GSO-transmit-from-devices-with-no-checksum.patch

@@ -0,0 +1,94 @@
+From: Jakub Sitnicki <[email protected]>
+Date: Wed, 26 Jun 2024 19:51:26 +0200
+Subject: [PATCH] udp: Allow GSO transmit from devices with no checksum offload
+
+Today sending a UDP GSO packet from a TUN device results in an EIO error:
+
+  import fcntl, os, struct
+  from socket import *
+
+  TUNSETIFF = 0x400454CA
+  IFF_TUN = 0x0001
+  IFF_NO_PI = 0x1000
+  UDP_SEGMENT = 103
+
+  tun_fd = os.open("/dev/net/tun", os.O_RDWR)
+  ifr = struct.pack("16sH", b"tun0", IFF_TUN | IFF_NO_PI)
+  fcntl.ioctl(tun_fd, TUNSETIFF, ifr)
+
+  os.system("ip addr add 192.0.2.1/24 dev tun0")
+  os.system("ip link set dev tun0 up")
+
+  s = socket(AF_INET, SOCK_DGRAM)
+  s.setsockopt(SOL_UDP, UDP_SEGMENT, 1200)
+  s.sendto(b"x" * 3000, ("192.0.2.2", 9)) # EIO
+
+This is due to a check in the udp stack whether the egress device offers
+checksum offload. TUN/TAP devices, by default, don't advertise this
+capability because it requires support from the TUN/TAP reader.
+
+However, the GSO stack has a software fallback for checksum calculation,
+which we can use. This way we don't force UDP_SEGMENT users to handle the
+EIO error and implement a segmentation fallback.
+
+Lift the restriction so that UDP_SEGMENT can be used with any egress
+device. We also need to adjust the UDP GSO code to match the GSO stack
+expectation about ip_summed field, as set in commit 8d63bee643f1 ("net:
+avoid skb_warn_bad_offload false positives on UFO"). Otherwise we will hit
+the bad offload check.
+
+Users should, however, expect a potential performance impact when
+batch-sending packets with UDP_SEGMENT without checksum offload on the
+egress device. In such case the packet payload is read twice: first during
+the sendmsg syscall when copying data from user memory, and then in the GSO
+stack for checksum computation. This double memory read can be less
+efficient than a regular sendmsg where the checksum is calculated during
+the initial data copy from user memory.
+
+Signed-off-by: Jakub Sitnicki <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Link: https://patch.msgid.link/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -942,8 +942,7 @@ static int udp_send_skb(struct sk_buff *
+ 			kfree_skb(skb);
+ 			return -EINVAL;
+ 		}
+-		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
+-		    dst_xfrm(skb_dst(skb))) {
++		if (is_udplite || dst_xfrm(skb_dst(skb))) {
+ 			kfree_skb(skb);
+ 			return -EIO;
+ 		}
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -384,6 +384,14 @@ struct sk_buff *__udp_gso_segment(struct
+ 	else
+ 		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;
+ 
++	/* On the TX path, CHECKSUM_NONE and CHECKSUM_UNNECESSARY have the same
++	 * meaning. However, check for bad offloads in the GSO stack expects the
++	 * latter, if the checksum was calculated in software. To vouch for the
++	 * segment skbs we actually need to set it on the gso_skb.
++	 */
++	if (gso_skb->ip_summed == CHECKSUM_NONE)
++		gso_skb->ip_summed = CHECKSUM_UNNECESSARY;
++
+ 	/* update refcount for the packet */
+ 	if (copy_dtor) {
+ 		int delta = sum_truesize - gso_skb->truesize;
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1258,8 +1258,7 @@ static int udp_v6_send_skb(struct sk_buf
+ 			kfree_skb(skb);
+ 			return -EINVAL;
+ 		}
+-		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
+-		    dst_xfrm(skb_dst(skb))) {
++		if (is_udplite || dst_xfrm(skb_dst(skb))) {
+ 			kfree_skb(skb);
+ 			return -EIO;
+ 		}

+ 69 - 0
target/linux/generic/backport-6.6/611-02-v6.11-net-Make-USO-depend-on-CSUM-offload.patch

@@ -0,0 +1,69 @@
+From: Jakub Sitnicki <[email protected]>
+Date: Thu, 8 Aug 2024 11:56:21 +0200
+Subject: [PATCH] net: Make USO depend on CSUM offload
+
+UDP segmentation offload inherently depends on checksum offload. It should
+not be possible to disable checksum offload while leaving USO enabled.
+Enforce this dependency in code.
+
+There is a single tx-udp-segmentation feature flag to indicate support for
+both IPv4/6, hence the devices wishing to support USO must offer checksum
+offload for both IP versions.
+
+Fixes: 10154dbded6d ("udp: Allow GSO transmit from devices with no checksum offload")
+Suggested-by: Willem de Bruijn <[email protected]>
+Signed-off-by: Jakub Sitnicki <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Link: https://patch.msgid.link/20240808-udp-gso-egress-from-tunnel-v4-1-f5c5b4149ab9@cloudflare.com
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -9796,6 +9796,15 @@ static void netdev_sync_lower_features(s
+ 	}
+ }
+ 
++static bool netdev_has_ip_or_hw_csum(netdev_features_t features)
++{
++	netdev_features_t ip_csum_mask = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
++	bool ip_csum = (features & ip_csum_mask) == ip_csum_mask;
++	bool hw_csum = features & NETIF_F_HW_CSUM;
++
++	return ip_csum || hw_csum;
++}
++
+ static netdev_features_t netdev_fix_features(struct net_device *dev,
+ 	netdev_features_t features)
+ {
+@@ -9877,15 +9886,9 @@ static netdev_features_t netdev_fix_feat
+ 		features &= ~NETIF_F_LRO;
+ 	}
+ 
+-	if (features & NETIF_F_HW_TLS_TX) {
+-		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
+-			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+-		bool hw_csum = features & NETIF_F_HW_CSUM;
+-
+-		if (!ip_csum && !hw_csum) {
+-			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+-			features &= ~NETIF_F_HW_TLS_TX;
+-		}
++	if ((features & NETIF_F_HW_TLS_TX) && !netdev_has_ip_or_hw_csum(features)) {
++		netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
++		features &= ~NETIF_F_HW_TLS_TX;
+ 	}
+ 
+ 	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
+@@ -9893,6 +9896,11 @@ static netdev_features_t netdev_fix_feat
+ 		features &= ~NETIF_F_HW_TLS_RX;
+ 	}
+ 
++	if ((features & NETIF_F_GSO_UDP_L4) && !netdev_has_ip_or_hw_csum(features)) {
++		netdev_dbg(dev, "Dropping USO feature since no CSUM feature.\n");
++		features &= ~NETIF_F_GSO_UDP_L4;
++	}
++
+ 	return features;
+ }
+ 

+ 86 - 0
target/linux/generic/backport-6.6/611-03-v6.11-udp-Fall-back-to-software-USO-if-IPv6-extension-head.patch

@@ -0,0 +1,86 @@
+From: Jakub Sitnicki <[email protected]>
+Date: Thu, 8 Aug 2024 11:56:22 +0200
+Subject: [PATCH] udp: Fall back to software USO if IPv6 extension headers are
+ present
+
+In commit 10154dbded6d ("udp: Allow GSO transmit from devices with no
+checksum offload") we have intentionally allowed UDP GSO packets marked
+CHECKSUM_NONE to pass to the GSO stack, so that they can be segmented and
+checksummed by a software fallback when the egress device lacks these
+features.
+
+What was not taken into consideration is that a CHECKSUM_NONE skb can be
+handed over to the GSO stack also when the egress device advertises the
+tx-udp-segmentation / NETIF_F_GSO_UDP_L4 feature.
+
+This will happen when there are IPv6 extension headers present, which we
+check for in __ip6_append_data(). Syzbot has discovered this scenario,
+producing a warning as below:
+
+  ip6tnl0: caps=(0x00000006401d7869, 0x00000006401d7869)
+  WARNING: CPU: 0 PID: 5112 at net/core/dev.c:3293 skb_warn_bad_offload+0x166/0x1a0 net/core/dev.c:3291
+  Modules linked in:
+  CPU: 0 PID: 5112 Comm: syz-executor391 Not tainted 6.10.0-rc7-syzkaller-01603-g80ab5445da62 #0
+  Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 06/07/2024
+  RIP: 0010:skb_warn_bad_offload+0x166/0x1a0 net/core/dev.c:3291
+  [...]
+  Call Trace:
+   <TASK>
+   __skb_gso_segment+0x3be/0x4c0 net/core/gso.c:127
+   skb_gso_segment include/net/gso.h:83 [inline]
+   validate_xmit_skb+0x585/0x1120 net/core/dev.c:3661
+   __dev_queue_xmit+0x17a4/0x3e90 net/core/dev.c:4415
+   neigh_output include/net/neighbour.h:542 [inline]
+   ip6_finish_output2+0xffa/0x1680 net/ipv6/ip6_output.c:137
+   ip6_finish_output+0x41e/0x810 net/ipv6/ip6_output.c:222
+   ip6_send_skb+0x112/0x230 net/ipv6/ip6_output.c:1958
+   udp_v6_send_skb+0xbf5/0x1870 net/ipv6/udp.c:1292
+   udpv6_sendmsg+0x23b3/0x3270 net/ipv6/udp.c:1588
+   sock_sendmsg_nosec net/socket.c:730 [inline]
+   __sock_sendmsg+0xef/0x270 net/socket.c:745
+   ____sys_sendmsg+0x525/0x7d0 net/socket.c:2585
+   ___sys_sendmsg net/socket.c:2639 [inline]
+   __sys_sendmmsg+0x3b2/0x740 net/socket.c:2725
+   __do_sys_sendmmsg net/socket.c:2754 [inline]
+   __se_sys_sendmmsg net/socket.c:2751 [inline]
+   __x64_sys_sendmmsg+0xa0/0xb0 net/socket.c:2751
+   do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+   do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83
+   entry_SYSCALL_64_after_hwframe+0x77/0x7f
+   [...]
+   </TASK>
+
+We are hitting the bad offload warning because when an egress device is
+capable of handling segmentation offload requested by
+skb_shinfo(skb)->gso_type, the chain of gso_segment callbacks won't produce
+any segment skbs and return NULL. See the skb_gso_ok() branch in
+{__udp,tcp,sctp}_gso_segment helpers.
+
+To fix it, force a fallback to software USO when processing a packet with
+IPv6 extension headers, since we don't know if these can checksummed by
+all devices which offer USO.
+
+Fixes: 10154dbded6d ("udp: Allow GSO transmit from devices with no checksum offload")
+Reported-by: [email protected]
+Closes: https://lore.kernel.org/all/[email protected]/
+Reviewed-by: Willem de Bruijn <[email protected]>
+Signed-off-by: Jakub Sitnicki <[email protected]>
+Link: https://patch.msgid.link/20240808-udp-gso-egress-from-tunnel-v4-2-f5c5b4149ab9@cloudflare.com
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -283,6 +283,12 @@ struct sk_buff *__udp_gso_segment(struct
+ 		     !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)))
+ 		return ERR_PTR(-EINVAL);
+ 
++	/* We don't know if egress device can segment and checksum the packet
++	 * when IPv6 extension headers are present. Fall back to software GSO.
++	 */
++	if (gso_skb->ip_summed != CHECKSUM_PARTIAL)
++		features &= ~(NETIF_F_GSO_UDP_L4 | NETIF_F_CSUM_MASK);
++
+ 	if (skb_gso_ok(gso_skb, features | NETIF_F_GSO_ROBUST)) {
+ 		/* Packet is from an untrusted source, reset gso_segs. */
+ 		skb_shinfo(gso_skb)->gso_segs = DIV_ROUND_UP(gso_skb->len - sizeof(*uh),

+ 29 - 0
target/linux/generic/backport-6.6/612-v6.9-net-get-stats64-if-device-if-driver-is-configured.patch

@@ -0,0 +1,29 @@
+From: Breno Leitao <[email protected]>
+Date: Wed, 28 Feb 2024 03:31:21 -0800
+Subject: [PATCH] net: get stats64 if device if driver is configured
+
+If the network driver is relying in the net core to do stats allocation,
+then we want to dev_get_tstats64() instead of netdev_stats_to_stats64(),
+since there are per-cpu stats that needs to be taken in consideration.
+
+This will also simplify the drivers in regard to statistics. Once the
+driver sets NETDEV_PCPU_STAT_TSTATS, it doesn't not need to allocate the
+stacks, neither it needs to set `.ndo_get_stats64 = dev_get_tstats64`
+for the generic stats collection function anymore.
+
+Signed-off-by: Breno Leitao <[email protected]>
+Reviewed-by: Simon Horman <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10703,6 +10703,8 @@ struct rtnl_link_stats64 *dev_get_stats(
+ 		ops->ndo_get_stats64(dev, storage);
+ 	} else if (ops->ndo_get_stats) {
+ 		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
++	} else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
++		dev_get_tstats64(dev, storage);
+ 	} else {
+ 		netdev_stats_to_stats64(storage, &dev->stats);
+ 	}

+ 139 - 0
target/linux/generic/backport-6.6/620-01-v6.7-page_pool-fragment-API-support-for-32-bit-arch-with-.patch

@@ -0,0 +1,139 @@
+From: Yunsheng Lin <[email protected]>
+Date: Fri, 13 Oct 2023 14:48:21 +0800
+Subject: [PATCH] page_pool: fragment API support for 32-bit arch with 64-bit
+ DMA
+
+Currently page_pool_alloc_frag() is not supported in 32-bit
+arch with 64-bit DMA because of the overlap issue between
+pp_frag_count and dma_addr_upper in 'struct page' for those
+arches, which seems to be quite common, see [1], which means
+driver may need to handle it when using fragment API.
+
+It is assumed that the combination of the above arch with an
+address space >16TB does not exist, as all those arches have
+64b equivalent, it seems logical to use the 64b version for a
+system with a large address space. It is also assumed that dma
+address is page aligned when we are dma mapping a page aligned
+buffer, see [2].
+
+That means we're storing 12 bits of 0 at the lower end for a
+dma address, we can reuse those bits for the above arches to
+support 32b+12b, which is 16TB of memory.
+
+If we make a wrong assumption, a warning is emitted so that
+user can report to us.
+
+1. https://lore.kernel.org/all/[email protected]/
+2. https://lore.kernel.org/all/[email protected]/
+
+Tested-by: Alexander Lobakin <[email protected]>
+Signed-off-by: Yunsheng Lin <[email protected]>
+CC: Lorenzo Bianconi <[email protected]>
+CC: Alexander Duyck <[email protected]>
+CC: Liang Chen <[email protected]>
+CC: Guillaume Tucker <[email protected]>
+CC: Matthew Wilcox <[email protected]>
+CC: Linux-MM <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -125,18 +125,7 @@ struct page {
+ 			struct page_pool *pp;
+ 			unsigned long _pp_mapping_pad;
+ 			unsigned long dma_addr;
+-			union {
+-				/**
+-				 * dma_addr_upper: might require a 64-bit
+-				 * value on 32-bit architectures.
+-				 */
+-				unsigned long dma_addr_upper;
+-				/**
+-				 * For frag page support, not supported in
+-				 * 32-bit architectures with 64-bit DMA.
+-				 */
+-				atomic_long_t pp_frag_count;
+-			};
++			atomic_long_t pp_frag_count;
+ 		};
+ 		struct {	/* Tail pages of compound page */
+ 			unsigned long compound_head;	/* Bit zero is set */
+--- a/include/net/page_pool/helpers.h
++++ b/include/net/page_pool/helpers.h
+@@ -197,7 +197,7 @@ static inline void page_pool_recycle_dir
+ 	page_pool_put_full_page(pool, page, true);
+ }
+ 
+-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
++#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA	\
+ 		(sizeof(dma_addr_t) > sizeof(unsigned long))
+ 
+ /**
+@@ -211,17 +211,25 @@ static inline dma_addr_t page_pool_get_d
+ {
+ 	dma_addr_t ret = page->dma_addr;
+ 
+-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+-		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
++	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
++		ret <<= PAGE_SHIFT;
+ 
+ 	return ret;
+ }
+ 
+-static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
++static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+ {
++	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
++		page->dma_addr = addr >> PAGE_SHIFT;
++
++		/* We assume page alignment to shave off bottom bits,
++		 * if this "compression" doesn't work we need to drop.
++		 */
++		return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
++	}
++
+ 	page->dma_addr = addr;
+-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+-		page->dma_addr_upper = upper_32_bits(addr);
++	return false;
+ }
+ 
+ static inline bool page_pool_put(struct page_pool *pool)
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -211,10 +211,6 @@ static int page_pool_init(struct page_po
+ 		 */
+ 	}
+ 
+-	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
+-	    pool->p.flags & PP_FLAG_PAGE_FRAG)
+-		return -EINVAL;
+-
+ #ifdef CONFIG_PAGE_POOL_STATS
+ 	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+ 	if (!pool->recycle_stats)
+@@ -363,12 +359,20 @@ static bool page_pool_dma_map(struct pag
+ 	if (dma_mapping_error(pool->p.dev, dma))
+ 		return false;
+ 
+-	page_pool_set_dma_addr(page, dma);
++	if (page_pool_set_dma_addr(page, dma))
++		goto unmap_failed;
+ 
+ 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+ 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+ 
+ 	return true;
++
++unmap_failed:
++	WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
++	dma_unmap_page_attrs(pool->p.dev, dma,
++			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
++			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
++	return false;
+ }
+ 
+ static void page_pool_set_pp_info(struct page_pool *pool,

+ 183 - 0
target/linux/generic/backport-6.6/620-02-v6.7-page_pool-unify-frag_count-handling-in-page_pool_is_.patch

@@ -0,0 +1,183 @@
+From: Yunsheng Lin <[email protected]>
+Date: Fri, 20 Oct 2023 17:59:48 +0800
+Subject: [PATCH] page_pool: unify frag_count handling in
+ page_pool_is_last_frag()
+
+Currently when page_pool_create() is called with
+PP_FLAG_PAGE_FRAG flag, page_pool_alloc_pages() is only
+allowed to be called under the below constraints:
+1. page_pool_fragment_page() need to be called to setup
+   page->pp_frag_count immediately.
+2. page_pool_defrag_page() often need to be called to drain
+   the page->pp_frag_count when there is no more user will
+   be holding on to that page.
+
+Those constraints exist in order to support a page to be
+split into multi fragments.
+
+And those constraints have some overhead because of the
+cache line dirtying/bouncing and atomic update.
+
+Those constraints are unavoidable for case when we need a
+page to be split into more than one fragment, but there is
+also case that we want to avoid the above constraints and
+their overhead when a page can't be split as it can only
+hold a fragment as requested by user, depending on different
+use cases:
+use case 1: allocate page without page splitting.
+use case 2: allocate page with page splitting.
+use case 3: allocate page with or without page splitting
+            depending on the fragment size.
+
+Currently page pool only provide page_pool_alloc_pages() and
+page_pool_alloc_frag() API to enable the 1 & 2 separately,
+so we can not use a combination of 1 & 2 to enable 3, it is
+not possible yet because of the per page_pool flag
+PP_FLAG_PAGE_FRAG.
+
+So in order to allow allocating unsplit page without the
+overhead of split page while still allow allocating split
+page we need to remove the per page_pool flag in
+page_pool_is_last_frag(), as best as I can think of, it seems
+there are two methods as below:
+1. Add per page flag/bit to indicate a page is split or
+   not, which means we might need to update that flag/bit
+   everytime the page is recycled, dirtying the cache line
+   of 'struct page' for use case 1.
+2. Unify the page->pp_frag_count handling for both split and
+   unsplit page by assuming all pages in the page pool is split
+   into a big fragment initially.
+
+As page pool already supports use case 1 without dirtying the
+cache line of 'struct page' whenever a page is recyclable, we
+need to support the above use case 3 with minimal overhead,
+especially not adding any noticeable overhead for use case 1,
+and we are already doing an optimization by not updating
+pp_frag_count in page_pool_defrag_page() for the last fragment
+user, this patch chooses to unify the pp_frag_count handling
+to support the above use case 3.
+
+There is no noticeable performance degradation and some
+justification for unifying the frag_count handling with this
+patch applied using a micro-benchmark testing in [1].
+
+1. https://lore.kernel.org/all/[email protected]/
+
+Signed-off-by: Yunsheng Lin <[email protected]>
+CC: Lorenzo Bianconi <[email protected]>
+CC: Alexander Duyck <[email protected]>
+CC: Liang Chen <[email protected]>
+CC: Alexander Lobakin <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+
+--- a/include/net/page_pool/helpers.h
++++ b/include/net/page_pool/helpers.h
+@@ -115,28 +115,49 @@ static inline long page_pool_defrag_page
+ 	long ret;
+ 
+ 	/* If nr == pp_frag_count then we have cleared all remaining
+-	 * references to the page. No need to actually overwrite it, instead
+-	 * we can leave this to be overwritten by the calling function.
++	 * references to the page:
++	 * 1. 'n == 1': no need to actually overwrite it.
++	 * 2. 'n != 1': overwrite it with one, which is the rare case
++	 *              for pp_frag_count draining.
+ 	 *
+-	 * The main advantage to doing this is that an atomic_read is
+-	 * generally a much cheaper operation than an atomic update,
+-	 * especially when dealing with a page that may be partitioned
+-	 * into only 2 or 3 pieces.
++	 * The main advantage to doing this is that not only we avoid a atomic
++	 * update, as an atomic_read is generally a much cheaper operation than
++	 * an atomic update, especially when dealing with a page that may be
++	 * partitioned into only 2 or 3 pieces; but also unify the pp_frag_count
++	 * handling by ensuring all pages have partitioned into only 1 piece
++	 * initially, and only overwrite it when the page is partitioned into
++	 * more than one piece.
+ 	 */
+-	if (atomic_long_read(&page->pp_frag_count) == nr)
++	if (atomic_long_read(&page->pp_frag_count) == nr) {
++		/* As we have ensured nr is always one for constant case using
++		 * the BUILD_BUG_ON(), only need to handle the non-constant case
++		 * here for pp_frag_count draining, which is a rare case.
++		 */
++		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
++		if (!__builtin_constant_p(nr))
++			atomic_long_set(&page->pp_frag_count, 1);
++
+ 		return 0;
++	}
+ 
+ 	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+ 	WARN_ON(ret < 0);
++
++	/* We are the last user here too, reset pp_frag_count back to 1 to
++	 * ensure all pages have been partitioned into 1 piece initially,
++	 * this should be the rare case when the last two fragment users call
++	 * page_pool_defrag_page() currently.
++	 */
++	if (unlikely(!ret))
++		atomic_long_set(&page->pp_frag_count, 1);
++
+ 	return ret;
+ }
+ 
+-static inline bool page_pool_is_last_frag(struct page_pool *pool,
+-					  struct page *page)
++static inline bool page_pool_is_last_frag(struct page *page)
+ {
+-	/* If fragments aren't enabled or count is 0 we were the last user */
+-	return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
+-	       (page_pool_defrag_page(page, 1) == 0);
++	/* If page_pool_defrag_page() returns 0, we were the last user */
++	return page_pool_defrag_page(page, 1) == 0;
+ }
+ 
+ /**
+@@ -161,7 +182,7 @@ static inline void page_pool_put_page(st
+ 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+ 	 */
+ #ifdef CONFIG_PAGE_POOL
+-	if (!page_pool_is_last_frag(pool, page))
++	if (!page_pool_is_last_frag(page))
+ 		return;
+ 
+ 	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -380,6 +380,14 @@ static void page_pool_set_pp_info(struct
+ {
+ 	page->pp = pool;
+ 	page->pp_magic |= PP_SIGNATURE;
++
++	/* Ensuring all pages have been split into one fragment initially:
++	 * page_pool_set_pp_info() is only called once for every page when it
++	 * is allocated from the page allocator and page_pool_fragment_page()
++	 * is dirtying the same cache line as the page->pp_magic above, so
++	 * the overhead is negligible.
++	 */
++	page_pool_fragment_page(page, 1);
+ 	if (pool->p.init_callback)
+ 		pool->p.init_callback(page, pool->p.init_arg);
+ }
+@@ -676,7 +684,7 @@ void page_pool_put_page_bulk(struct page
+ 		struct page *page = virt_to_head_page(data[i]);
+ 
+ 		/* It is not the last user for the page frag case */
+-		if (!page_pool_is_last_frag(pool, page))
++		if (!page_pool_is_last_frag(page))
+ 			continue;
+ 
+ 		page = __page_pool_put_page(pool, page, -1, false);
+@@ -752,8 +760,7 @@ struct page *page_pool_alloc_frag(struct
+ 	unsigned int max_size = PAGE_SIZE << pool->p.order;
+ 	struct page *page = pool->frag_page;
+ 
+-	if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
+-		    size > max_size))
++	if (WARN_ON(size > max_size))
+ 		return NULL;
+ 
+ 	size = ALIGN(size, dma_get_cache_alignment());

+ 101 - 0
target/linux/generic/backport-6.6/621-v6.10-01-net-move-skb_gro_receive_list-from-udp-to-core.patch

@@ -0,0 +1,101 @@
+From 8928756d53d5b99dcd18073dc7738b8ebdbe7d96 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Thu, 2 May 2024 10:44:42 +0200
+Subject: [PATCH 1/6] net: move skb_gro_receive_list from udp to core
+
+This helper function will be used for TCP fraglist GRO support
+
+Acked-by: Paolo Abeni <[email protected]>
+Reviewed-by: Eric Dumazet <[email protected]>
+Signed-off-by: Felix Fietkau <[email protected]>
+Reviewed-by: David Ahern <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ include/net/gro.h      |  1 +
+ net/core/gro.c         | 27 +++++++++++++++++++++++++++
+ net/ipv4/udp_offload.c | 27 ---------------------------
+ 3 files changed, 28 insertions(+), 27 deletions(-)
+
+--- a/include/net/gro.h
++++ b/include/net/gro.h
+@@ -439,6 +439,7 @@ static inline __wsum ip6_gro_compute_pse
+ }
+ 
+ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
++int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
+ 
+ /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+ static inline void gro_normal_list(struct napi_struct *napi)
+--- a/net/core/gro.c
++++ b/net/core/gro.c
+@@ -228,6 +228,33 @@ done:
+ 	return 0;
+ }
+ 
++int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
++{
++	if (unlikely(p->len + skb->len >= 65536))
++		return -E2BIG;
++
++	if (NAPI_GRO_CB(p)->last == p)
++		skb_shinfo(p)->frag_list = skb;
++	else
++		NAPI_GRO_CB(p)->last->next = skb;
++
++	skb_pull(skb, skb_gro_offset(skb));
++
++	NAPI_GRO_CB(p)->last = skb;
++	NAPI_GRO_CB(p)->count++;
++	p->data_len += skb->len;
++
++	/* sk ownership - if any - completely transferred to the aggregated packet */
++	skb->destructor = NULL;
++	skb->sk = NULL;
++	p->truesize += skb->truesize;
++	p->len += skb->len;
++
++	NAPI_GRO_CB(skb)->same_flow = 1;
++
++	return 0;
++}
++
+ 
+ static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+ {
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -474,33 +474,6 @@ out:
+ 	return segs;
+ }
+ 
+-static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+-{
+-	if (unlikely(p->len + skb->len >= 65536))
+-		return -E2BIG;
+-
+-	if (NAPI_GRO_CB(p)->last == p)
+-		skb_shinfo(p)->frag_list = skb;
+-	else
+-		NAPI_GRO_CB(p)->last->next = skb;
+-
+-	skb_pull(skb, skb_gro_offset(skb));
+-
+-	NAPI_GRO_CB(p)->last = skb;
+-	NAPI_GRO_CB(p)->count++;
+-	p->data_len += skb->len;
+-
+-	/* sk ownership - if any - completely transferred to the aggregated packet */
+-	skb->destructor = NULL;
+-	skb->sk = NULL;
+-	p->truesize += skb->truesize;
+-	p->len += skb->len;
+-
+-	NAPI_GRO_CB(skb)->same_flow = 1;
+-
+-	return 0;
+-}
+-
+ 
+ #define UDP_GRO_CNT_MAX 64
+ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,

+ 177 - 0
target/linux/generic/backport-6.6/621-v6.10-02-net-add-support-for-segmenting-TCP-fraglist-GSO-pack.patch

@@ -0,0 +1,177 @@
+From bee88cd5bd83d40b8aec4d6cb729378f707f6197 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Thu, 2 May 2024 10:44:43 +0200
+Subject: [PATCH 2/6] net: add support for segmenting TCP fraglist GSO packets
+
+Preparation for adding TCP fraglist GRO support. It expects packets to be
+combined in a similar way as UDP fraglist GSO packets.
+For IPv4 packets, NAT is handled in the same way as UDP fraglist GSO.
+
+Acked-by: Paolo Abeni <[email protected]>
+Reviewed-by: Eric Dumazet <[email protected]>
+Signed-off-by: Felix Fietkau <[email protected]>
+Reviewed-by: David Ahern <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ net/ipv4/tcp_offload.c   | 67 ++++++++++++++++++++++++++++++++++++++++
+ net/ipv6/tcpv6_offload.c | 58 ++++++++++++++++++++++++++++++++++
+ 2 files changed, 125 insertions(+)
+
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -31,6 +31,70 @@ static void tcp_gso_tstamp(struct sk_buf
+ 	}
+ }
+ 
++static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
++				     __be32 *oldip, __be32 newip,
++				     __be16 *oldport, __be16 newport)
++{
++	struct tcphdr *th;
++	struct iphdr *iph;
++
++	if (*oldip == newip && *oldport == newport)
++		return;
++
++	th = tcp_hdr(seg);
++	iph = ip_hdr(seg);
++
++	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
++	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
++	*oldport = newport;
++
++	csum_replace4(&iph->check, *oldip, newip);
++	*oldip = newip;
++}
++
++static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
++{
++	const struct tcphdr *th;
++	const struct iphdr *iph;
++	struct sk_buff *seg;
++	struct tcphdr *th2;
++	struct iphdr *iph2;
++
++	seg = segs;
++	th = tcp_hdr(seg);
++	iph = ip_hdr(seg);
++	th2 = tcp_hdr(seg->next);
++	iph2 = ip_hdr(seg->next);
++
++	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
++	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
++		return segs;
++
++	while ((seg = seg->next)) {
++		th2 = tcp_hdr(seg);
++		iph2 = ip_hdr(seg);
++
++		__tcpv4_gso_segment_csum(seg,
++					 &iph2->saddr, iph->saddr,
++					 &th2->source, th->source);
++		__tcpv4_gso_segment_csum(seg,
++					 &iph2->daddr, iph->daddr,
++					 &th2->dest, th->dest);
++	}
++
++	return segs;
++}
++
++static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
++					      netdev_features_t features)
++{
++	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
++	if (IS_ERR(skb))
++		return skb;
++
++	return __tcpv4_gso_segment_list_csum(skb);
++}
++
+ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
+ 					netdev_features_t features)
+ {
+@@ -40,6 +104,9 @@ static struct sk_buff *tcp4_gso_segment(
+ 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
+ 		return ERR_PTR(-EINVAL);
+ 
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
++		return __tcp4_gso_segment_list(skb, features);
++
+ 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ 		const struct iphdr *iph = ip_hdr(skb);
+ 		struct tcphdr *th = tcp_hdr(skb);
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -40,6 +40,61 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
+ 	return 0;
+ }
+ 
++static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
++				     __be16 *oldport, __be16 newport)
++{
++	struct tcphdr *th;
++
++	if (*oldport == newport)
++		return;
++
++	th = tcp_hdr(seg);
++	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
++	*oldport = newport;
++}
++
++static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
++{
++	const struct tcphdr *th;
++	const struct ipv6hdr *iph;
++	struct sk_buff *seg;
++	struct tcphdr *th2;
++	struct ipv6hdr *iph2;
++
++	seg = segs;
++	th = tcp_hdr(seg);
++	iph = ipv6_hdr(seg);
++	th2 = tcp_hdr(seg->next);
++	iph2 = ipv6_hdr(seg->next);
++
++	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
++	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
++	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
++		return segs;
++
++	while ((seg = seg->next)) {
++		th2 = tcp_hdr(seg);
++		iph2 = ipv6_hdr(seg);
++
++		iph2->saddr = iph->saddr;
++		iph2->daddr = iph->daddr;
++		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
++		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
++	}
++
++	return segs;
++}
++
++static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
++					      netdev_features_t features)
++{
++	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
++	if (IS_ERR(skb))
++		return skb;
++
++	return __tcpv6_gso_segment_list_csum(skb);
++}
++
+ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
+ 					netdev_features_t features)
+ {
+@@ -51,6 +106,9 @@ static struct sk_buff *tcp6_gso_segment(
+ 	if (!pskb_may_pull(skb, sizeof(*th)))
+ 		return ERR_PTR(-EINVAL);
+ 
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
++		return __tcp6_gso_segment_list(skb, features);
++
+ 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ 		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ 		struct tcphdr *th = tcp_hdr(skb);

+ 75 - 0
target/linux/generic/backport-6.6/621-v6.10-03-net-add-code-for-TCP-fraglist-GRO.patch

@@ -0,0 +1,75 @@
+From 8d95dc474f85481652a0e422d2f1f079de81f63c Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Thu, 2 May 2024 10:44:44 +0200
+Subject: [PATCH 3/6] net: add code for TCP fraglist GRO
+
+This implements fraglist GRO similar to how it's handled in UDP, however
+no functional changes are added yet. The next change adds a heuristic for
+using fraglist GRO instead of regular GRO.
+
+Acked-by: Paolo Abeni <[email protected]>
+Signed-off-by: Felix Fietkau <[email protected]>
+Reviewed-by: Eric Dumazet <[email protected]>
+Reviewed-by: David Ahern <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ net/ipv4/tcp_offload.c   | 21 +++++++++++++++++++++
+ net/ipv6/tcpv6_offload.c |  9 +++++++++
+ 2 files changed, 30 insertions(+)
+
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -342,6 +342,18 @@ found:
+ 	flush |= p->decrypted ^ skb->decrypted;
+ #endif
+ 
++	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
++		flush |= (__force int)(flags ^ tcp_flag_word(th2));
++		flush |= skb->ip_summed != p->ip_summed;
++		flush |= skb->csum_level != p->csum_level;
++		flush |= NAPI_GRO_CB(p)->count >= 64;
++
++		if (flush || skb_gro_receive_list(p, skb))
++			mss = 1;
++
++		goto out_check_final;
++	}
++
+ 	if (flush || skb_gro_receive(p, skb)) {
+ 		mss = 1;
+ 		goto out_check_final;
+@@ -406,6 +418,15 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_com
+ 	const struct iphdr *iph = ip_hdr(skb);
+ 	struct tcphdr *th = tcp_hdr(skb);
+ 
++	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
++		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
++		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
++
++		__skb_incr_checksum_unnecessary(skb);
++
++		return 0;
++	}
++
+ 	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
+ 				  iph->daddr, 0);
+ 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -32,6 +32,15 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
+ 	const struct ipv6hdr *iph = ipv6_hdr(skb);
+ 	struct tcphdr *th = tcp_hdr(skb);
+ 
++	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
++		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
++		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
++
++		__skb_incr_checksum_unnecessary(skb);
++
++		return 0;
++	}
++
+ 	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
+ 				  &iph->daddr, 0);
+ 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

+ 88 - 0
target/linux/generic/backport-6.6/621-v6.10-04-net-create-tcp_gro_lookup-helper-function.patch

@@ -0,0 +1,88 @@
+From 80e85fbdf19ecc4dfa31ecf639adb55555db02fe Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Thu, 2 May 2024 10:44:45 +0200
+Subject: [PATCH 4/6] net: create tcp_gro_lookup helper function
+
+This pulls the flow port matching out of tcp_gro_receive, so that it can be
+reused for the next change, which adds the TCP fraglist GRO heuristic.
+
+Acked-by: Paolo Abeni <[email protected]>
+Reviewed-by: Eric Dumazet <[email protected]>
+Signed-off-by: Felix Fietkau <[email protected]>
+Reviewed-by: David Ahern <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ include/net/tcp.h      |  1 +
+ net/ipv4/tcp_offload.c | 41 +++++++++++++++++++++++++----------------
+ 2 files changed, 26 insertions(+), 16 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2101,6 +2101,7 @@ void tcp_v4_destroy_sock(struct sock *sk
+ 
+ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 				netdev_features_t features);
++struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
+ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+ INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
+ INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -251,6 +251,27 @@ out:
+ 	return segs;
+ }
+ 
++struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
++{
++	struct tcphdr *th2;
++	struct sk_buff *p;
++
++	list_for_each_entry(p, head, list) {
++		if (!NAPI_GRO_CB(p)->same_flow)
++			continue;
++
++		th2 = tcp_hdr(p);
++		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
++			NAPI_GRO_CB(p)->same_flow = 0;
++			continue;
++		}
++
++		return p;
++	}
++
++	return NULL;
++}
++
+ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
+ 	struct sk_buff *pp = NULL;
+@@ -288,24 +309,12 @@ struct sk_buff *tcp_gro_receive(struct l
+ 	len = skb_gro_len(skb);
+ 	flags = tcp_flag_word(th);
+ 
+-	list_for_each_entry(p, head, list) {
+-		if (!NAPI_GRO_CB(p)->same_flow)
+-			continue;
+-
+-		th2 = tcp_hdr(p);
+-
+-		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
+-			NAPI_GRO_CB(p)->same_flow = 0;
+-			continue;
+-		}
+-
+-		goto found;
+-	}
+-	p = NULL;
+-	goto out_check_final;
++	p = tcp_gro_lookup(head, th);
++	if (!p)
++		goto out_check_final;
+ 
+-found:
+ 	/* Include the IP ID check below from the inner most IP hdr */
++	th2 = tcp_hdr(p);
+ 	flush = NAPI_GRO_CB(p)->flush;
+ 	flush |= (__force int)(flags & TCP_FLAG_CWR);
+ 	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &

+ 166 - 0
target/linux/generic/backport-6.6/621-v6.10-05-net-create-tcp_gro_header_pull-helper-function.patch

@@ -0,0 +1,166 @@
+From 7516b27c555c1711ec17a5d891befb6986e573a3 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Thu, 2 May 2024 10:44:46 +0200
+Subject: [PATCH 5/6] net: create tcp_gro_header_pull helper function
+
+Pull the code out of tcp_gro_receive in order to access the tcp header
+from tcp4/6_gro_receive.
+
+Acked-by: Paolo Abeni <[email protected]>
+Reviewed-by: Eric Dumazet <[email protected]>
+Signed-off-by: Felix Fietkau <[email protected]>
+Reviewed-by: David Ahern <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ include/net/tcp.h        |  4 ++-
+ net/ipv4/tcp_offload.c   | 55 +++++++++++++++++++++++++---------------
+ net/ipv6/tcpv6_offload.c | 18 +++++++++----
+ 3 files changed, 50 insertions(+), 27 deletions(-)
+
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2101,8 +2101,10 @@ void tcp_v4_destroy_sock(struct sock *sk
+ 
+ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+ 				netdev_features_t features);
++struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb);
+ struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
+-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
++struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
++				struct tcphdr *th);
+ INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
+ INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
+ INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -272,40 +272,46 @@ struct sk_buff *tcp_gro_lookup(struct li
+ 	return NULL;
+ }
+ 
+-struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
++struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
+ {
+-	struct sk_buff *pp = NULL;
+-	struct sk_buff *p;
++	unsigned int thlen, hlen, off;
+ 	struct tcphdr *th;
+-	struct tcphdr *th2;
+-	unsigned int len;
+-	unsigned int thlen;
+-	__be32 flags;
+-	unsigned int mss = 1;
+-	unsigned int hlen;
+-	unsigned int off;
+-	int flush = 1;
+-	int i;
+ 
+ 	off = skb_gro_offset(skb);
+ 	hlen = off + sizeof(*th);
+ 	th = skb_gro_header(skb, hlen, off);
+ 	if (unlikely(!th))
+-		goto out;
++		return NULL;
+ 
+ 	thlen = th->doff * 4;
+ 	if (thlen < sizeof(*th))
+-		goto out;
++		return NULL;
+ 
+ 	hlen = off + thlen;
+ 	if (skb_gro_header_hard(skb, hlen)) {
+ 		th = skb_gro_header_slow(skb, hlen, off);
+ 		if (unlikely(!th))
+-			goto out;
++			return NULL;
+ 	}
+ 
+ 	skb_gro_pull(skb, thlen);
+ 
++	return th;
++}
++
++struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
++				struct tcphdr *th)
++{
++	unsigned int thlen = th->doff * 4;
++	struct sk_buff *pp = NULL;
++	struct sk_buff *p;
++	struct tcphdr *th2;
++	unsigned int len;
++	__be32 flags;
++	unsigned int mss = 1;
++	int flush = 1;
++	int i;
++
+ 	len = skb_gro_len(skb);
+ 	flags = tcp_flag_word(th);
+ 
+@@ -384,7 +390,6 @@ out_check_final:
+ 	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
+ 		pp = p;
+ 
+-out:
+ 	NAPI_GRO_CB(skb)->flush |= (flush != 0);
+ 
+ 	return pp;
+@@ -411,15 +416,23 @@ EXPORT_SYMBOL(tcp_gro_complete);
+ INDIRECT_CALLABLE_SCOPE
+ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
++	struct tcphdr *th;
++
+ 	/* Don't bother verifying checksum if we're going to flush anyway. */
+ 	if (!NAPI_GRO_CB(skb)->flush &&
+ 	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
+-				      inet_gro_compute_pseudo)) {
+-		NAPI_GRO_CB(skb)->flush = 1;
+-		return NULL;
+-	}
++				      inet_gro_compute_pseudo))
++		goto flush;
+ 
+-	return tcp_gro_receive(head, skb);
++	th = tcp_gro_pull_header(skb);
++	if (!th)
++		goto flush;
++
++	return tcp_gro_receive(head, skb, th);
++
++flush:
++	NAPI_GRO_CB(skb)->flush = 1;
++	return NULL;
+ }
+ 
+ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -16,15 +16,23 @@
+ INDIRECT_CALLABLE_SCOPE
+ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
++	struct tcphdr *th;
++
+ 	/* Don't bother verifying checksum if we're going to flush anyway. */
+ 	if (!NAPI_GRO_CB(skb)->flush &&
+ 	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
+-				      ip6_gro_compute_pseudo)) {
+-		NAPI_GRO_CB(skb)->flush = 1;
+-		return NULL;
+-	}
++				      ip6_gro_compute_pseudo))
++		goto flush;
++
++	th = tcp_gro_pull_header(skb);
++	if (!th)
++		goto flush;
++
++	return tcp_gro_receive(head, skb, th);
+ 
+-	return tcp_gro_receive(head, skb);
++flush:
++	NAPI_GRO_CB(skb)->flush = 1;
++	return NULL;
+ }
+ 
+ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)

+ 140 - 0
target/linux/generic/backport-6.6/621-v6.10-06-net-add-heuristic-for-enabling-TCP-fraglist-GRO.patch

@@ -0,0 +1,140 @@
+From c9d1d23e5239f41700be69133a5769ac5ebc88a8 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Thu, 2 May 2024 10:44:47 +0200
+Subject: [PATCH 6/6] net: add heuristic for enabling TCP fraglist GRO
+
+When forwarding TCP after GRO, software segmentation is very expensive,
+especially when the checksum needs to be recalculated.
+One case where that's currently unavoidable is when routing packets over
+PPPoE. Performance improves significantly when using fraglist GRO
+implemented in the same way as for UDP.
+
+When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
+socket in the same netns as the receiving device. While this may not
+cover all relevant use cases in multi-netns configurations, it should be
+good enough for most configurations that need this.
+
+Here's a measurement of running 2 TCP streams through a MediaTek MT7622
+device (2-core Cortex-A53), which runs NAT with flow offload enabled from
+one ethernet port to PPPoE on another ethernet port + cake qdisc set to
+1Gbps.
+
+rx-gro-list off: 630 Mbit/s, CPU 35% idle
+rx-gro-list on:  770 Mbit/s, CPU 40% idle
+
+Acked-by: Paolo Abeni <[email protected]>
+Reviewed-by: Eric Dumazet <[email protected]>
+Signed-off-by: Felix Fietkau <[email protected]>
+Reviewed-by: David Ahern <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ net/ipv4/tcp_offload.c   | 32 ++++++++++++++++++++++++++++++++
+ net/ipv6/tcpv6_offload.c | 35 +++++++++++++++++++++++++++++++++++
+ 2 files changed, 67 insertions(+)
+
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -413,6 +413,36 @@ void tcp_gro_complete(struct sk_buff *sk
+ }
+ EXPORT_SYMBOL(tcp_gro_complete);
+ 
++static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
++				    struct tcphdr *th)
++{
++	const struct iphdr *iph;
++	struct sk_buff *p;
++	struct sock *sk;
++	struct net *net;
++	int iif, sdif;
++
++	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
++		return;
++
++	p = tcp_gro_lookup(head, th);
++	if (p) {
++		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
++		return;
++	}
++
++	inet_get_iif_sdif(skb, &iif, &sdif);
++	iph = skb_gro_network_header(skb);
++	net = dev_net(skb->dev);
++	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
++				       iph->saddr, th->source,
++				       iph->daddr, ntohs(th->dest),
++				       iif, sdif);
++	NAPI_GRO_CB(skb)->is_flist = !sk;
++	if (sk)
++		sock_put(sk);
++}
++
+ INDIRECT_CALLABLE_SCOPE
+ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
+@@ -428,6 +458,8 @@ struct sk_buff *tcp4_gro_receive(struct
+ 	if (!th)
+ 		goto flush;
+ 
++	tcp4_check_fraglist_gro(head, skb, th);
++
+ 	return tcp_gro_receive(head, skb, th);
+ 
+ flush:
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -7,12 +7,45 @@
+  */
+ #include <linux/indirect_call_wrapper.h>
+ #include <linux/skbuff.h>
++#include <net/inet6_hashtables.h>
+ #include <net/gro.h>
+ #include <net/protocol.h>
+ #include <net/tcp.h>
+ #include <net/ip6_checksum.h>
+ #include "ip6_offload.h"
+ 
++static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
++				    struct tcphdr *th)
++{
++#if IS_ENABLED(CONFIG_IPV6)
++	const struct ipv6hdr *hdr;
++	struct sk_buff *p;
++	struct sock *sk;
++	struct net *net;
++	int iif, sdif;
++
++	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
++		return;
++
++	p = tcp_gro_lookup(head, th);
++	if (p) {
++		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
++		return;
++	}
++
++	inet6_get_iif_sdif(skb, &iif, &sdif);
++	hdr = skb_gro_network_header(skb);
++	net = dev_net(skb->dev);
++	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
++					&hdr->saddr, th->source,
++					&hdr->daddr, ntohs(th->dest),
++					iif, sdif);
++	NAPI_GRO_CB(skb)->is_flist = !sk;
++	if (sk)
++		sock_put(sk);
++#endif /* IS_ENABLED(CONFIG_IPV6) */
++}
++
+ INDIRECT_CALLABLE_SCOPE
+ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
+ {
+@@ -28,6 +61,8 @@ struct sk_buff *tcp6_gro_receive(struct
+ 	if (!th)
+ 		goto flush;
+ 
++	tcp6_check_fraglist_gro(head, skb, th);
++
+ 	return tcp_gro_receive(head, skb, th);
+ 
+ flush:

+ 80 - 0
target/linux/generic/backport-6.6/622-v6.12-net-gso-fix-tcp-fraglist-segmentation-after-pull-fro.patch

@@ -0,0 +1,80 @@
+From 17bd3bd82f9f79f3feba15476c2b2c95a9b11ff8 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Thu, 26 Sep 2024 10:53:14 +0200
+Subject: [PATCH] net: gso: fix tcp fraglist segmentation after pull from
+ frag_list
+
+Detect tcp gso fraglist skbs with corrupted geometry (see below) and
+pass these to skb_segment instead of skb_segment_list, as the first
+can segment them correctly.
+
+Valid SKB_GSO_FRAGLIST skbs
+- consist of two or more segments
+- the head_skb holds the protocol headers plus first gso_size
+- one or more frag_list skbs hold exactly one segment
+- all but the last must be gso_size
+
+Optional datapath hooks such as NAT and BPF (bpf_skb_pull_data) can
+modify these skbs, breaking these invariants.
+
+In extreme cases they pull all data into skb linear. For TCP, this
+causes a NULL ptr deref in __tcpv4_gso_segment_list_csum at
+tcp_hdr(seg->next).
+
+Detect invalid geometry due to pull, by checking head_skb size.
+Don't just drop, as this may blackhole a destination. Convert to be
+able to pass to regular skb_segment.
+
+Approach and description based on a patch by Willem de Bruijn.
+
+Link: https://lore.kernel.org/netdev/[email protected]/
+Link: https://lore.kernel.org/netdev/[email protected]/
+Fixes: bee88cd5bd83 ("net: add support for segmenting TCP fraglist GSO packets")
+Cc: [email protected]
+Signed-off-by: Felix Fietkau <[email protected]>
+Reviewed-by: Willem de Bruijn <[email protected]>
+Link: https://patch.msgid.link/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ net/ipv4/tcp_offload.c   | 10 ++++++++--
+ net/ipv6/tcpv6_offload.c | 10 ++++++++--
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -104,8 +104,14 @@ static struct sk_buff *tcp4_gso_segment(
+ 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+-		return __tcp4_gso_segment_list(skb, features);
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
++		struct tcphdr *th = tcp_hdr(skb);
++
++		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
++			return __tcp4_gso_segment_list(skb, features);
++
++		skb->ip_summed = CHECKSUM_NONE;
++	}
+ 
+ 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ 		const struct iphdr *iph = ip_hdr(skb);
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -158,8 +158,14 @@ static struct sk_buff *tcp6_gso_segment(
+ 	if (!pskb_may_pull(skb, sizeof(*th)))
+ 		return ERR_PTR(-EINVAL);
+ 
+-	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)
+-		return __tcp6_gso_segment_list(skb, features);
++	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
++		struct tcphdr *th = tcp_hdr(skb);
++
++		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
++			return __tcp6_gso_segment_list(skb, features);
++
++		skb->ip_summed = CHECKSUM_NONE;
++	}
+ 
+ 	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ 		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

+ 59 - 0
target/linux/generic/backport-6.6/623-v6.14-net-ipv6-fix-TCP-GSO-segmentation-with-NAT.patch

@@ -0,0 +1,59 @@
+From daa624d3c2ddffdcbad140a9625a4064371db44f Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Tue, 11 Mar 2025 22:25:30 +0100
+Subject: [PATCH] net: ipv6: fix TCP GSO segmentation with NAT
+
+When updating the source/destination address, the TCP/UDP checksum needs to
+be updated as well.
+
+Fixes: bee88cd5bd83 ("net: add support for segmenting TCP fraglist GSO packets")
+Signed-off-by: Felix Fietkau <[email protected]>
+Link: https://patch.msgid.link/[email protected]
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ net/ipv6/tcpv6_offload.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -93,14 +93,23 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_com
+ }
+ 
+ static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
++				     struct in6_addr *oldip,
++				     const struct in6_addr *newip,
+ 				     __be16 *oldport, __be16 newport)
+ {
+-	struct tcphdr *th;
++	struct tcphdr *th = tcp_hdr(seg);
++
++	if (!ipv6_addr_equal(oldip, newip)) {
++		inet_proto_csum_replace16(&th->check, seg,
++					  oldip->s6_addr32,
++					  newip->s6_addr32,
++					  true);
++		*oldip = *newip;
++	}
+ 
+ 	if (*oldport == newport)
+ 		return;
+ 
+-	th = tcp_hdr(seg);
+ 	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
+ 	*oldport = newport;
+ }
+@@ -128,10 +137,10 @@ static struct sk_buff *__tcpv6_gso_segme
+ 		th2 = tcp_hdr(seg);
+ 		iph2 = ipv6_hdr(seg);
+ 
+-		iph2->saddr = iph->saddr;
+-		iph2->daddr = iph->daddr;
+-		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
+-		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
++		__tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
++					 &th2->source, th->source);
++		__tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
++					 &th2->dest, th->dest);
+ 	}
+ 
+ 	return segs;

+ 135 - 0
target/linux/generic/backport-6.6/700-v6.10-net-create-a-dummy-net_device-allocator.patch

@@ -0,0 +1,135 @@
+From c661050f93d3fd37a33c06041bb18a89688de7d2 Mon Sep 17 00:00:00 2001
+From: Breno Leitao <[email protected]>
+Date: Mon, 22 Apr 2024 05:38:56 -0700
+Subject: [PATCH] net: create a dummy net_device allocator
+
+It is impossible to use init_dummy_netdev together with alloc_netdev()
+as the 'setup' argument.
+
+This is because alloc_netdev() initializes some fields in the net_device
+structure, and later init_dummy_netdev() memzero them all. This causes
+some problems as reported here:
+
+	https://lore.kernel.org/all/[email protected]/
+
+Split the init_dummy_netdev() function in two. Create a new function called
+init_dummy_netdev_core() that does not memzero the net_device structure.
+Then have init_dummy_netdev() memzero-ing and calling
+init_dummy_netdev_core(), keeping the old behaviour.
+
+init_dummy_netdev_core() is the new function that could be called as an
+argument for alloc_netdev().
+
+Also, create a helper to allocate and initialize dummy net devices,
+leveraging init_dummy_netdev_core() as the setup argument. This function
+basically simplify the allocation of dummy devices, by allocating and
+initializing it. Freeing the device continue to be done through
+free_netdev()
+
+Suggested-by: Jakub Kicinski <[email protected]>
+Signed-off-by: Breno Leitao <[email protected]>
+Reviewed-by: Ido Schimmel <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ include/linux/netdevice.h |  3 +++
+ net/core/dev.c            | 56 ++++++++++++++++++++++++++-------------
+ 2 files changed, 41 insertions(+), 18 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -4569,6 +4569,9 @@ static inline void netif_addr_unlock_bh(
+ 
+ void ether_setup(struct net_device *dev);
+ 
++/* Allocate dummy net_device */
++struct net_device *alloc_netdev_dummy(int sizeof_priv);
++
+ /* Support for loadable net-drivers */
+ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ 				    unsigned char name_assign_type,
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -10390,25 +10390,12 @@ err_free_name:
+ }
+ EXPORT_SYMBOL(register_netdevice);
+ 
+-/**
+- *	init_dummy_netdev	- init a dummy network device for NAPI
+- *	@dev: device to init
+- *
+- *	This takes a network device structure and initialize the minimum
+- *	amount of fields so it can be used to schedule NAPI polls without
+- *	registering a full blown interface. This is to be used by drivers
+- *	that need to tie several hardware interfaces to a single NAPI
+- *	poll scheduler due to HW limitations.
++/* Initialize the core of a dummy net device.
++ * This is useful if you are calling this function after alloc_netdev(),
++ * since it does not memset the net_device fields.
+  */
+-int init_dummy_netdev(struct net_device *dev)
++static void init_dummy_netdev_core(struct net_device *dev)
+ {
+-	/* Clear everything. Note we don't initialize spinlocks
+-	 * are they aren't supposed to be taken by any of the
+-	 * NAPI code and this dummy netdev is supposed to be
+-	 * only ever used for NAPI polls
+-	 */
+-	memset(dev, 0, sizeof(struct net_device));
+-
+ 	/* make sure we BUG if trying to hit standard
+ 	 * register/unregister code path
+ 	 */
+@@ -10428,12 +10415,32 @@ int init_dummy_netdev(struct net_device
+ 	 * because users of this 'device' dont need to change
+ 	 * its refcount.
+ 	 */
++}
++
++/**
++ *	init_dummy_netdev	- init a dummy network device for NAPI
++ *	@dev: device to init
++ *
++ *	This takes a network device structure and initializes the minimum
++ *	amount of fields so it can be used to schedule NAPI polls without
++ *	registering a full blown interface. This is to be used by drivers
++ *	that need to tie several hardware interfaces to a single NAPI
++ *	poll scheduler due to HW limitations.
++ */
++int init_dummy_netdev(struct net_device *dev)
++{
++	/* Clear everything. Note we don't initialize spinlocks
++	 * as they aren't supposed to be taken by any of the
++	 * NAPI code and this dummy netdev is supposed to be
++	 * only ever used for NAPI polls
++	 */
++	memset(dev, 0, sizeof(struct net_device));
++	init_dummy_netdev_core(dev);
+ 
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(init_dummy_netdev);
+ 
+-
+ /**
+  *	register_netdev	- register a network device
+  *	@dev: device to register
+@@ -11027,6 +11034,19 @@ void free_netdev(struct net_device *dev)
+ EXPORT_SYMBOL(free_netdev);
+ 
+ /**
++ * alloc_netdev_dummy - Allocate and initialize a dummy net device.
++ * @sizeof_priv: size of private data to allocate space for
++ *
++ * Return: the allocated net_device on success, NULL otherwise
++ */
++struct net_device *alloc_netdev_dummy(int sizeof_priv)
++{
++	return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
++			    init_dummy_netdev_core);
++}
++EXPORT_SYMBOL_GPL(alloc_netdev_dummy);
++
++/**
+  *	synchronize_net -  Synchronize with packet receive processing
+  *
+  *	Wait for packets currently being received to be done.

+ 29 - 0
target/linux/generic/backport-6.6/701-v6.8-netfilter-nf_tables-fix-bidirectional-offload-regres.patch

@@ -0,0 +1,29 @@
+From 84443741faab9045d53f022a9ac6a6633067a481 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <[email protected]>
+Date: Wed, 14 Feb 2024 15:42:35 +0100
+Subject: [PATCH] netfilter: nf_tables: fix bidirectional offload regression
+
+Commit 8f84780b84d6 ("netfilter: flowtable: allow unidirectional rules")
+made unidirectional flow offload possible, while completely ignoring (and
+breaking) bidirectional flow offload for nftables.
+Add the missing flag that was left out as an exercise for the reader :)
+
+Cc: Vlad Buslov <[email protected]>
+Fixes: 8f84780b84d6 ("netfilter: flowtable: allow unidirectional rules")
+Reported-by: Daniel Golle <[email protected]>
+Signed-off-by: Felix Fietkau <[email protected]>
+Signed-off-by: Pablo Neira Ayuso <[email protected]>
+---
+ net/netfilter/nft_flow_offload.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -367,6 +367,7 @@ static void nft_flow_offload_eval(const
+ 	if (tcph)
+ 		flow_offload_ct_tcp(ct);
+ 
++	__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
+ 	ret = flow_offload_add(flowtable, flow);
+ 	if (ret < 0)
+ 		goto err_flow_add;

+ 2386 - 0
target/linux/generic/backport-6.6/702-01-v6.7-net-phy-aquantia-move-to-separate-directory.patch

@@ -0,0 +1,2386 @@
+From d2213db3f49bce8e7a87c8de05b9a091f78f654e Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Tue, 14 Nov 2023 15:08:41 +0100
+Subject: [PATCH 1/3] net: phy: aquantia: move to separate directory
+
+Move aquantia PHY driver to separate driectory in preparation for
+firmware loading support to keep things tidy.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/Kconfig                         | 5 +----
+ drivers/net/phy/Makefile                        | 6 +-----
+ drivers/net/phy/aquantia/Kconfig                | 5 +++++
+ drivers/net/phy/aquantia/Makefile               | 6 ++++++
+ drivers/net/phy/{ => aquantia}/aquantia.h       | 0
+ drivers/net/phy/{ => aquantia}/aquantia_hwmon.c | 0
+ drivers/net/phy/{ => aquantia}/aquantia_main.c  | 0
+ 7 files changed, 13 insertions(+), 9 deletions(-)
+ create mode 100644 drivers/net/phy/aquantia/Kconfig
+ create mode 100644 drivers/net/phy/aquantia/Makefile
+ rename drivers/net/phy/{ => aquantia}/aquantia.h (100%)
+ rename drivers/net/phy/{ => aquantia}/aquantia_hwmon.c (100%)
+ rename drivers/net/phy/{ => aquantia}/aquantia_main.c (100%)
+
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -96,10 +96,7 @@ config ADIN1100_PHY
+ 	  Currently supports the:
+ 	  - ADIN1100 - Robust,Industrial, Low Power 10BASE-T1L Ethernet PHY
+ 
+-config AQUANTIA_PHY
+-	tristate "Aquantia PHYs"
+-	help
+-	  Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
++source "drivers/net/phy/aquantia/Kconfig"
+ 
+ config AX88796B_PHY
+ 	tristate "Asix PHYs"
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -35,11 +35,7 @@ obj-y				+= $(sfp-obj-y) $(sfp-obj-m)
+ obj-$(CONFIG_ADIN_PHY)		+= adin.o
+ obj-$(CONFIG_ADIN1100_PHY)	+= adin1100.o
+ obj-$(CONFIG_AMD_PHY)		+= amd.o
+-aquantia-objs			+= aquantia_main.o
+-ifdef CONFIG_HWMON
+-aquantia-objs			+= aquantia_hwmon.o
+-endif
+-obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia.o
++obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia/
+ obj-$(CONFIG_AT803X_PHY)	+= at803x.o
+ obj-$(CONFIG_AX88796B_PHY)	+= ax88796b.o
+ obj-$(CONFIG_BCM54140_PHY)	+= bcm54140.o
+--- /dev/null
++++ b/drivers/net/phy/aquantia/Kconfig
+@@ -0,0 +1,5 @@
++# SPDX-License-Identifier: GPL-2.0-only
++config AQUANTIA_PHY
++	tristate "Aquantia PHYs"
++	help
++	  Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+--- /dev/null
++++ b/drivers/net/phy/aquantia/Makefile
+@@ -0,0 +1,6 @@
++# SPDX-License-Identifier: GPL-2.0
++aquantia-objs			+= aquantia_main.o
++ifdef CONFIG_HWMON
++aquantia-objs			+= aquantia_hwmon.o
++endif
++obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia.o
+--- a/drivers/net/phy/aquantia.h
++++ /dev/null
+@@ -1,16 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/* HWMON driver for Aquantia PHY
+- *
+- * Author: Nikita Yushchenko <[email protected]>
+- * Author: Andrew Lunn <[email protected]>
+- * Author: Heiner Kallweit <[email protected]>
+- */
+-
+-#include <linux/device.h>
+-#include <linux/phy.h>
+-
+-#if IS_REACHABLE(CONFIG_HWMON)
+-int aqr_hwmon_probe(struct phy_device *phydev);
+-#else
+-static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
+-#endif
+--- /dev/null
++++ b/drivers/net/phy/aquantia/aquantia.h
+@@ -0,0 +1,16 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* HWMON driver for Aquantia PHY
++ *
++ * Author: Nikita Yushchenko <[email protected]>
++ * Author: Andrew Lunn <[email protected]>
++ * Author: Heiner Kallweit <[email protected]>
++ */
++
++#include <linux/device.h>
++#include <linux/phy.h>
++
++#if IS_REACHABLE(CONFIG_HWMON)
++int aqr_hwmon_probe(struct phy_device *phydev);
++#else
++static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
++#endif
+--- /dev/null
++++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
+@@ -0,0 +1,250 @@
++// SPDX-License-Identifier: GPL-2.0
++/* HWMON driver for Aquantia PHY
++ *
++ * Author: Nikita Yushchenko <[email protected]>
++ * Author: Andrew Lunn <[email protected]>
++ * Author: Heiner Kallweit <[email protected]>
++ */
++
++#include <linux/phy.h>
++#include <linux/device.h>
++#include <linux/ctype.h>
++#include <linux/hwmon.h>
++
++#include "aquantia.h"
++
++/* Vendor specific 1, MDIO_MMD_VEND2 */
++#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
++#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
++#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
++#define VEND1_THERMAL_PROV_LOW_TEMP_WARN	0xc424
++#define VEND1_THERMAL_STAT1			0xc820
++#define VEND1_THERMAL_STAT2			0xc821
++#define VEND1_THERMAL_STAT2_VALID		BIT(0)
++#define VEND1_GENERAL_STAT1			0xc830
++#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL	BIT(14)
++#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL	BIT(13)
++#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN	BIT(12)
++#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN	BIT(11)
++
++#if IS_REACHABLE(CONFIG_HWMON)
++
++static umode_t aqr_hwmon_is_visible(const void *data,
++				    enum hwmon_sensor_types type,
++				    u32 attr, int channel)
++{
++	if (type != hwmon_temp)
++		return 0;
++
++	switch (attr) {
++	case hwmon_temp_input:
++	case hwmon_temp_min_alarm:
++	case hwmon_temp_max_alarm:
++	case hwmon_temp_lcrit_alarm:
++	case hwmon_temp_crit_alarm:
++		return 0444;
++	case hwmon_temp_min:
++	case hwmon_temp_max:
++	case hwmon_temp_lcrit:
++	case hwmon_temp_crit:
++		return 0644;
++	default:
++		return 0;
++	}
++}
++
++static int aqr_hwmon_get(struct phy_device *phydev, int reg, long *value)
++{
++	int temp = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
++
++	if (temp < 0)
++		return temp;
++
++	/* 16 bit value is 2's complement with LSB = 1/256th degree Celsius */
++	*value = (s16)temp * 1000 / 256;
++
++	return 0;
++}
++
++static int aqr_hwmon_set(struct phy_device *phydev, int reg, long value)
++{
++	int temp;
++
++	if (value >= 128000 || value < -128000)
++		return -ERANGE;
++
++	temp = value * 256 / 1000;
++
++	/* temp is in s16 range and we're interested in lower 16 bits only */
++	return phy_write_mmd(phydev, MDIO_MMD_VEND1, reg, (u16)temp);
++}
++
++static int aqr_hwmon_test_bit(struct phy_device *phydev, int reg, int bit)
++{
++	int val = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
++
++	if (val < 0)
++		return val;
++
++	return !!(val & bit);
++}
++
++static int aqr_hwmon_status1(struct phy_device *phydev, int bit, long *value)
++{
++	int val = aqr_hwmon_test_bit(phydev, VEND1_GENERAL_STAT1, bit);
++
++	if (val < 0)
++		return val;
++
++	*value = val;
++
++	return 0;
++}
++
++static int aqr_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
++			  u32 attr, int channel, long *value)
++{
++	struct phy_device *phydev = dev_get_drvdata(dev);
++	int reg;
++
++	if (type != hwmon_temp)
++		return -EOPNOTSUPP;
++
++	switch (attr) {
++	case hwmon_temp_input:
++		reg = aqr_hwmon_test_bit(phydev, VEND1_THERMAL_STAT2,
++					 VEND1_THERMAL_STAT2_VALID);
++		if (reg < 0)
++			return reg;
++		if (!reg)
++			return -EBUSY;
++
++		return aqr_hwmon_get(phydev, VEND1_THERMAL_STAT1, value);
++
++	case hwmon_temp_lcrit:
++		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
++				     value);
++	case hwmon_temp_min:
++		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
++				     value);
++	case hwmon_temp_max:
++		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
++				     value);
++	case hwmon_temp_crit:
++		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
++				     value);
++	case hwmon_temp_lcrit_alarm:
++		return aqr_hwmon_status1(phydev,
++					 VEND1_GENERAL_STAT1_LOW_TEMP_FAIL,
++					 value);
++	case hwmon_temp_min_alarm:
++		return aqr_hwmon_status1(phydev,
++					 VEND1_GENERAL_STAT1_LOW_TEMP_WARN,
++					 value);
++	case hwmon_temp_max_alarm:
++		return aqr_hwmon_status1(phydev,
++					 VEND1_GENERAL_STAT1_HIGH_TEMP_WARN,
++					 value);
++	case hwmon_temp_crit_alarm:
++		return aqr_hwmon_status1(phydev,
++					 VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL,
++					 value);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++static int aqr_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
++			   u32 attr, int channel, long value)
++{
++	struct phy_device *phydev = dev_get_drvdata(dev);
++
++	if (type != hwmon_temp)
++		return -EOPNOTSUPP;
++
++	switch (attr) {
++	case hwmon_temp_lcrit:
++		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
++				     value);
++	case hwmon_temp_min:
++		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
++				     value);
++	case hwmon_temp_max:
++		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
++				     value);
++	case hwmon_temp_crit:
++		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
++				     value);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++static const struct hwmon_ops aqr_hwmon_ops = {
++	.is_visible = aqr_hwmon_is_visible,
++	.read = aqr_hwmon_read,
++	.write = aqr_hwmon_write,
++};
++
++static u32 aqr_hwmon_chip_config[] = {
++	HWMON_C_REGISTER_TZ,
++	0,
++};
++
++static const struct hwmon_channel_info aqr_hwmon_chip = {
++	.type = hwmon_chip,
++	.config = aqr_hwmon_chip_config,
++};
++
++static u32 aqr_hwmon_temp_config[] = {
++	HWMON_T_INPUT |
++	HWMON_T_MAX | HWMON_T_MIN |
++	HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
++	HWMON_T_CRIT | HWMON_T_LCRIT |
++	HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
++	0,
++};
++
++static const struct hwmon_channel_info aqr_hwmon_temp = {
++	.type = hwmon_temp,
++	.config = aqr_hwmon_temp_config,
++};
++
++static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
++	&aqr_hwmon_chip,
++	&aqr_hwmon_temp,
++	NULL,
++};
++
++static const struct hwmon_chip_info aqr_hwmon_chip_info = {
++	.ops = &aqr_hwmon_ops,
++	.info = aqr_hwmon_info,
++};
++
++int aqr_hwmon_probe(struct phy_device *phydev)
++{
++	struct device *dev = &phydev->mdio.dev;
++	struct device *hwmon_dev;
++	char *hwmon_name;
++	int i, j;
++
++	hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
++	if (!hwmon_name)
++		return -ENOMEM;
++
++	for (i = j = 0; hwmon_name[i]; i++) {
++		if (isalnum(hwmon_name[i])) {
++			if (i != j)
++				hwmon_name[j] = hwmon_name[i];
++			j++;
++		}
++	}
++	hwmon_name[j] = '\0';
++
++	hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
++					phydev, &aqr_hwmon_chip_info, NULL);
++
++	return PTR_ERR_OR_ZERO(hwmon_dev);
++}
++
++#endif
+--- /dev/null
++++ b/drivers/net/phy/aquantia/aquantia_main.c
+@@ -0,0 +1,882 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Driver for Aquantia PHY
++ *
++ * Author: Shaohui Xie <[email protected]>
++ *
++ * Copyright 2015 Freescale Semiconductor, Inc.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/bitfield.h>
++#include <linux/phy.h>
++
++#include "aquantia.h"
++
++#define PHY_ID_AQ1202	0x03a1b445
++#define PHY_ID_AQ2104	0x03a1b460
++#define PHY_ID_AQR105	0x03a1b4a2
++#define PHY_ID_AQR106	0x03a1b4d0
++#define PHY_ID_AQR107	0x03a1b4e0
++#define PHY_ID_AQCS109	0x03a1b5c2
++#define PHY_ID_AQR405	0x03a1b4b0
++#define PHY_ID_AQR112	0x03a1b662
++#define PHY_ID_AQR412	0x03a1b712
++#define PHY_ID_AQR113C	0x31c31c12
++
++#define MDIO_PHYXS_VEND_IF_STATUS		0xe812
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK	GENMASK(7, 3)
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR	0
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX	1
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI	2
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII	3
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI	4
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII	6
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI	7
++#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII	10
++
++#define MDIO_AN_VEND_PROV			0xc400
++#define MDIO_AN_VEND_PROV_1000BASET_FULL	BIT(15)
++#define MDIO_AN_VEND_PROV_1000BASET_HALF	BIT(14)
++#define MDIO_AN_VEND_PROV_5000BASET_FULL	BIT(11)
++#define MDIO_AN_VEND_PROV_2500BASET_FULL	BIT(10)
++#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN		BIT(4)
++#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK	GENMASK(3, 0)
++#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT	4
++
++#define MDIO_AN_TX_VEND_STATUS1			0xc800
++#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK	GENMASK(3, 1)
++#define MDIO_AN_TX_VEND_STATUS1_10BASET		0
++#define MDIO_AN_TX_VEND_STATUS1_100BASETX	1
++#define MDIO_AN_TX_VEND_STATUS1_1000BASET	2
++#define MDIO_AN_TX_VEND_STATUS1_10GBASET	3
++#define MDIO_AN_TX_VEND_STATUS1_2500BASET	4
++#define MDIO_AN_TX_VEND_STATUS1_5000BASET	5
++#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX	BIT(0)
++
++#define MDIO_AN_TX_VEND_INT_STATUS1		0xcc00
++#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT	BIT(1)
++
++#define MDIO_AN_TX_VEND_INT_STATUS2		0xcc01
++#define MDIO_AN_TX_VEND_INT_STATUS2_MASK	BIT(0)
++
++#define MDIO_AN_TX_VEND_INT_MASK2		0xd401
++#define MDIO_AN_TX_VEND_INT_MASK2_LINK		BIT(0)
++
++#define MDIO_AN_RX_LP_STAT1			0xe820
++#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL	BIT(15)
++#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF	BIT(14)
++#define MDIO_AN_RX_LP_STAT1_SHORT_REACH		BIT(13)
++#define MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT	BIT(12)
++#define MDIO_AN_RX_LP_STAT1_AQ_PHY		BIT(2)
++
++#define MDIO_AN_RX_LP_STAT4			0xe823
++#define MDIO_AN_RX_LP_STAT4_FW_MAJOR		GENMASK(15, 8)
++#define MDIO_AN_RX_LP_STAT4_FW_MINOR		GENMASK(7, 0)
++
++#define MDIO_AN_RX_VEND_STAT3			0xe832
++#define MDIO_AN_RX_VEND_STAT3_AFR		BIT(0)
++
++/* MDIO_MMD_C22EXT */
++#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES		0xd292
++#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES		0xd294
++#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER		0xd297
++#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES		0xd313
++#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES		0xd315
++#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER		0xd317
++#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS		0xd318
++#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS	0xd319
++#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR	0xd31a
++#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES		0xd31b
++
++/* Vendor specific 1, MDIO_MMD_VEND1 */
++#define VEND1_GLOBAL_FW_ID			0x0020
++#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
++#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
++
++#define VEND1_GLOBAL_GEN_STAT2			0xc831
++#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
++
++/* The following registers all have similar layouts; first the registers... */
++#define VEND1_GLOBAL_CFG_10M			0x0310
++#define VEND1_GLOBAL_CFG_100M			0x031b
++#define VEND1_GLOBAL_CFG_1G			0x031c
++#define VEND1_GLOBAL_CFG_2_5G			0x031d
++#define VEND1_GLOBAL_CFG_5G			0x031e
++#define VEND1_GLOBAL_CFG_10G			0x031f
++/* ...and now the fields */
++#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
++#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
++#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
++#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
++
++#define VEND1_GLOBAL_RSVD_STAT1			0xc885
++#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
++#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
++
++#define VEND1_GLOBAL_RSVD_STAT9			0xc88d
++#define VEND1_GLOBAL_RSVD_STAT9_MODE		GENMASK(7, 0)
++#define VEND1_GLOBAL_RSVD_STAT9_1000BT2		0x23
++
++#define VEND1_GLOBAL_INT_STD_STATUS		0xfc00
++#define VEND1_GLOBAL_INT_VEND_STATUS		0xfc01
++
++#define VEND1_GLOBAL_INT_STD_MASK		0xff00
++#define VEND1_GLOBAL_INT_STD_MASK_PMA1		BIT(15)
++#define VEND1_GLOBAL_INT_STD_MASK_PMA2		BIT(14)
++#define VEND1_GLOBAL_INT_STD_MASK_PCS1		BIT(13)
++#define VEND1_GLOBAL_INT_STD_MASK_PCS2		BIT(12)
++#define VEND1_GLOBAL_INT_STD_MASK_PCS3		BIT(11)
++#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1	BIT(10)
++#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2	BIT(9)
++#define VEND1_GLOBAL_INT_STD_MASK_AN1		BIT(8)
++#define VEND1_GLOBAL_INT_STD_MASK_AN2		BIT(7)
++#define VEND1_GLOBAL_INT_STD_MASK_GBE		BIT(6)
++#define VEND1_GLOBAL_INT_STD_MASK_ALL		BIT(0)
++
++#define VEND1_GLOBAL_INT_VEND_MASK		0xff01
++#define VEND1_GLOBAL_INT_VEND_MASK_PMA		BIT(15)
++#define VEND1_GLOBAL_INT_VEND_MASK_PCS		BIT(14)
++#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS	BIT(13)
++#define VEND1_GLOBAL_INT_VEND_MASK_AN		BIT(12)
++#define VEND1_GLOBAL_INT_VEND_MASK_GBE		BIT(11)
++#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1	BIT(2)
++#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2	BIT(1)
++#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3	BIT(0)
++
++/* Sleep and timeout for checking if the Processor-Intensive
++ * MDIO operation is finished
++ */
++#define AQR107_OP_IN_PROG_SLEEP		1000
++#define AQR107_OP_IN_PROG_TIMEOUT	100000
++
++struct aqr107_hw_stat {
++	const char *name;
++	int reg;
++	int size;
++};
++
++#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
++static const struct aqr107_hw_stat aqr107_hw_stats[] = {
++	SGMII_STAT("sgmii_rx_good_frames",	    RX_GOOD_FRAMES,	26),
++	SGMII_STAT("sgmii_rx_bad_frames",	    RX_BAD_FRAMES,	26),
++	SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER,	 8),
++	SGMII_STAT("sgmii_tx_good_frames",	    TX_GOOD_FRAMES,	26),
++	SGMII_STAT("sgmii_tx_bad_frames",	    TX_BAD_FRAMES,	26),
++	SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER,	 8),
++	SGMII_STAT("sgmii_tx_collisions",	    TX_COLLISIONS,	 8),
++	SGMII_STAT("sgmii_tx_line_collisions",	    TX_LINE_COLLISIONS,	 8),
++	SGMII_STAT("sgmii_tx_frame_alignment_err",  TX_FRAME_ALIGN_ERR,	16),
++	SGMII_STAT("sgmii_tx_runt_frames",	    TX_RUNT_FRAMES,	22),
++};
++#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
++
++struct aqr107_priv {
++	u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
++};
++
++static int aqr107_get_sset_count(struct phy_device *phydev)
++{
++	return AQR107_SGMII_STAT_SZ;
++}
++
++static void aqr107_get_strings(struct phy_device *phydev, u8 *data)
++{
++	int i;
++
++	for (i = 0; i < AQR107_SGMII_STAT_SZ; i++)
++		strscpy(data + i * ETH_GSTRING_LEN, aqr107_hw_stats[i].name,
++			ETH_GSTRING_LEN);
++}
++
++static u64 aqr107_get_stat(struct phy_device *phydev, int index)
++{
++	const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
++	int len_l = min(stat->size, 16);
++	int len_h = stat->size - len_l;
++	u64 ret;
++	int val;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
++	if (val < 0)
++		return U64_MAX;
++
++	ret = val & GENMASK(len_l - 1, 0);
++	if (len_h) {
++		val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
++		if (val < 0)
++			return U64_MAX;
++
++		ret += (val & GENMASK(len_h - 1, 0)) << 16;
++	}
++
++	return ret;
++}
++
++static void aqr107_get_stats(struct phy_device *phydev,
++			     struct ethtool_stats *stats, u64 *data)
++{
++	struct aqr107_priv *priv = phydev->priv;
++	u64 val;
++	int i;
++
++	for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) {
++		val = aqr107_get_stat(phydev, i);
++		if (val == U64_MAX)
++			phydev_err(phydev, "Reading HW Statistics failed for %s\n",
++				   aqr107_hw_stats[i].name);
++		else
++			priv->sgmii_stats[i] += val;
++
++		data[i] = priv->sgmii_stats[i];
++	}
++}
++
++static int aqr_config_aneg(struct phy_device *phydev)
++{
++	bool changed = false;
++	u16 reg;
++	int ret;
++
++	if (phydev->autoneg == AUTONEG_DISABLE)
++		return genphy_c45_pma_setup_forced(phydev);
++
++	ret = genphy_c45_an_config_aneg(phydev);
++	if (ret < 0)
++		return ret;
++	if (ret > 0)
++		changed = true;
++
++	/* Clause 45 has no standardized support for 1000BaseT, therefore
++	 * use vendor registers for this mode.
++	 */
++	reg = 0;
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
++			      phydev->advertising))
++		reg |= MDIO_AN_VEND_PROV_1000BASET_FULL;
++
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
++			      phydev->advertising))
++		reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
++
++	/* Handle the case when the 2.5G and 5G speeds are not advertised */
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
++			      phydev->advertising))
++		reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
++
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
++			      phydev->advertising))
++		reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
++
++	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
++				     MDIO_AN_VEND_PROV_1000BASET_HALF |
++				     MDIO_AN_VEND_PROV_1000BASET_FULL |
++				     MDIO_AN_VEND_PROV_2500BASET_FULL |
++				     MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
++	if (ret < 0)
++		return ret;
++	if (ret > 0)
++		changed = true;
++
++	return genphy_c45_check_and_restart_aneg(phydev, changed);
++}
++
++static int aqr_config_intr(struct phy_device *phydev)
++{
++	bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
++	int err;
++
++	if (en) {
++		/* Clear any pending interrupts before enabling them */
++		err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
++		if (err < 0)
++			return err;
++	}
++
++	err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
++			    en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
++	if (err < 0)
++		return err;
++
++	err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_STD_MASK,
++			    en ? VEND1_GLOBAL_INT_STD_MASK_ALL : 0);
++	if (err < 0)
++		return err;
++
++	err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
++			    en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
++			    VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
++	if (err < 0)
++		return err;
++
++	if (!en) {
++		/* Clear any pending interrupts after we have disabled them */
++		err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
++		if (err < 0)
++			return err;
++	}
++
++	return 0;
++}
++
++static irqreturn_t aqr_handle_interrupt(struct phy_device *phydev)
++{
++	int irq_status;
++
++	irq_status = phy_read_mmd(phydev, MDIO_MMD_AN,
++				  MDIO_AN_TX_VEND_INT_STATUS2);
++	if (irq_status < 0) {
++		phy_error(phydev);
++		return IRQ_NONE;
++	}
++
++	if (!(irq_status & MDIO_AN_TX_VEND_INT_STATUS2_MASK))
++		return IRQ_NONE;
++
++	phy_trigger_machine(phydev);
++
++	return IRQ_HANDLED;
++}
++
++static int aqr_read_status(struct phy_device *phydev)
++{
++	int val;
++
++	if (phydev->autoneg == AUTONEG_ENABLE) {
++		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
++		if (val < 0)
++			return val;
++
++		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
++				 phydev->lp_advertising,
++				 val & MDIO_AN_RX_LP_STAT1_1000BASET_FULL);
++		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
++				 phydev->lp_advertising,
++				 val & MDIO_AN_RX_LP_STAT1_1000BASET_HALF);
++	}
++
++	return genphy_c45_read_status(phydev);
++}
++
++static int aqr107_read_rate(struct phy_device *phydev)
++{
++	u32 config_reg;
++	int val;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
++	if (val < 0)
++		return val;
++
++	if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
++		phydev->duplex = DUPLEX_FULL;
++	else
++		phydev->duplex = DUPLEX_HALF;
++
++	switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
++	case MDIO_AN_TX_VEND_STATUS1_10BASET:
++		phydev->speed = SPEED_10;
++		config_reg = VEND1_GLOBAL_CFG_10M;
++		break;
++	case MDIO_AN_TX_VEND_STATUS1_100BASETX:
++		phydev->speed = SPEED_100;
++		config_reg = VEND1_GLOBAL_CFG_100M;
++		break;
++	case MDIO_AN_TX_VEND_STATUS1_1000BASET:
++		phydev->speed = SPEED_1000;
++		config_reg = VEND1_GLOBAL_CFG_1G;
++		break;
++	case MDIO_AN_TX_VEND_STATUS1_2500BASET:
++		phydev->speed = SPEED_2500;
++		config_reg = VEND1_GLOBAL_CFG_2_5G;
++		break;
++	case MDIO_AN_TX_VEND_STATUS1_5000BASET:
++		phydev->speed = SPEED_5000;
++		config_reg = VEND1_GLOBAL_CFG_5G;
++		break;
++	case MDIO_AN_TX_VEND_STATUS1_10GBASET:
++		phydev->speed = SPEED_10000;
++		config_reg = VEND1_GLOBAL_CFG_10G;
++		break;
++	default:
++		phydev->speed = SPEED_UNKNOWN;
++		return 0;
++	}
++
++	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
++	if (val < 0)
++		return val;
++
++	if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
++	    VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
++		phydev->rate_matching = RATE_MATCH_PAUSE;
++	else
++		phydev->rate_matching = RATE_MATCH_NONE;
++
++	return 0;
++}
++
++static int aqr107_read_status(struct phy_device *phydev)
++{
++	int val, ret;
++
++	ret = aqr_read_status(phydev);
++	if (ret)
++		return ret;
++
++	if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
++		return 0;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS);
++	if (val < 0)
++		return val;
++
++	switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
++	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
++		phydev->interface = PHY_INTERFACE_MODE_10GKR;
++		break;
++	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
++		phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
++		break;
++	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
++		phydev->interface = PHY_INTERFACE_MODE_10GBASER;
++		break;
++	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
++		phydev->interface = PHY_INTERFACE_MODE_USXGMII;
++		break;
++	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
++		phydev->interface = PHY_INTERFACE_MODE_XAUI;
++		break;
++	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
++		phydev->interface = PHY_INTERFACE_MODE_SGMII;
++		break;
++	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
++		phydev->interface = PHY_INTERFACE_MODE_RXAUI;
++		break;
++	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
++		phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
++		break;
++	default:
++		phydev->interface = PHY_INTERFACE_MODE_NA;
++		break;
++	}
++
++	/* Read possibly downshifted rate from vendor register */
++	return aqr107_read_rate(phydev);
++}
++
++static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
++{
++	int val, cnt, enable;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV);
++	if (val < 0)
++		return val;
++
++	enable = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, val);
++	cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
++
++	*data = enable && cnt ? cnt : DOWNSHIFT_DEV_DISABLE;
++
++	return 0;
++}
++
++static int aqr107_set_downshift(struct phy_device *phydev, u8 cnt)
++{
++	int val = 0;
++
++	if (!FIELD_FIT(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt))
++		return -E2BIG;
++
++	if (cnt != DOWNSHIFT_DEV_DISABLE) {
++		val = MDIO_AN_VEND_PROV_DOWNSHIFT_EN;
++		val |= FIELD_PREP(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt);
++	}
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
++			      MDIO_AN_VEND_PROV_DOWNSHIFT_EN |
++			      MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
++}
++
++static int aqr107_get_tunable(struct phy_device *phydev,
++			      struct ethtool_tunable *tuna, void *data)
++{
++	switch (tuna->id) {
++	case ETHTOOL_PHY_DOWNSHIFT:
++		return aqr107_get_downshift(phydev, data);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++static int aqr107_set_tunable(struct phy_device *phydev,
++			      struct ethtool_tunable *tuna, const void *data)
++{
++	switch (tuna->id) {
++	case ETHTOOL_PHY_DOWNSHIFT:
++		return aqr107_set_downshift(phydev, *(const u8 *)data);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++/* If we configure settings whilst firmware is still initializing the chip,
++ * then these settings may be overwritten. Therefore make sure chip
++ * initialization has completed. Use presence of the firmware ID as
++ * indicator for initialization having completed.
++ * The chip also provides a "reset completed" bit, but it's cleared after
++ * read. Therefore function would time out if called again.
++ */
++static int aqr107_wait_reset_complete(struct phy_device *phydev)
++{
++	int val;
++
++	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
++					 VEND1_GLOBAL_FW_ID, val, val != 0,
++					 20000, 2000000, false);
++}
++
++static void aqr107_chip_info(struct phy_device *phydev)
++{
++	u8 fw_major, fw_minor, build_id, prov_id;
++	int val;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
++	if (val < 0)
++		return;
++
++	fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
++	fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
++
++	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
++	if (val < 0)
++		return;
++
++	build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
++	prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
++
++	phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
++		   fw_major, fw_minor, build_id, prov_id);
++}
++
++static int aqr107_config_init(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Check that the PHY interface type is compatible */
++	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
++	    phydev->interface != PHY_INTERFACE_MODE_1000BASEKX &&
++	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
++	    phydev->interface != PHY_INTERFACE_MODE_XGMII &&
++	    phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
++	    phydev->interface != PHY_INTERFACE_MODE_10GKR &&
++	    phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
++	    phydev->interface != PHY_INTERFACE_MODE_XAUI &&
++	    phydev->interface != PHY_INTERFACE_MODE_RXAUI)
++		return -ENODEV;
++
++	WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
++	     "Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
++
++	ret = aqr107_wait_reset_complete(phydev);
++	if (!ret)
++		aqr107_chip_info(phydev);
++
++	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
++}
++
++static int aqcs109_config_init(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Check that the PHY interface type is compatible */
++	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
++	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
++		return -ENODEV;
++
++	ret = aqr107_wait_reset_complete(phydev);
++	if (!ret)
++		aqr107_chip_info(phydev);
++
++	/* AQCS109 belongs to a chip family partially supporting 10G and 5G.
++	 * PMA speed ability bits are the same for all members of the family,
++	 * AQCS109 however supports speeds up to 2.5G only.
++	 */
++	phy_set_max_speed(phydev, SPEED_2500);
++
++	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
++}
++
++static void aqr107_link_change_notify(struct phy_device *phydev)
++{
++	u8 fw_major, fw_minor;
++	bool downshift, short_reach, afr;
++	int mode, val;
++
++	if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE)
++		return;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
++	/* call failed or link partner is no Aquantia PHY */
++	if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY))
++		return;
++
++	short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH;
++	downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4);
++	if (val < 0)
++		return;
++
++	fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val);
++	fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val);
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3);
++	if (val < 0)
++		return;
++
++	afr = val & MDIO_AN_RX_VEND_STAT3_AFR;
++
++	phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n",
++		   fw_major, fw_minor,
++		   short_reach ? ", short reach mode" : "",
++		   downshift ? ", fast-retrain downshift advertised" : "",
++		   afr ? ", fast reframe advertised" : "");
++
++	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9);
++	if (val < 0)
++		return;
++
++	mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val);
++	if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2)
++		phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
++}
++
++static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
++{
++	int val, err;
++
++	/* The datasheet notes to wait at least 1ms after issuing a
++	 * processor intensive operation before checking.
++	 * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
++	 * because that just determines the maximum time slept, not the minimum.
++	 */
++	usleep_range(1000, 5000);
++
++	err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
++					VEND1_GLOBAL_GEN_STAT2, val,
++					!(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
++					AQR107_OP_IN_PROG_SLEEP,
++					AQR107_OP_IN_PROG_TIMEOUT, false);
++	if (err) {
++		phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
++		return err;
++	}
++
++	return 0;
++}
++
++static int aqr107_get_rate_matching(struct phy_device *phydev,
++				    phy_interface_t iface)
++{
++	if (iface == PHY_INTERFACE_MODE_10GBASER ||
++	    iface == PHY_INTERFACE_MODE_2500BASEX ||
++	    iface == PHY_INTERFACE_MODE_NA)
++		return RATE_MATCH_PAUSE;
++	return RATE_MATCH_NONE;
++}
++
++static int aqr107_suspend(struct phy_device *phydev)
++{
++	int err;
++
++	err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
++			       MDIO_CTRL1_LPOWER);
++	if (err)
++		return err;
++
++	return aqr107_wait_processor_intensive_op(phydev);
++}
++
++static int aqr107_resume(struct phy_device *phydev)
++{
++	int err;
++
++	err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
++				 MDIO_CTRL1_LPOWER);
++	if (err)
++		return err;
++
++	return aqr107_wait_processor_intensive_op(phydev);
++}
++
++static int aqr107_probe(struct phy_device *phydev)
++{
++	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
++				    sizeof(struct aqr107_priv), GFP_KERNEL);
++	if (!phydev->priv)
++		return -ENOMEM;
++
++	return aqr_hwmon_probe(phydev);
++}
++
++static struct phy_driver aqr_driver[] = {
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
++	.name		= "Aquantia AQ1202",
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.read_status	= aqr_read_status,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQ2104),
++	.name		= "Aquantia AQ2104",
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.read_status	= aqr_read_status,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
++	.name		= "Aquantia AQR105",
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.read_status	= aqr_read_status,
++	.suspend	= aqr107_suspend,
++	.resume		= aqr107_resume,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
++	.name		= "Aquantia AQR106",
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.read_status	= aqr_read_status,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
++	.name		= "Aquantia AQR107",
++	.probe		= aqr107_probe,
++	.get_rate_matching = aqr107_get_rate_matching,
++	.config_init	= aqr107_config_init,
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.read_status	= aqr107_read_status,
++	.get_tunable    = aqr107_get_tunable,
++	.set_tunable    = aqr107_set_tunable,
++	.suspend	= aqr107_suspend,
++	.resume		= aqr107_resume,
++	.get_sset_count	= aqr107_get_sset_count,
++	.get_strings	= aqr107_get_strings,
++	.get_stats	= aqr107_get_stats,
++	.link_change_notify = aqr107_link_change_notify,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
++	.name		= "Aquantia AQCS109",
++	.probe		= aqr107_probe,
++	.get_rate_matching = aqr107_get_rate_matching,
++	.config_init	= aqcs109_config_init,
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.read_status	= aqr107_read_status,
++	.get_tunable    = aqr107_get_tunable,
++	.set_tunable    = aqr107_set_tunable,
++	.suspend	= aqr107_suspend,
++	.resume		= aqr107_resume,
++	.get_sset_count	= aqr107_get_sset_count,
++	.get_strings	= aqr107_get_strings,
++	.get_stats	= aqr107_get_stats,
++	.link_change_notify = aqr107_link_change_notify,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
++	.name		= "Aquantia AQR405",
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.read_status	= aqr_read_status,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQR112),
++	.name		= "Aquantia AQR112",
++	.probe		= aqr107_probe,
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.get_tunable    = aqr107_get_tunable,
++	.set_tunable    = aqr107_set_tunable,
++	.suspend	= aqr107_suspend,
++	.resume		= aqr107_resume,
++	.read_status	= aqr107_read_status,
++	.get_rate_matching = aqr107_get_rate_matching,
++	.get_sset_count = aqr107_get_sset_count,
++	.get_strings	= aqr107_get_strings,
++	.get_stats	= aqr107_get_stats,
++	.link_change_notify = aqr107_link_change_notify,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
++	.name		= "Aquantia AQR412",
++	.probe		= aqr107_probe,
++	.config_aneg    = aqr_config_aneg,
++	.config_intr	= aqr_config_intr,
++	.handle_interrupt = aqr_handle_interrupt,
++	.get_tunable    = aqr107_get_tunable,
++	.set_tunable    = aqr107_set_tunable,
++	.suspend	= aqr107_suspend,
++	.resume		= aqr107_resume,
++	.read_status	= aqr107_read_status,
++	.get_rate_matching = aqr107_get_rate_matching,
++	.get_sset_count = aqr107_get_sset_count,
++	.get_strings	= aqr107_get_strings,
++	.get_stats	= aqr107_get_stats,
++	.link_change_notify = aqr107_link_change_notify,
++},
++{
++	PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
++	.name           = "Aquantia AQR113C",
++	.probe          = aqr107_probe,
++	.get_rate_matching = aqr107_get_rate_matching,
++	.config_init    = aqr107_config_init,
++	.config_aneg    = aqr_config_aneg,
++	.config_intr    = aqr_config_intr,
++	.handle_interrupt       = aqr_handle_interrupt,
++	.read_status    = aqr107_read_status,
++	.get_tunable    = aqr107_get_tunable,
++	.set_tunable    = aqr107_set_tunable,
++	.suspend        = aqr107_suspend,
++	.resume         = aqr107_resume,
++	.get_sset_count = aqr107_get_sset_count,
++	.get_strings    = aqr107_get_strings,
++	.get_stats      = aqr107_get_stats,
++	.link_change_notify = aqr107_link_change_notify,
++},
++};
++
++module_phy_driver(aqr_driver);
++
++static struct mdio_device_id __maybe_unused aqr_tbl[] = {
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR106) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
++	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
++	{ }
++};
++
++MODULE_DEVICE_TABLE(mdio, aqr_tbl);
++
++MODULE_DESCRIPTION("Aquantia PHY driver");
++MODULE_AUTHOR("Shaohui Xie <[email protected]>");
++MODULE_LICENSE("GPL v2");
+--- a/drivers/net/phy/aquantia_hwmon.c
++++ /dev/null
+@@ -1,250 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/* HWMON driver for Aquantia PHY
+- *
+- * Author: Nikita Yushchenko <[email protected]>
+- * Author: Andrew Lunn <[email protected]>
+- * Author: Heiner Kallweit <[email protected]>
+- */
+-
+-#include <linux/phy.h>
+-#include <linux/device.h>
+-#include <linux/ctype.h>
+-#include <linux/hwmon.h>
+-
+-#include "aquantia.h"
+-
+-/* Vendor specific 1, MDIO_MMD_VEND2 */
+-#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
+-#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
+-#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
+-#define VEND1_THERMAL_PROV_LOW_TEMP_WARN	0xc424
+-#define VEND1_THERMAL_STAT1			0xc820
+-#define VEND1_THERMAL_STAT2			0xc821
+-#define VEND1_THERMAL_STAT2_VALID		BIT(0)
+-#define VEND1_GENERAL_STAT1			0xc830
+-#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL	BIT(14)
+-#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL	BIT(13)
+-#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN	BIT(12)
+-#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN	BIT(11)
+-
+-#if IS_REACHABLE(CONFIG_HWMON)
+-
+-static umode_t aqr_hwmon_is_visible(const void *data,
+-				    enum hwmon_sensor_types type,
+-				    u32 attr, int channel)
+-{
+-	if (type != hwmon_temp)
+-		return 0;
+-
+-	switch (attr) {
+-	case hwmon_temp_input:
+-	case hwmon_temp_min_alarm:
+-	case hwmon_temp_max_alarm:
+-	case hwmon_temp_lcrit_alarm:
+-	case hwmon_temp_crit_alarm:
+-		return 0444;
+-	case hwmon_temp_min:
+-	case hwmon_temp_max:
+-	case hwmon_temp_lcrit:
+-	case hwmon_temp_crit:
+-		return 0644;
+-	default:
+-		return 0;
+-	}
+-}
+-
+-static int aqr_hwmon_get(struct phy_device *phydev, int reg, long *value)
+-{
+-	int temp = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
+-
+-	if (temp < 0)
+-		return temp;
+-
+-	/* 16 bit value is 2's complement with LSB = 1/256th degree Celsius */
+-	*value = (s16)temp * 1000 / 256;
+-
+-	return 0;
+-}
+-
+-static int aqr_hwmon_set(struct phy_device *phydev, int reg, long value)
+-{
+-	int temp;
+-
+-	if (value >= 128000 || value < -128000)
+-		return -ERANGE;
+-
+-	temp = value * 256 / 1000;
+-
+-	/* temp is in s16 range and we're interested in lower 16 bits only */
+-	return phy_write_mmd(phydev, MDIO_MMD_VEND1, reg, (u16)temp);
+-}
+-
+-static int aqr_hwmon_test_bit(struct phy_device *phydev, int reg, int bit)
+-{
+-	int val = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg);
+-
+-	if (val < 0)
+-		return val;
+-
+-	return !!(val & bit);
+-}
+-
+-static int aqr_hwmon_status1(struct phy_device *phydev, int bit, long *value)
+-{
+-	int val = aqr_hwmon_test_bit(phydev, VEND1_GENERAL_STAT1, bit);
+-
+-	if (val < 0)
+-		return val;
+-
+-	*value = val;
+-
+-	return 0;
+-}
+-
+-static int aqr_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+-			  u32 attr, int channel, long *value)
+-{
+-	struct phy_device *phydev = dev_get_drvdata(dev);
+-	int reg;
+-
+-	if (type != hwmon_temp)
+-		return -EOPNOTSUPP;
+-
+-	switch (attr) {
+-	case hwmon_temp_input:
+-		reg = aqr_hwmon_test_bit(phydev, VEND1_THERMAL_STAT2,
+-					 VEND1_THERMAL_STAT2_VALID);
+-		if (reg < 0)
+-			return reg;
+-		if (!reg)
+-			return -EBUSY;
+-
+-		return aqr_hwmon_get(phydev, VEND1_THERMAL_STAT1, value);
+-
+-	case hwmon_temp_lcrit:
+-		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
+-				     value);
+-	case hwmon_temp_min:
+-		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
+-				     value);
+-	case hwmon_temp_max:
+-		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
+-				     value);
+-	case hwmon_temp_crit:
+-		return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
+-				     value);
+-	case hwmon_temp_lcrit_alarm:
+-		return aqr_hwmon_status1(phydev,
+-					 VEND1_GENERAL_STAT1_LOW_TEMP_FAIL,
+-					 value);
+-	case hwmon_temp_min_alarm:
+-		return aqr_hwmon_status1(phydev,
+-					 VEND1_GENERAL_STAT1_LOW_TEMP_WARN,
+-					 value);
+-	case hwmon_temp_max_alarm:
+-		return aqr_hwmon_status1(phydev,
+-					 VEND1_GENERAL_STAT1_HIGH_TEMP_WARN,
+-					 value);
+-	case hwmon_temp_crit_alarm:
+-		return aqr_hwmon_status1(phydev,
+-					 VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL,
+-					 value);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-static int aqr_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+-			   u32 attr, int channel, long value)
+-{
+-	struct phy_device *phydev = dev_get_drvdata(dev);
+-
+-	if (type != hwmon_temp)
+-		return -EOPNOTSUPP;
+-
+-	switch (attr) {
+-	case hwmon_temp_lcrit:
+-		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL,
+-				     value);
+-	case hwmon_temp_min:
+-		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN,
+-				     value);
+-	case hwmon_temp_max:
+-		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN,
+-				     value);
+-	case hwmon_temp_crit:
+-		return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL,
+-				     value);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-static const struct hwmon_ops aqr_hwmon_ops = {
+-	.is_visible = aqr_hwmon_is_visible,
+-	.read = aqr_hwmon_read,
+-	.write = aqr_hwmon_write,
+-};
+-
+-static u32 aqr_hwmon_chip_config[] = {
+-	HWMON_C_REGISTER_TZ,
+-	0,
+-};
+-
+-static const struct hwmon_channel_info aqr_hwmon_chip = {
+-	.type = hwmon_chip,
+-	.config = aqr_hwmon_chip_config,
+-};
+-
+-static u32 aqr_hwmon_temp_config[] = {
+-	HWMON_T_INPUT |
+-	HWMON_T_MAX | HWMON_T_MIN |
+-	HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+-	HWMON_T_CRIT | HWMON_T_LCRIT |
+-	HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
+-	0,
+-};
+-
+-static const struct hwmon_channel_info aqr_hwmon_temp = {
+-	.type = hwmon_temp,
+-	.config = aqr_hwmon_temp_config,
+-};
+-
+-static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
+-	&aqr_hwmon_chip,
+-	&aqr_hwmon_temp,
+-	NULL,
+-};
+-
+-static const struct hwmon_chip_info aqr_hwmon_chip_info = {
+-	.ops = &aqr_hwmon_ops,
+-	.info = aqr_hwmon_info,
+-};
+-
+-int aqr_hwmon_probe(struct phy_device *phydev)
+-{
+-	struct device *dev = &phydev->mdio.dev;
+-	struct device *hwmon_dev;
+-	char *hwmon_name;
+-	int i, j;
+-
+-	hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+-	if (!hwmon_name)
+-		return -ENOMEM;
+-
+-	for (i = j = 0; hwmon_name[i]; i++) {
+-		if (isalnum(hwmon_name[i])) {
+-			if (i != j)
+-				hwmon_name[j] = hwmon_name[i];
+-			j++;
+-		}
+-	}
+-	hwmon_name[j] = '\0';
+-
+-	hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name,
+-					phydev, &aqr_hwmon_chip_info, NULL);
+-
+-	return PTR_ERR_OR_ZERO(hwmon_dev);
+-}
+-
+-#endif
+--- a/drivers/net/phy/aquantia_main.c
++++ /dev/null
+@@ -1,882 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * Driver for Aquantia PHY
+- *
+- * Author: Shaohui Xie <[email protected]>
+- *
+- * Copyright 2015 Freescale Semiconductor, Inc.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/delay.h>
+-#include <linux/bitfield.h>
+-#include <linux/phy.h>
+-
+-#include "aquantia.h"
+-
+-#define PHY_ID_AQ1202	0x03a1b445
+-#define PHY_ID_AQ2104	0x03a1b460
+-#define PHY_ID_AQR105	0x03a1b4a2
+-#define PHY_ID_AQR106	0x03a1b4d0
+-#define PHY_ID_AQR107	0x03a1b4e0
+-#define PHY_ID_AQCS109	0x03a1b5c2
+-#define PHY_ID_AQR405	0x03a1b4b0
+-#define PHY_ID_AQR112	0x03a1b662
+-#define PHY_ID_AQR412	0x03a1b712
+-#define PHY_ID_AQR113C	0x31c31c12
+-
+-#define MDIO_PHYXS_VEND_IF_STATUS		0xe812
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK	GENMASK(7, 3)
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR	0
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX	1
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI	2
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII	3
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI	4
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII	6
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI	7
+-#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII	10
+-
+-#define MDIO_AN_VEND_PROV			0xc400
+-#define MDIO_AN_VEND_PROV_1000BASET_FULL	BIT(15)
+-#define MDIO_AN_VEND_PROV_1000BASET_HALF	BIT(14)
+-#define MDIO_AN_VEND_PROV_5000BASET_FULL	BIT(11)
+-#define MDIO_AN_VEND_PROV_2500BASET_FULL	BIT(10)
+-#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN		BIT(4)
+-#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK	GENMASK(3, 0)
+-#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT	4
+-
+-#define MDIO_AN_TX_VEND_STATUS1			0xc800
+-#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK	GENMASK(3, 1)
+-#define MDIO_AN_TX_VEND_STATUS1_10BASET		0
+-#define MDIO_AN_TX_VEND_STATUS1_100BASETX	1
+-#define MDIO_AN_TX_VEND_STATUS1_1000BASET	2
+-#define MDIO_AN_TX_VEND_STATUS1_10GBASET	3
+-#define MDIO_AN_TX_VEND_STATUS1_2500BASET	4
+-#define MDIO_AN_TX_VEND_STATUS1_5000BASET	5
+-#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX	BIT(0)
+-
+-#define MDIO_AN_TX_VEND_INT_STATUS1		0xcc00
+-#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT	BIT(1)
+-
+-#define MDIO_AN_TX_VEND_INT_STATUS2		0xcc01
+-#define MDIO_AN_TX_VEND_INT_STATUS2_MASK	BIT(0)
+-
+-#define MDIO_AN_TX_VEND_INT_MASK2		0xd401
+-#define MDIO_AN_TX_VEND_INT_MASK2_LINK		BIT(0)
+-
+-#define MDIO_AN_RX_LP_STAT1			0xe820
+-#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL	BIT(15)
+-#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF	BIT(14)
+-#define MDIO_AN_RX_LP_STAT1_SHORT_REACH		BIT(13)
+-#define MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT	BIT(12)
+-#define MDIO_AN_RX_LP_STAT1_AQ_PHY		BIT(2)
+-
+-#define MDIO_AN_RX_LP_STAT4			0xe823
+-#define MDIO_AN_RX_LP_STAT4_FW_MAJOR		GENMASK(15, 8)
+-#define MDIO_AN_RX_LP_STAT4_FW_MINOR		GENMASK(7, 0)
+-
+-#define MDIO_AN_RX_VEND_STAT3			0xe832
+-#define MDIO_AN_RX_VEND_STAT3_AFR		BIT(0)
+-
+-/* MDIO_MMD_C22EXT */
+-#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES		0xd292
+-#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES		0xd294
+-#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER		0xd297
+-#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES		0xd313
+-#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES		0xd315
+-#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER		0xd317
+-#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS		0xd318
+-#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS	0xd319
+-#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR	0xd31a
+-#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES		0xd31b
+-
+-/* Vendor specific 1, MDIO_MMD_VEND1 */
+-#define VEND1_GLOBAL_FW_ID			0x0020
+-#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
+-#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
+-
+-#define VEND1_GLOBAL_GEN_STAT2			0xc831
+-#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
+-
+-/* The following registers all have similar layouts; first the registers... */
+-#define VEND1_GLOBAL_CFG_10M			0x0310
+-#define VEND1_GLOBAL_CFG_100M			0x031b
+-#define VEND1_GLOBAL_CFG_1G			0x031c
+-#define VEND1_GLOBAL_CFG_2_5G			0x031d
+-#define VEND1_GLOBAL_CFG_5G			0x031e
+-#define VEND1_GLOBAL_CFG_10G			0x031f
+-/* ...and now the fields */
+-#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
+-#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
+-#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
+-#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
+-
+-#define VEND1_GLOBAL_RSVD_STAT1			0xc885
+-#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
+-#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
+-
+-#define VEND1_GLOBAL_RSVD_STAT9			0xc88d
+-#define VEND1_GLOBAL_RSVD_STAT9_MODE		GENMASK(7, 0)
+-#define VEND1_GLOBAL_RSVD_STAT9_1000BT2		0x23
+-
+-#define VEND1_GLOBAL_INT_STD_STATUS		0xfc00
+-#define VEND1_GLOBAL_INT_VEND_STATUS		0xfc01
+-
+-#define VEND1_GLOBAL_INT_STD_MASK		0xff00
+-#define VEND1_GLOBAL_INT_STD_MASK_PMA1		BIT(15)
+-#define VEND1_GLOBAL_INT_STD_MASK_PMA2		BIT(14)
+-#define VEND1_GLOBAL_INT_STD_MASK_PCS1		BIT(13)
+-#define VEND1_GLOBAL_INT_STD_MASK_PCS2		BIT(12)
+-#define VEND1_GLOBAL_INT_STD_MASK_PCS3		BIT(11)
+-#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1	BIT(10)
+-#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2	BIT(9)
+-#define VEND1_GLOBAL_INT_STD_MASK_AN1		BIT(8)
+-#define VEND1_GLOBAL_INT_STD_MASK_AN2		BIT(7)
+-#define VEND1_GLOBAL_INT_STD_MASK_GBE		BIT(6)
+-#define VEND1_GLOBAL_INT_STD_MASK_ALL		BIT(0)
+-
+-#define VEND1_GLOBAL_INT_VEND_MASK		0xff01
+-#define VEND1_GLOBAL_INT_VEND_MASK_PMA		BIT(15)
+-#define VEND1_GLOBAL_INT_VEND_MASK_PCS		BIT(14)
+-#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS	BIT(13)
+-#define VEND1_GLOBAL_INT_VEND_MASK_AN		BIT(12)
+-#define VEND1_GLOBAL_INT_VEND_MASK_GBE		BIT(11)
+-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1	BIT(2)
+-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2	BIT(1)
+-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3	BIT(0)
+-
+-/* Sleep and timeout for checking if the Processor-Intensive
+- * MDIO operation is finished
+- */
+-#define AQR107_OP_IN_PROG_SLEEP		1000
+-#define AQR107_OP_IN_PROG_TIMEOUT	100000
+-
+-struct aqr107_hw_stat {
+-	const char *name;
+-	int reg;
+-	int size;
+-};
+-
+-#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
+-static const struct aqr107_hw_stat aqr107_hw_stats[] = {
+-	SGMII_STAT("sgmii_rx_good_frames",	    RX_GOOD_FRAMES,	26),
+-	SGMII_STAT("sgmii_rx_bad_frames",	    RX_BAD_FRAMES,	26),
+-	SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER,	 8),
+-	SGMII_STAT("sgmii_tx_good_frames",	    TX_GOOD_FRAMES,	26),
+-	SGMII_STAT("sgmii_tx_bad_frames",	    TX_BAD_FRAMES,	26),
+-	SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER,	 8),
+-	SGMII_STAT("sgmii_tx_collisions",	    TX_COLLISIONS,	 8),
+-	SGMII_STAT("sgmii_tx_line_collisions",	    TX_LINE_COLLISIONS,	 8),
+-	SGMII_STAT("sgmii_tx_frame_alignment_err",  TX_FRAME_ALIGN_ERR,	16),
+-	SGMII_STAT("sgmii_tx_runt_frames",	    TX_RUNT_FRAMES,	22),
+-};
+-#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+-
+-struct aqr107_priv {
+-	u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+-};
+-
+-static int aqr107_get_sset_count(struct phy_device *phydev)
+-{
+-	return AQR107_SGMII_STAT_SZ;
+-}
+-
+-static void aqr107_get_strings(struct phy_device *phydev, u8 *data)
+-{
+-	int i;
+-
+-	for (i = 0; i < AQR107_SGMII_STAT_SZ; i++)
+-		strscpy(data + i * ETH_GSTRING_LEN, aqr107_hw_stats[i].name,
+-			ETH_GSTRING_LEN);
+-}
+-
+-static u64 aqr107_get_stat(struct phy_device *phydev, int index)
+-{
+-	const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
+-	int len_l = min(stat->size, 16);
+-	int len_h = stat->size - len_l;
+-	u64 ret;
+-	int val;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
+-	if (val < 0)
+-		return U64_MAX;
+-
+-	ret = val & GENMASK(len_l - 1, 0);
+-	if (len_h) {
+-		val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
+-		if (val < 0)
+-			return U64_MAX;
+-
+-		ret += (val & GENMASK(len_h - 1, 0)) << 16;
+-	}
+-
+-	return ret;
+-}
+-
+-static void aqr107_get_stats(struct phy_device *phydev,
+-			     struct ethtool_stats *stats, u64 *data)
+-{
+-	struct aqr107_priv *priv = phydev->priv;
+-	u64 val;
+-	int i;
+-
+-	for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) {
+-		val = aqr107_get_stat(phydev, i);
+-		if (val == U64_MAX)
+-			phydev_err(phydev, "Reading HW Statistics failed for %s\n",
+-				   aqr107_hw_stats[i].name);
+-		else
+-			priv->sgmii_stats[i] += val;
+-
+-		data[i] = priv->sgmii_stats[i];
+-	}
+-}
+-
+-static int aqr_config_aneg(struct phy_device *phydev)
+-{
+-	bool changed = false;
+-	u16 reg;
+-	int ret;
+-
+-	if (phydev->autoneg == AUTONEG_DISABLE)
+-		return genphy_c45_pma_setup_forced(phydev);
+-
+-	ret = genphy_c45_an_config_aneg(phydev);
+-	if (ret < 0)
+-		return ret;
+-	if (ret > 0)
+-		changed = true;
+-
+-	/* Clause 45 has no standardized support for 1000BaseT, therefore
+-	 * use vendor registers for this mode.
+-	 */
+-	reg = 0;
+-	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+-			      phydev->advertising))
+-		reg |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+-
+-	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+-			      phydev->advertising))
+-		reg |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+-
+-	/* Handle the case when the 2.5G and 5G speeds are not advertised */
+-	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+-			      phydev->advertising))
+-		reg |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+-
+-	if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+-			      phydev->advertising))
+-		reg |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+-
+-	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+-				     MDIO_AN_VEND_PROV_1000BASET_HALF |
+-				     MDIO_AN_VEND_PROV_1000BASET_FULL |
+-				     MDIO_AN_VEND_PROV_2500BASET_FULL |
+-				     MDIO_AN_VEND_PROV_5000BASET_FULL, reg);
+-	if (ret < 0)
+-		return ret;
+-	if (ret > 0)
+-		changed = true;
+-
+-	return genphy_c45_check_and_restart_aneg(phydev, changed);
+-}
+-
+-static int aqr_config_intr(struct phy_device *phydev)
+-{
+-	bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
+-	int err;
+-
+-	if (en) {
+-		/* Clear any pending interrupts before enabling them */
+-		err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
+-			    en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
+-	if (err < 0)
+-		return err;
+-
+-	err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_STD_MASK,
+-			    en ? VEND1_GLOBAL_INT_STD_MASK_ALL : 0);
+-	if (err < 0)
+-		return err;
+-
+-	err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
+-			    en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
+-			    VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
+-	if (err < 0)
+-		return err;
+-
+-	if (!en) {
+-		/* Clear any pending interrupts after we have disabled them */
+-		err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
+-		if (err < 0)
+-			return err;
+-	}
+-
+-	return 0;
+-}
+-
+-static irqreturn_t aqr_handle_interrupt(struct phy_device *phydev)
+-{
+-	int irq_status;
+-
+-	irq_status = phy_read_mmd(phydev, MDIO_MMD_AN,
+-				  MDIO_AN_TX_VEND_INT_STATUS2);
+-	if (irq_status < 0) {
+-		phy_error(phydev);
+-		return IRQ_NONE;
+-	}
+-
+-	if (!(irq_status & MDIO_AN_TX_VEND_INT_STATUS2_MASK))
+-		return IRQ_NONE;
+-
+-	phy_trigger_machine(phydev);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static int aqr_read_status(struct phy_device *phydev)
+-{
+-	int val;
+-
+-	if (phydev->autoneg == AUTONEG_ENABLE) {
+-		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
+-		if (val < 0)
+-			return val;
+-
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+-				 phydev->lp_advertising,
+-				 val & MDIO_AN_RX_LP_STAT1_1000BASET_FULL);
+-		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+-				 phydev->lp_advertising,
+-				 val & MDIO_AN_RX_LP_STAT1_1000BASET_HALF);
+-	}
+-
+-	return genphy_c45_read_status(phydev);
+-}
+-
+-static int aqr107_read_rate(struct phy_device *phydev)
+-{
+-	u32 config_reg;
+-	int val;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
+-	if (val < 0)
+-		return val;
+-
+-	if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+-		phydev->duplex = DUPLEX_FULL;
+-	else
+-		phydev->duplex = DUPLEX_HALF;
+-
+-	switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
+-	case MDIO_AN_TX_VEND_STATUS1_10BASET:
+-		phydev->speed = SPEED_10;
+-		config_reg = VEND1_GLOBAL_CFG_10M;
+-		break;
+-	case MDIO_AN_TX_VEND_STATUS1_100BASETX:
+-		phydev->speed = SPEED_100;
+-		config_reg = VEND1_GLOBAL_CFG_100M;
+-		break;
+-	case MDIO_AN_TX_VEND_STATUS1_1000BASET:
+-		phydev->speed = SPEED_1000;
+-		config_reg = VEND1_GLOBAL_CFG_1G;
+-		break;
+-	case MDIO_AN_TX_VEND_STATUS1_2500BASET:
+-		phydev->speed = SPEED_2500;
+-		config_reg = VEND1_GLOBAL_CFG_2_5G;
+-		break;
+-	case MDIO_AN_TX_VEND_STATUS1_5000BASET:
+-		phydev->speed = SPEED_5000;
+-		config_reg = VEND1_GLOBAL_CFG_5G;
+-		break;
+-	case MDIO_AN_TX_VEND_STATUS1_10GBASET:
+-		phydev->speed = SPEED_10000;
+-		config_reg = VEND1_GLOBAL_CFG_10G;
+-		break;
+-	default:
+-		phydev->speed = SPEED_UNKNOWN;
+-		return 0;
+-	}
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, config_reg);
+-	if (val < 0)
+-		return val;
+-
+-	if (FIELD_GET(VEND1_GLOBAL_CFG_RATE_ADAPT, val) ==
+-	    VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE)
+-		phydev->rate_matching = RATE_MATCH_PAUSE;
+-	else
+-		phydev->rate_matching = RATE_MATCH_NONE;
+-
+-	return 0;
+-}
+-
+-static int aqr107_read_status(struct phy_device *phydev)
+-{
+-	int val, ret;
+-
+-	ret = aqr_read_status(phydev);
+-	if (ret)
+-		return ret;
+-
+-	if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
+-		return 0;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS);
+-	if (val < 0)
+-		return val;
+-
+-	switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
+-	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
+-		phydev->interface = PHY_INTERFACE_MODE_10GKR;
+-		break;
+-	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX:
+-		phydev->interface = PHY_INTERFACE_MODE_1000BASEKX;
+-		break;
+-	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+-		phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+-		break;
+-	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII:
+-		phydev->interface = PHY_INTERFACE_MODE_USXGMII;
+-		break;
+-	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI:
+-		phydev->interface = PHY_INTERFACE_MODE_XAUI;
+-		break;
+-	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
+-		phydev->interface = PHY_INTERFACE_MODE_SGMII;
+-		break;
+-	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI:
+-		phydev->interface = PHY_INTERFACE_MODE_RXAUI;
+-		break;
+-	case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
+-		phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+-		break;
+-	default:
+-		phydev->interface = PHY_INTERFACE_MODE_NA;
+-		break;
+-	}
+-
+-	/* Read possibly downshifted rate from vendor register */
+-	return aqr107_read_rate(phydev);
+-}
+-
+-static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
+-{
+-	int val, cnt, enable;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV);
+-	if (val < 0)
+-		return val;
+-
+-	enable = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, val);
+-	cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
+-
+-	*data = enable && cnt ? cnt : DOWNSHIFT_DEV_DISABLE;
+-
+-	return 0;
+-}
+-
+-static int aqr107_set_downshift(struct phy_device *phydev, u8 cnt)
+-{
+-	int val = 0;
+-
+-	if (!FIELD_FIT(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt))
+-		return -E2BIG;
+-
+-	if (cnt != DOWNSHIFT_DEV_DISABLE) {
+-		val = MDIO_AN_VEND_PROV_DOWNSHIFT_EN;
+-		val |= FIELD_PREP(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt);
+-	}
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+-			      MDIO_AN_VEND_PROV_DOWNSHIFT_EN |
+-			      MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
+-}
+-
+-static int aqr107_get_tunable(struct phy_device *phydev,
+-			      struct ethtool_tunable *tuna, void *data)
+-{
+-	switch (tuna->id) {
+-	case ETHTOOL_PHY_DOWNSHIFT:
+-		return aqr107_get_downshift(phydev, data);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-static int aqr107_set_tunable(struct phy_device *phydev,
+-			      struct ethtool_tunable *tuna, const void *data)
+-{
+-	switch (tuna->id) {
+-	case ETHTOOL_PHY_DOWNSHIFT:
+-		return aqr107_set_downshift(phydev, *(const u8 *)data);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-/* If we configure settings whilst firmware is still initializing the chip,
+- * then these settings may be overwritten. Therefore make sure chip
+- * initialization has completed. Use presence of the firmware ID as
+- * indicator for initialization having completed.
+- * The chip also provides a "reset completed" bit, but it's cleared after
+- * read. Therefore function would time out if called again.
+- */
+-static int aqr107_wait_reset_complete(struct phy_device *phydev)
+-{
+-	int val;
+-
+-	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+-					 VEND1_GLOBAL_FW_ID, val, val != 0,
+-					 20000, 2000000, false);
+-}
+-
+-static void aqr107_chip_info(struct phy_device *phydev)
+-{
+-	u8 fw_major, fw_minor, build_id, prov_id;
+-	int val;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+-	if (val < 0)
+-		return;
+-
+-	fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
+-	fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
+-	if (val < 0)
+-		return;
+-
+-	build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
+-	prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
+-
+-	phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
+-		   fw_major, fw_minor, build_id, prov_id);
+-}
+-
+-static int aqr107_config_init(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* Check that the PHY interface type is compatible */
+-	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+-	    phydev->interface != PHY_INTERFACE_MODE_1000BASEKX &&
+-	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
+-	    phydev->interface != PHY_INTERFACE_MODE_XGMII &&
+-	    phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
+-	    phydev->interface != PHY_INTERFACE_MODE_10GKR &&
+-	    phydev->interface != PHY_INTERFACE_MODE_10GBASER &&
+-	    phydev->interface != PHY_INTERFACE_MODE_XAUI &&
+-	    phydev->interface != PHY_INTERFACE_MODE_RXAUI)
+-		return -ENODEV;
+-
+-	WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
+-	     "Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
+-
+-	ret = aqr107_wait_reset_complete(phydev);
+-	if (!ret)
+-		aqr107_chip_info(phydev);
+-
+-	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+-}
+-
+-static int aqcs109_config_init(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* Check that the PHY interface type is compatible */
+-	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+-	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
+-		return -ENODEV;
+-
+-	ret = aqr107_wait_reset_complete(phydev);
+-	if (!ret)
+-		aqr107_chip_info(phydev);
+-
+-	/* AQCS109 belongs to a chip family partially supporting 10G and 5G.
+-	 * PMA speed ability bits are the same for all members of the family,
+-	 * AQCS109 however supports speeds up to 2.5G only.
+-	 */
+-	phy_set_max_speed(phydev, SPEED_2500);
+-
+-	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+-}
+-
+-static void aqr107_link_change_notify(struct phy_device *phydev)
+-{
+-	u8 fw_major, fw_minor;
+-	bool downshift, short_reach, afr;
+-	int mode, val;
+-
+-	if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE)
+-		return;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
+-	/* call failed or link partner is no Aquantia PHY */
+-	if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY))
+-		return;
+-
+-	short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH;
+-	downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4);
+-	if (val < 0)
+-		return;
+-
+-	fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val);
+-	fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val);
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3);
+-	if (val < 0)
+-		return;
+-
+-	afr = val & MDIO_AN_RX_VEND_STAT3_AFR;
+-
+-	phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n",
+-		   fw_major, fw_minor,
+-		   short_reach ? ", short reach mode" : "",
+-		   downshift ? ", fast-retrain downshift advertised" : "",
+-		   afr ? ", fast reframe advertised" : "");
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9);
+-	if (val < 0)
+-		return;
+-
+-	mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val);
+-	if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2)
+-		phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
+-}
+-
+-static int aqr107_wait_processor_intensive_op(struct phy_device *phydev)
+-{
+-	int val, err;
+-
+-	/* The datasheet notes to wait at least 1ms after issuing a
+-	 * processor intensive operation before checking.
+-	 * We cannot use the 'sleep_before_read' parameter of read_poll_timeout
+-	 * because that just determines the maximum time slept, not the minimum.
+-	 */
+-	usleep_range(1000, 5000);
+-
+-	err = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+-					VEND1_GLOBAL_GEN_STAT2, val,
+-					!(val & VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG),
+-					AQR107_OP_IN_PROG_SLEEP,
+-					AQR107_OP_IN_PROG_TIMEOUT, false);
+-	if (err) {
+-		phydev_err(phydev, "timeout: processor-intensive MDIO operation\n");
+-		return err;
+-	}
+-
+-	return 0;
+-}
+-
+-static int aqr107_get_rate_matching(struct phy_device *phydev,
+-				    phy_interface_t iface)
+-{
+-	if (iface == PHY_INTERFACE_MODE_10GBASER ||
+-	    iface == PHY_INTERFACE_MODE_2500BASEX ||
+-	    iface == PHY_INTERFACE_MODE_NA)
+-		return RATE_MATCH_PAUSE;
+-	return RATE_MATCH_NONE;
+-}
+-
+-static int aqr107_suspend(struct phy_device *phydev)
+-{
+-	int err;
+-
+-	err = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+-			       MDIO_CTRL1_LPOWER);
+-	if (err)
+-		return err;
+-
+-	return aqr107_wait_processor_intensive_op(phydev);
+-}
+-
+-static int aqr107_resume(struct phy_device *phydev)
+-{
+-	int err;
+-
+-	err = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+-				 MDIO_CTRL1_LPOWER);
+-	if (err)
+-		return err;
+-
+-	return aqr107_wait_processor_intensive_op(phydev);
+-}
+-
+-static int aqr107_probe(struct phy_device *phydev)
+-{
+-	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+-				    sizeof(struct aqr107_priv), GFP_KERNEL);
+-	if (!phydev->priv)
+-		return -ENOMEM;
+-
+-	return aqr_hwmon_probe(phydev);
+-}
+-
+-static struct phy_driver aqr_driver[] = {
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
+-	.name		= "Aquantia AQ1202",
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.read_status	= aqr_read_status,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQ2104),
+-	.name		= "Aquantia AQ2104",
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.read_status	= aqr_read_status,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
+-	.name		= "Aquantia AQR105",
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.read_status	= aqr_read_status,
+-	.suspend	= aqr107_suspend,
+-	.resume		= aqr107_resume,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
+-	.name		= "Aquantia AQR106",
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.read_status	= aqr_read_status,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
+-	.name		= "Aquantia AQR107",
+-	.probe		= aqr107_probe,
+-	.get_rate_matching = aqr107_get_rate_matching,
+-	.config_init	= aqr107_config_init,
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.read_status	= aqr107_read_status,
+-	.get_tunable    = aqr107_get_tunable,
+-	.set_tunable    = aqr107_set_tunable,
+-	.suspend	= aqr107_suspend,
+-	.resume		= aqr107_resume,
+-	.get_sset_count	= aqr107_get_sset_count,
+-	.get_strings	= aqr107_get_strings,
+-	.get_stats	= aqr107_get_stats,
+-	.link_change_notify = aqr107_link_change_notify,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
+-	.name		= "Aquantia AQCS109",
+-	.probe		= aqr107_probe,
+-	.get_rate_matching = aqr107_get_rate_matching,
+-	.config_init	= aqcs109_config_init,
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.read_status	= aqr107_read_status,
+-	.get_tunable    = aqr107_get_tunable,
+-	.set_tunable    = aqr107_set_tunable,
+-	.suspend	= aqr107_suspend,
+-	.resume		= aqr107_resume,
+-	.get_sset_count	= aqr107_get_sset_count,
+-	.get_strings	= aqr107_get_strings,
+-	.get_stats	= aqr107_get_stats,
+-	.link_change_notify = aqr107_link_change_notify,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
+-	.name		= "Aquantia AQR405",
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.read_status	= aqr_read_status,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQR112),
+-	.name		= "Aquantia AQR112",
+-	.probe		= aqr107_probe,
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.get_tunable    = aqr107_get_tunable,
+-	.set_tunable    = aqr107_set_tunable,
+-	.suspend	= aqr107_suspend,
+-	.resume		= aqr107_resume,
+-	.read_status	= aqr107_read_status,
+-	.get_rate_matching = aqr107_get_rate_matching,
+-	.get_sset_count = aqr107_get_sset_count,
+-	.get_strings	= aqr107_get_strings,
+-	.get_stats	= aqr107_get_stats,
+-	.link_change_notify = aqr107_link_change_notify,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
+-	.name		= "Aquantia AQR412",
+-	.probe		= aqr107_probe,
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr	= aqr_config_intr,
+-	.handle_interrupt = aqr_handle_interrupt,
+-	.get_tunable    = aqr107_get_tunable,
+-	.set_tunable    = aqr107_set_tunable,
+-	.suspend	= aqr107_suspend,
+-	.resume		= aqr107_resume,
+-	.read_status	= aqr107_read_status,
+-	.get_rate_matching = aqr107_get_rate_matching,
+-	.get_sset_count = aqr107_get_sset_count,
+-	.get_strings	= aqr107_get_strings,
+-	.get_stats	= aqr107_get_stats,
+-	.link_change_notify = aqr107_link_change_notify,
+-},
+-{
+-	PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
+-	.name           = "Aquantia AQR113C",
+-	.probe          = aqr107_probe,
+-	.get_rate_matching = aqr107_get_rate_matching,
+-	.config_init    = aqr107_config_init,
+-	.config_aneg    = aqr_config_aneg,
+-	.config_intr    = aqr_config_intr,
+-	.handle_interrupt       = aqr_handle_interrupt,
+-	.read_status    = aqr107_read_status,
+-	.get_tunable    = aqr107_get_tunable,
+-	.set_tunable    = aqr107_set_tunable,
+-	.suspend        = aqr107_suspend,
+-	.resume         = aqr107_resume,
+-	.get_sset_count = aqr107_get_sset_count,
+-	.get_strings    = aqr107_get_strings,
+-	.get_stats      = aqr107_get_stats,
+-	.link_change_notify = aqr107_link_change_notify,
+-},
+-};
+-
+-module_phy_driver(aqr_driver);
+-
+-static struct mdio_device_id __maybe_unused aqr_tbl[] = {
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR106) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
+-	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
+-	{ }
+-};
+-
+-MODULE_DEVICE_TABLE(mdio, aqr_tbl);
+-
+-MODULE_DESCRIPTION("Aquantia PHY driver");
+-MODULE_AUTHOR("Shaohui Xie <[email protected]>");
+-MODULE_LICENSE("GPL v2");

+ 183 - 0
target/linux/generic/backport-6.6/702-02-v6.7-net-phy-aquantia-move-MMD_VEND-define-to-header.patch

@@ -0,0 +1,183 @@
+From e1fbfa4a995d42e02e22b0dff2f8b4fdee1504b3 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Tue, 14 Nov 2023 15:08:42 +0100
+Subject: [PATCH 2/3] net: phy: aquantia: move MMD_VEND define to header
+
+Move MMD_VEND define to header to clean things up and in preparation for
+firmware loading support that require some define placed in
+aquantia_main.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/aquantia/aquantia.h       | 69 +++++++++++++++++++++++
+ drivers/net/phy/aquantia/aquantia_hwmon.c | 14 -----
+ drivers/net/phy/aquantia/aquantia_main.c  | 55 ------------------
+ 3 files changed, 69 insertions(+), 69 deletions(-)
+
+--- a/drivers/net/phy/aquantia/aquantia.h
++++ b/drivers/net/phy/aquantia/aquantia.h
+@@ -9,6 +9,75 @@
+ #include <linux/device.h>
+ #include <linux/phy.h>
+ 
++/* Vendor specific 1, MDIO_MMD_VEND1 */
++#define VEND1_GLOBAL_FW_ID			0x0020
++#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
++#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
++
++/* The following registers all have similar layouts; first the registers... */
++#define VEND1_GLOBAL_CFG_10M			0x0310
++#define VEND1_GLOBAL_CFG_100M			0x031b
++#define VEND1_GLOBAL_CFG_1G			0x031c
++#define VEND1_GLOBAL_CFG_2_5G			0x031d
++#define VEND1_GLOBAL_CFG_5G			0x031e
++#define VEND1_GLOBAL_CFG_10G			0x031f
++/* ...and now the fields */
++#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
++#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
++#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
++#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
++
++/* Vendor specific 1, MDIO_MMD_VEND2 */
++#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
++#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
++#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
++#define VEND1_THERMAL_PROV_LOW_TEMP_WARN	0xc424
++#define VEND1_THERMAL_STAT1			0xc820
++#define VEND1_THERMAL_STAT2			0xc821
++#define VEND1_THERMAL_STAT2_VALID		BIT(0)
++#define VEND1_GENERAL_STAT1			0xc830
++#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL	BIT(14)
++#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL	BIT(13)
++#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN	BIT(12)
++#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN	BIT(11)
++
++#define VEND1_GLOBAL_GEN_STAT2			0xc831
++#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
++
++#define VEND1_GLOBAL_RSVD_STAT1			0xc885
++#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
++#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
++
++#define VEND1_GLOBAL_RSVD_STAT9			0xc88d
++#define VEND1_GLOBAL_RSVD_STAT9_MODE		GENMASK(7, 0)
++#define VEND1_GLOBAL_RSVD_STAT9_1000BT2		0x23
++
++#define VEND1_GLOBAL_INT_STD_STATUS		0xfc00
++#define VEND1_GLOBAL_INT_VEND_STATUS		0xfc01
++
++#define VEND1_GLOBAL_INT_STD_MASK		0xff00
++#define VEND1_GLOBAL_INT_STD_MASK_PMA1		BIT(15)
++#define VEND1_GLOBAL_INT_STD_MASK_PMA2		BIT(14)
++#define VEND1_GLOBAL_INT_STD_MASK_PCS1		BIT(13)
++#define VEND1_GLOBAL_INT_STD_MASK_PCS2		BIT(12)
++#define VEND1_GLOBAL_INT_STD_MASK_PCS3		BIT(11)
++#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1	BIT(10)
++#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2	BIT(9)
++#define VEND1_GLOBAL_INT_STD_MASK_AN1		BIT(8)
++#define VEND1_GLOBAL_INT_STD_MASK_AN2		BIT(7)
++#define VEND1_GLOBAL_INT_STD_MASK_GBE		BIT(6)
++#define VEND1_GLOBAL_INT_STD_MASK_ALL		BIT(0)
++
++#define VEND1_GLOBAL_INT_VEND_MASK		0xff01
++#define VEND1_GLOBAL_INT_VEND_MASK_PMA		BIT(15)
++#define VEND1_GLOBAL_INT_VEND_MASK_PCS		BIT(14)
++#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS	BIT(13)
++#define VEND1_GLOBAL_INT_VEND_MASK_AN		BIT(12)
++#define VEND1_GLOBAL_INT_VEND_MASK_GBE		BIT(11)
++#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1	BIT(2)
++#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2	BIT(1)
++#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3	BIT(0)
++
+ #if IS_REACHABLE(CONFIG_HWMON)
+ int aqr_hwmon_probe(struct phy_device *phydev);
+ #else
+--- a/drivers/net/phy/aquantia/aquantia_hwmon.c
++++ b/drivers/net/phy/aquantia/aquantia_hwmon.c
+@@ -13,20 +13,6 @@
+ 
+ #include "aquantia.h"
+ 
+-/* Vendor specific 1, MDIO_MMD_VEND2 */
+-#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
+-#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
+-#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
+-#define VEND1_THERMAL_PROV_LOW_TEMP_WARN	0xc424
+-#define VEND1_THERMAL_STAT1			0xc820
+-#define VEND1_THERMAL_STAT2			0xc821
+-#define VEND1_THERMAL_STAT2_VALID		BIT(0)
+-#define VEND1_GENERAL_STAT1			0xc830
+-#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL	BIT(14)
+-#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL	BIT(13)
+-#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN	BIT(12)
+-#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN	BIT(11)
+-
+ #if IS_REACHABLE(CONFIG_HWMON)
+ 
+ static umode_t aqr_hwmon_is_visible(const void *data,
+--- a/drivers/net/phy/aquantia/aquantia_main.c
++++ b/drivers/net/phy/aquantia/aquantia_main.c
+@@ -91,61 +91,6 @@
+ #define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR	0xd31a
+ #define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES		0xd31b
+ 
+-/* Vendor specific 1, MDIO_MMD_VEND1 */
+-#define VEND1_GLOBAL_FW_ID			0x0020
+-#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
+-#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
+-
+-#define VEND1_GLOBAL_GEN_STAT2			0xc831
+-#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
+-
+-/* The following registers all have similar layouts; first the registers... */
+-#define VEND1_GLOBAL_CFG_10M			0x0310
+-#define VEND1_GLOBAL_CFG_100M			0x031b
+-#define VEND1_GLOBAL_CFG_1G			0x031c
+-#define VEND1_GLOBAL_CFG_2_5G			0x031d
+-#define VEND1_GLOBAL_CFG_5G			0x031e
+-#define VEND1_GLOBAL_CFG_10G			0x031f
+-/* ...and now the fields */
+-#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
+-#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
+-#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
+-#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
+-
+-#define VEND1_GLOBAL_RSVD_STAT1			0xc885
+-#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID	GENMASK(7, 4)
+-#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID		GENMASK(3, 0)
+-
+-#define VEND1_GLOBAL_RSVD_STAT9			0xc88d
+-#define VEND1_GLOBAL_RSVD_STAT9_MODE		GENMASK(7, 0)
+-#define VEND1_GLOBAL_RSVD_STAT9_1000BT2		0x23
+-
+-#define VEND1_GLOBAL_INT_STD_STATUS		0xfc00
+-#define VEND1_GLOBAL_INT_VEND_STATUS		0xfc01
+-
+-#define VEND1_GLOBAL_INT_STD_MASK		0xff00
+-#define VEND1_GLOBAL_INT_STD_MASK_PMA1		BIT(15)
+-#define VEND1_GLOBAL_INT_STD_MASK_PMA2		BIT(14)
+-#define VEND1_GLOBAL_INT_STD_MASK_PCS1		BIT(13)
+-#define VEND1_GLOBAL_INT_STD_MASK_PCS2		BIT(12)
+-#define VEND1_GLOBAL_INT_STD_MASK_PCS3		BIT(11)
+-#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1	BIT(10)
+-#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2	BIT(9)
+-#define VEND1_GLOBAL_INT_STD_MASK_AN1		BIT(8)
+-#define VEND1_GLOBAL_INT_STD_MASK_AN2		BIT(7)
+-#define VEND1_GLOBAL_INT_STD_MASK_GBE		BIT(6)
+-#define VEND1_GLOBAL_INT_STD_MASK_ALL		BIT(0)
+-
+-#define VEND1_GLOBAL_INT_VEND_MASK		0xff01
+-#define VEND1_GLOBAL_INT_VEND_MASK_PMA		BIT(15)
+-#define VEND1_GLOBAL_INT_VEND_MASK_PCS		BIT(14)
+-#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS	BIT(13)
+-#define VEND1_GLOBAL_INT_VEND_MASK_AN		BIT(12)
+-#define VEND1_GLOBAL_INT_VEND_MASK_GBE		BIT(11)
+-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1	BIT(2)
+-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2	BIT(1)
+-#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3	BIT(0)
+-
+ /* Sleep and timeout for checking if the Processor-Intensive
+  * MDIO operation is finished
+  */

+ 504 - 0
target/linux/generic/backport-6.6/702-03-v6.7-net-phy-aquantia-add-firmware-load-support.patch

@@ -0,0 +1,504 @@
+From e93984ebc1c82bd34f7a1b3391efaceee0a8ae96 Mon Sep 17 00:00:00 2001
+From: Robert Marko <[email protected]>
+Date: Tue, 14 Nov 2023 15:08:43 +0100
+Subject: [PATCH 3/3] net: phy: aquantia: add firmware load support
+
+Aquantia PHY-s require firmware to be loaded before they start operating.
+It can be automatically loaded in case when there is a SPI-NOR connected
+to Aquantia PHY-s or can be loaded from the host via MDIO.
+
+This patch adds support for loading the firmware via MDIO as in most cases
+there is no SPI-NOR being used to save on cost.
+Firmware loading code itself is ported from mainline U-boot with cleanups.
+
+The firmware has mixed values both in big and little endian.
+PHY core itself is big-endian but it expects values to be in little-endian.
+The firmware is little-endian but CRC-16 value for it is stored at the end
+of firmware in big-endian.
+
+It seems the PHY does the conversion internally from firmware that is
+little-endian to the PHY that is big-endian on using the mailbox
+but mailbox returns a big-endian CRC-16 to verify the written data
+integrity.
+
+Co-developed-by: Christian Marangi <[email protected]>
+Signed-off-by: Robert Marko <[email protected]>
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/aquantia/Kconfig             |   1 +
+ drivers/net/phy/aquantia/Makefile            |   2 +-
+ drivers/net/phy/aquantia/aquantia.h          |  32 ++
+ drivers/net/phy/aquantia/aquantia_firmware.c | 370 +++++++++++++++++++
+ drivers/net/phy/aquantia/aquantia_main.c     |   6 +
+ 5 files changed, 410 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/phy/aquantia/aquantia_firmware.c
+
+--- a/drivers/net/phy/aquantia/Kconfig
++++ b/drivers/net/phy/aquantia/Kconfig
+@@ -1,5 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config AQUANTIA_PHY
+ 	tristate "Aquantia PHYs"
++	select CRC_CCITT
+ 	help
+ 	  Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+--- a/drivers/net/phy/aquantia/Makefile
++++ b/drivers/net/phy/aquantia/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-aquantia-objs			+= aquantia_main.o
++aquantia-objs			+= aquantia_main.o aquantia_firmware.o
+ ifdef CONFIG_HWMON
+ aquantia-objs			+= aquantia_hwmon.o
+ endif
+--- a/drivers/net/phy/aquantia/aquantia.h
++++ b/drivers/net/phy/aquantia/aquantia.h
+@@ -10,10 +10,35 @@
+ #include <linux/phy.h>
+ 
+ /* Vendor specific 1, MDIO_MMD_VEND1 */
++#define VEND1_GLOBAL_SC				0x0
++#define VEND1_GLOBAL_SC_SOFT_RESET		BIT(15)
++#define VEND1_GLOBAL_SC_LOW_POWER		BIT(11)
++
+ #define VEND1_GLOBAL_FW_ID			0x0020
+ #define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
+ #define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
+ 
++#define VEND1_GLOBAL_MAILBOX_INTERFACE1			0x0200
++#define VEND1_GLOBAL_MAILBOX_INTERFACE1_EXECUTE		BIT(15)
++#define VEND1_GLOBAL_MAILBOX_INTERFACE1_WRITE		BIT(14)
++#define VEND1_GLOBAL_MAILBOX_INTERFACE1_CRC_RESET	BIT(12)
++#define VEND1_GLOBAL_MAILBOX_INTERFACE1_BUSY		BIT(8)
++
++#define VEND1_GLOBAL_MAILBOX_INTERFACE2			0x0201
++#define VEND1_GLOBAL_MAILBOX_INTERFACE3			0x0202
++#define VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR_MASK	GENMASK(15, 0)
++#define VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR(x)	FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR_MASK, (u16)((x) >> 16))
++#define VEND1_GLOBAL_MAILBOX_INTERFACE4			0x0203
++#define VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR_MASK	GENMASK(15, 2)
++#define VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR(x)	FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR_MASK, (u16)(x))
++
++#define VEND1_GLOBAL_MAILBOX_INTERFACE5			0x0204
++#define VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA_MASK	GENMASK(15, 0)
++#define VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA(x)	FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA_MASK, (u16)((x) >> 16))
++#define VEND1_GLOBAL_MAILBOX_INTERFACE6			0x0205
++#define VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA_MASK	GENMASK(15, 0)
++#define VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA(x)	FIELD_PREP(VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA_MASK, (u16)(x))
++
+ /* The following registers all have similar layouts; first the registers... */
+ #define VEND1_GLOBAL_CFG_10M			0x0310
+ #define VEND1_GLOBAL_CFG_100M			0x031b
+@@ -28,6 +53,11 @@
+ #define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
+ 
+ /* Vendor specific 1, MDIO_MMD_VEND2 */
++#define VEND1_GLOBAL_CONTROL2			0xc001
++#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_RST	BIT(15)
++#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD	BIT(6)
++#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL	BIT(0)
++
+ #define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL	0xc421
+ #define VEND1_THERMAL_PROV_LOW_TEMP_FAIL	0xc422
+ #define VEND1_THERMAL_PROV_HIGH_TEMP_WARN	0xc423
+@@ -83,3 +113,5 @@ int aqr_hwmon_probe(struct phy_device *p
+ #else
+ static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
+ #endif
++
++int aqr_firmware_load(struct phy_device *phydev);
+--- /dev/null
++++ b/drivers/net/phy/aquantia/aquantia_firmware.c
+@@ -0,0 +1,370 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bitfield.h>
++#include <linux/of.h>
++#include <linux/firmware.h>
++#include <linux/crc-ccitt.h>
++#include <linux/nvmem-consumer.h>
++
++#include <asm/unaligned.h>
++
++#include "aquantia.h"
++
++#define UP_RESET_SLEEP		100
++
++/* addresses of memory segments in the phy */
++#define DRAM_BASE_ADDR		0x3FFE0000
++#define IRAM_BASE_ADDR		0x40000000
++
++/* firmware image format constants */
++#define VERSION_STRING_SIZE		0x40
++#define VERSION_STRING_OFFSET		0x0200
++/* primary offset is written at an offset from the start of the fw blob */
++#define PRIMARY_OFFSET_OFFSET		0x8
++/* primary offset needs to be then added to a base offset */
++#define PRIMARY_OFFSET_SHIFT		12
++#define PRIMARY_OFFSET(x)		((x) << PRIMARY_OFFSET_SHIFT)
++#define HEADER_OFFSET			0x300
++
++struct aqr_fw_header {
++	u32 padding;
++	u8 iram_offset[3];
++	u8 iram_size[3];
++	u8 dram_offset[3];
++	u8 dram_size[3];
++} __packed;
++
++enum aqr_fw_src {
++	AQR_FW_SRC_NVMEM = 0,
++	AQR_FW_SRC_FS,
++};
++
++static const char * const aqr_fw_src_string[] = {
++	[AQR_FW_SRC_NVMEM] = "NVMEM",
++	[AQR_FW_SRC_FS] = "FS",
++};
++
++/* AQR firmware doesn't have fixed offsets for iram and dram section
++ * but instead provide an header with the offset to use on reading
++ * and parsing the firmware.
++ *
++ * AQR firmware can't be trusted and each offset is validated to be
++ * not negative and be in the size of the firmware itself.
++ */
++static bool aqr_fw_validate_get(size_t size, size_t offset, size_t get_size)
++{
++	return offset + get_size <= size;
++}
++
++static int aqr_fw_get_be16(const u8 *data, size_t offset, size_t size, u16 *value)
++{
++	if (!aqr_fw_validate_get(size, offset, sizeof(u16)))
++		return -EINVAL;
++
++	*value = get_unaligned_be16(data + offset);
++
++	return 0;
++}
++
++static int aqr_fw_get_le16(const u8 *data, size_t offset, size_t size, u16 *value)
++{
++	if (!aqr_fw_validate_get(size, offset, sizeof(u16)))
++		return -EINVAL;
++
++	*value = get_unaligned_le16(data + offset);
++
++	return 0;
++}
++
++static int aqr_fw_get_le24(const u8 *data, size_t offset, size_t size, u32 *value)
++{
++	if (!aqr_fw_validate_get(size, offset, sizeof(u8) * 3))
++		return -EINVAL;
++
++	*value = get_unaligned_le24(data + offset);
++
++	return 0;
++}
++
++/* load data into the phy's memory */
++static int aqr_fw_load_memory(struct phy_device *phydev, u32 addr,
++			      const u8 *data, size_t len)
++{
++	u16 crc = 0, up_crc;
++	size_t pos;
++
++	/* PHY expect addr in LE */
++	addr = (__force u32)cpu_to_le32(addr);
++
++	phy_write_mmd(phydev, MDIO_MMD_VEND1,
++		      VEND1_GLOBAL_MAILBOX_INTERFACE1,
++		      VEND1_GLOBAL_MAILBOX_INTERFACE1_CRC_RESET);
++	phy_write_mmd(phydev, MDIO_MMD_VEND1,
++		      VEND1_GLOBAL_MAILBOX_INTERFACE3,
++		      VEND1_GLOBAL_MAILBOX_INTERFACE3_MSW_ADDR(addr));
++	phy_write_mmd(phydev, MDIO_MMD_VEND1,
++		      VEND1_GLOBAL_MAILBOX_INTERFACE4,
++		      VEND1_GLOBAL_MAILBOX_INTERFACE4_LSW_ADDR(addr));
++
++	/* We assume and enforce the size to be word aligned.
++	 * If a firmware that is not word aligned is found, please report upstream.
++	 */
++	for (pos = 0; pos < len; pos += sizeof(u32)) {
++		u32 word;
++
++		/* FW data is always stored in little-endian */
++		word = get_unaligned((const u32 *)(data + pos));
++
++		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE5,
++			      VEND1_GLOBAL_MAILBOX_INTERFACE5_MSW_DATA(word));
++		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE6,
++			      VEND1_GLOBAL_MAILBOX_INTERFACE6_LSW_DATA(word));
++
++		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE1,
++			      VEND1_GLOBAL_MAILBOX_INTERFACE1_EXECUTE |
++			      VEND1_GLOBAL_MAILBOX_INTERFACE1_WRITE);
++
++		/* calculate CRC as we load data to the mailbox.
++		 * We convert word to big-endian as PHY is BE and mailbox will
++		 * return a BE CRC.
++		 */
++		word = (__force u32)cpu_to_be32(word);
++		crc = crc_ccitt_false(crc, (u8 *)&word, sizeof(word));
++	}
++
++	up_crc = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_MAILBOX_INTERFACE2);
++	if (crc != up_crc) {
++		phydev_err(phydev, "CRC mismatch: calculated 0x%04x PHY 0x%04x\n",
++			   crc, up_crc);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int aqr_fw_boot(struct phy_device *phydev, const u8 *data, size_t size,
++		       enum aqr_fw_src fw_src)
++{
++	u16 calculated_crc, read_crc, read_primary_offset;
++	u32 iram_offset = 0, iram_size = 0;
++	u32 dram_offset = 0, dram_size = 0;
++	char version[VERSION_STRING_SIZE];
++	u32 primary_offset = 0;
++	int ret;
++
++	/* extract saved CRC at the end of the fw
++	 * CRC is saved in big-endian as PHY is BE
++	 */
++	ret = aqr_fw_get_be16(data, size - sizeof(u16), size, &read_crc);
++	if (ret) {
++		phydev_err(phydev, "bad firmware CRC in firmware\n");
++		return ret;
++	}
++	calculated_crc = crc_ccitt_false(0, data, size - sizeof(u16));
++	if (read_crc != calculated_crc) {
++		phydev_err(phydev, "bad firmware CRC: file 0x%04x calculated 0x%04x\n",
++			   read_crc, calculated_crc);
++		return -EINVAL;
++	}
++
++	/* Get the primary offset to extract DRAM and IRAM sections. */
++	ret = aqr_fw_get_le16(data, PRIMARY_OFFSET_OFFSET, size, &read_primary_offset);
++	if (ret) {
++		phydev_err(phydev, "bad primary offset in firmware\n");
++		return ret;
++	}
++	primary_offset = PRIMARY_OFFSET(read_primary_offset);
++
++	/* Find the DRAM and IRAM sections within the firmware file.
++	 * Make sure the fw_header is correctly in the firmware.
++	 */
++	if (!aqr_fw_validate_get(size, primary_offset + HEADER_OFFSET,
++				 sizeof(struct aqr_fw_header))) {
++		phydev_err(phydev, "bad fw_header in firmware\n");
++		return -EINVAL;
++	}
++
++	/* offset are in LE and values needs to be converted to cpu endian */
++	ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
++			      offsetof(struct aqr_fw_header, iram_offset),
++			      size, &iram_offset);
++	if (ret) {
++		phydev_err(phydev, "bad iram offset in firmware\n");
++		return ret;
++	}
++	ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
++			      offsetof(struct aqr_fw_header, iram_size),
++			      size, &iram_size);
++	if (ret) {
++		phydev_err(phydev, "invalid iram size in firmware\n");
++		return ret;
++	}
++	ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
++			      offsetof(struct aqr_fw_header, dram_offset),
++			      size, &dram_offset);
++	if (ret) {
++		phydev_err(phydev, "bad dram offset in firmware\n");
++		return ret;
++	}
++	ret = aqr_fw_get_le24(data, primary_offset + HEADER_OFFSET +
++			      offsetof(struct aqr_fw_header, dram_size),
++			      size, &dram_size);
++	if (ret) {
++		phydev_err(phydev, "invalid dram size in firmware\n");
++		return ret;
++	}
++
++	/* Increment the offset with the primary offset.
++	 * Validate iram/dram offset and size.
++	 */
++	iram_offset += primary_offset;
++	if (iram_size % sizeof(u32)) {
++		phydev_err(phydev, "iram size if not aligned to word size. Please report this upstream!\n");
++		return -EINVAL;
++	}
++	if (!aqr_fw_validate_get(size, iram_offset, iram_size)) {
++		phydev_err(phydev, "invalid iram offset for iram size\n");
++		return -EINVAL;
++	}
++
++	dram_offset += primary_offset;
++	if (dram_size % sizeof(u32)) {
++		phydev_err(phydev, "dram size if not aligned to word size. Please report this upstream!\n");
++		return -EINVAL;
++	}
++	if (!aqr_fw_validate_get(size, dram_offset, dram_size)) {
++		phydev_err(phydev, "invalid iram offset for iram size\n");
++		return -EINVAL;
++	}
++
++	phydev_dbg(phydev, "primary %d IRAM offset=%d size=%d DRAM offset=%d size=%d\n",
++		   primary_offset, iram_offset, iram_size, dram_offset, dram_size);
++
++	if (!aqr_fw_validate_get(size, dram_offset + VERSION_STRING_OFFSET,
++				 VERSION_STRING_SIZE)) {
++		phydev_err(phydev, "invalid version in firmware\n");
++		return -EINVAL;
++	}
++	strscpy(version, (char *)data + dram_offset + VERSION_STRING_OFFSET,
++		VERSION_STRING_SIZE);
++	if (version[0] == '\0') {
++		phydev_err(phydev, "invalid version in firmware\n");
++		return -EINVAL;
++	}
++	phydev_info(phydev, "loading firmware version '%s' from '%s'\n", version,
++		    aqr_fw_src_string[fw_src]);
++
++	/* stall the microcprocessor */
++	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
++		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL | VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD);
++
++	phydev_dbg(phydev, "loading DRAM 0x%08x from offset=%d size=%d\n",
++		   DRAM_BASE_ADDR, dram_offset, dram_size);
++	ret = aqr_fw_load_memory(phydev, DRAM_BASE_ADDR, data + dram_offset,
++				 dram_size);
++	if (ret)
++		return ret;
++
++	phydev_dbg(phydev, "loading IRAM 0x%08x from offset=%d size=%d\n",
++		   IRAM_BASE_ADDR, iram_offset, iram_size);
++	ret = aqr_fw_load_memory(phydev, IRAM_BASE_ADDR, data + iram_offset,
++				 iram_size);
++	if (ret)
++		return ret;
++
++	/* make sure soft reset and low power mode are clear */
++	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_SC,
++			   VEND1_GLOBAL_SC_SOFT_RESET | VEND1_GLOBAL_SC_LOW_POWER);
++
++	/* Release the microprocessor. UP_RESET must be held for 100 usec. */
++	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
++		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL |
++		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD |
++		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_RST);
++	usleep_range(UP_RESET_SLEEP, UP_RESET_SLEEP * 2);
++
++	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_CONTROL2,
++		      VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD);
++
++	return 0;
++}
++
++static int aqr_firmware_load_nvmem(struct phy_device *phydev)
++{
++	struct nvmem_cell *cell;
++	size_t size;
++	u8 *buf;
++	int ret;
++
++	cell = nvmem_cell_get(&phydev->mdio.dev, "firmware");
++	if (IS_ERR(cell))
++		return PTR_ERR(cell);
++
++	buf = nvmem_cell_read(cell, &size);
++	if (IS_ERR(buf)) {
++		ret = PTR_ERR(buf);
++		goto exit;
++	}
++
++	ret = aqr_fw_boot(phydev, buf, size, AQR_FW_SRC_NVMEM);
++	if (ret)
++		phydev_err(phydev, "firmware loading failed: %d\n", ret);
++
++	kfree(buf);
++exit:
++	nvmem_cell_put(cell);
++
++	return ret;
++}
++
++static int aqr_firmware_load_fs(struct phy_device *phydev)
++{
++	struct device *dev = &phydev->mdio.dev;
++	const struct firmware *fw;
++	const char *fw_name;
++	int ret;
++
++	ret = of_property_read_string(dev->of_node, "firmware-name",
++				      &fw_name);
++	if (ret)
++		return ret;
++
++	ret = request_firmware(&fw, fw_name, dev);
++	if (ret) {
++		phydev_err(phydev, "failed to find FW file %s (%d)\n",
++			   fw_name, ret);
++		return ret;
++	}
++
++	ret = aqr_fw_boot(phydev, fw->data, fw->size, AQR_FW_SRC_FS);
++	if (ret)
++		phydev_err(phydev, "firmware loading failed: %d\n", ret);
++
++	release_firmware(fw);
++
++	return ret;
++}
++
++int aqr_firmware_load(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Check if the firmware is not already loaded by pooling
++	 * the current version returned by the PHY. If 0 is returned,
++	 * no firmware is loaded.
++	 */
++	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
++	if (ret > 0)
++		goto exit;
++
++	ret = aqr_firmware_load_nvmem(phydev);
++	if (!ret)
++		goto exit;
++
++	ret = aqr_firmware_load_fs(phydev);
++	if (ret)
++		return ret;
++
++exit:
++	return 0;
++}
+--- a/drivers/net/phy/aquantia/aquantia_main.c
++++ b/drivers/net/phy/aquantia/aquantia_main.c
+@@ -658,11 +658,17 @@ static int aqr107_resume(struct phy_devi
+ 
+ static int aqr107_probe(struct phy_device *phydev)
+ {
++	int ret;
++
+ 	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+ 				    sizeof(struct aqr107_priv), GFP_KERNEL);
+ 	if (!phydev->priv)
+ 		return -ENOMEM;
+ 
++	ret = aqr_firmware_load(phydev);
++	if (ret)
++		return ret;
++
+ 	return aqr_hwmon_probe(phydev);
+ }
+ 

+ 69 - 0
target/linux/generic/backport-6.6/707-v6.8-02-net-phy-at803x-move-disable-WOL-to-specific-at8031-p.patch

@@ -0,0 +1,69 @@
+From 6a3b8c573b5a152a6aa7a0b54c5e18b84c6ba6f5 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:49 +0100
+Subject: [PATCH 02/13] net: phy: at803x: move disable WOL to specific at8031
+ probe
+
+Move the WOL disable call to specific at8031 probe to make at803x_probe
+more generic and drop extra check for PHY ID.
+
+Keep the same previous behaviour by first calling at803x_probe and then
+disabling WOL.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -886,15 +886,6 @@ static int at803x_probe(struct phy_devic
+ 			priv->is_fiber = true;
+ 			break;
+ 		}
+-
+-		/* Disable WoL in 1588 register which is enabled
+-		 * by default
+-		 */
+-		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-				     AT803X_PHY_MMD3_WOL_CTRL,
+-				     AT803X_WOL_EN, 0);
+-		if (ret)
+-			return ret;
+ 	}
+ 
+ 	return 0;
+@@ -1591,6 +1582,22 @@ static int at803x_cable_test_start(struc
+ 	return 0;
+ }
+ 
++static int at8031_probe(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = at803x_probe(phydev);
++	if (ret)
++		return ret;
++
++	/* Disable WoL in 1588 register which is enabled
++	 * by default
++	 */
++	return phy_modify_mmd(phydev, MDIO_MMD_PCS,
++			      AT803X_PHY_MMD3_WOL_CTRL,
++			      AT803X_WOL_EN, 0);
++}
++
+ static int qca83xx_config_init(struct phy_device *phydev)
+ {
+ 	u8 switch_revision;
+@@ -2092,7 +2099,7 @@ static struct phy_driver at803x_driver[]
+ 	PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
+ 	.name			= "Qualcomm Atheros AR8031/AR8033",
+ 	.flags			= PHY_POLL_CABLE_TEST,
+-	.probe			= at803x_probe,
++	.probe			= at8031_probe,
+ 	.config_init		= at803x_config_init,
+ 	.config_aneg		= at803x_config_aneg,
+ 	.soft_reset		= genphy_soft_reset,

+ 129 - 0
target/linux/generic/backport-6.6/707-v6.8-03-net-phy-at803x-raname-hw_stats-functions-to-qca83xx-.patch

@@ -0,0 +1,129 @@
+From 07b1ad83b9ed6db1735ba10baf67b7a565ac0cef Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:50 +0100
+Subject: [PATCH 03/13] net: phy: at803x: raname hw_stats functions to qca83xx
+ specific name
+
+The function and the struct related to hw_stats were specific to qca83xx
+PHY but were called following the convention in the driver of calling
+everything with at803x prefix.
+
+To better organize the code, rename these function a more specific name
+to better describe that they are specific to 83xx PHY family.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 44 ++++++++++++++++++++--------------------
+ 1 file changed, 22 insertions(+), 22 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -295,7 +295,7 @@ struct at803x_hw_stat {
+ 	enum stat_access_type access_type;
+ };
+ 
+-static struct at803x_hw_stat at803x_hw_stats[] = {
++static struct at803x_hw_stat qca83xx_hw_stats[] = {
+ 	{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+ 	{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+ 	{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+@@ -311,7 +311,7 @@ struct at803x_priv {
+ 	bool is_1000basex;
+ 	struct regulator_dev *vddio_rdev;
+ 	struct regulator_dev *vddh_rdev;
+-	u64 stats[ARRAY_SIZE(at803x_hw_stats)];
++	u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+ };
+ 
+ struct at803x_context {
+@@ -529,24 +529,24 @@ static void at803x_get_wol(struct phy_de
+ 		wol->wolopts |= WAKE_MAGIC;
+ }
+ 
+-static int at803x_get_sset_count(struct phy_device *phydev)
++static int qca83xx_get_sset_count(struct phy_device *phydev)
+ {
+-	return ARRAY_SIZE(at803x_hw_stats);
++	return ARRAY_SIZE(qca83xx_hw_stats);
+ }
+ 
+-static void at803x_get_strings(struct phy_device *phydev, u8 *data)
++static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++) {
++	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
+ 		strscpy(data + i * ETH_GSTRING_LEN,
+-			at803x_hw_stats[i].string, ETH_GSTRING_LEN);
++			qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
+ 	}
+ }
+ 
+-static u64 at803x_get_stat(struct phy_device *phydev, int i)
++static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
+ {
+-	struct at803x_hw_stat stat = at803x_hw_stats[i];
++	struct at803x_hw_stat stat = qca83xx_hw_stats[i];
+ 	struct at803x_priv *priv = phydev->priv;
+ 	int val;
+ 	u64 ret;
+@@ -567,13 +567,13 @@ static u64 at803x_get_stat(struct phy_de
+ 	return ret;
+ }
+ 
+-static void at803x_get_stats(struct phy_device *phydev,
+-			     struct ethtool_stats *stats, u64 *data)
++static void qca83xx_get_stats(struct phy_device *phydev,
++			      struct ethtool_stats *stats, u64 *data)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < ARRAY_SIZE(at803x_hw_stats); i++)
+-		data[i] = at803x_get_stat(phydev, i);
++	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
++		data[i] = qca83xx_get_stat(phydev, i);
+ }
+ 
+ static int at803x_suspend(struct phy_device *phydev)
+@@ -2175,9 +2175,9 @@ static struct phy_driver at803x_driver[]
+ 	.flags			= PHY_IS_INTERNAL,
+ 	.config_init		= qca83xx_config_init,
+ 	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= at803x_get_sset_count,
+-	.get_strings		= at803x_get_strings,
+-	.get_stats		= at803x_get_stats,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
+ 	.suspend		= qca83xx_suspend,
+ 	.resume			= qca83xx_resume,
+ }, {
+@@ -2191,9 +2191,9 @@ static struct phy_driver at803x_driver[]
+ 	.flags			= PHY_IS_INTERNAL,
+ 	.config_init		= qca83xx_config_init,
+ 	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= at803x_get_sset_count,
+-	.get_strings		= at803x_get_strings,
+-	.get_stats		= at803x_get_stats,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
+ 	.suspend		= qca83xx_suspend,
+ 	.resume			= qca83xx_resume,
+ }, {
+@@ -2207,9 +2207,9 @@ static struct phy_driver at803x_driver[]
+ 	.flags			= PHY_IS_INTERNAL,
+ 	.config_init		= qca83xx_config_init,
+ 	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= at803x_get_sset_count,
+-	.get_strings		= at803x_get_strings,
+-	.get_stats		= at803x_get_stats,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
+ 	.suspend		= qca83xx_suspend,
+ 	.resume			= qca83xx_resume,
+ }, {

+ 155 - 0
target/linux/generic/backport-6.6/707-v6.8-04-net-phy-at803x-move-qca83xx-specific-check-in-dedica.patch

@@ -0,0 +1,155 @@
+From d43cff3f82336c0bd965ea552232d9f4ddac71a6 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:51 +0100
+Subject: [PATCH 04/13] net: phy: at803x: move qca83xx specific check in
+ dedicated functions
+
+Rework qca83xx specific check to dedicated function to tidy things up
+and drop useless phy_id check.
+
+Also drop an useless link_change_notify for QCA8337 as it did nothing an
+returned early.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 68 ++++++++++++++++++++++------------------
+ 1 file changed, 37 insertions(+), 31 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -1623,27 +1623,26 @@ static int qca83xx_config_init(struct ph
+ 		break;
+ 	}
+ 
++	/* Following original QCA sourcecode set port to prefer master */
++	phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
++
++	return 0;
++}
++
++static int qca8327_config_init(struct phy_device *phydev)
++{
+ 	/* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
+ 	 * Disable on init and enable only with 100m speed following
+ 	 * qca original source code.
+ 	 */
+-	if (phydev->drv->phy_id == QCA8327_A_PHY_ID ||
+-	    phydev->drv->phy_id == QCA8327_B_PHY_ID)
+-		at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+-				      QCA8327_DEBUG_MANU_CTRL_EN, 0);
++	at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
++			      QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ 
+-	/* Following original QCA sourcecode set port to prefer master */
+-	phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+-
+-	return 0;
++	return qca83xx_config_init(phydev);
+ }
+ 
+ static void qca83xx_link_change_notify(struct phy_device *phydev)
+ {
+-	/* QCA8337 doesn't require DAC Amplitude adjustement */
+-	if (phydev->drv->phy_id == QCA8337_PHY_ID)
+-		return;
+-
+ 	/* Set DAC Amplitude adjustment to +6% for 100m on link running */
+ 	if (phydev->state == PHY_RUNNING) {
+ 		if (phydev->speed == SPEED_100)
+@@ -1686,19 +1685,6 @@ static int qca83xx_resume(struct phy_dev
+ 
+ static int qca83xx_suspend(struct phy_device *phydev)
+ {
+-	u16 mask = 0;
+-
+-	/* Only QCA8337 support actual suspend.
+-	 * QCA8327 cause port unreliability when phy suspend
+-	 * is set.
+-	 */
+-	if (phydev->drv->phy_id == QCA8337_PHY_ID) {
+-		genphy_suspend(phydev);
+-	} else {
+-		mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+-		phy_modify(phydev, MII_BMCR, mask, 0);
+-	}
+-
+ 	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ 			      AT803X_DEBUG_GATE_CLK_IN1000, 0);
+ 
+@@ -1709,6 +1695,27 @@ static int qca83xx_suspend(struct phy_de
+ 	return 0;
+ }
+ 
++static int qca8337_suspend(struct phy_device *phydev)
++{
++	/* Only QCA8337 support actual suspend. */
++	genphy_suspend(phydev);
++
++	return qca83xx_suspend(phydev);
++}
++
++static int qca8327_suspend(struct phy_device *phydev)
++{
++	u16 mask = 0;
++
++	/* QCA8327 cause port unreliability when phy suspend
++	 * is set.
++	 */
++	mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
++	phy_modify(phydev, MII_BMCR, mask, 0);
++
++	return qca83xx_suspend(phydev);
++}
++
+ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+ {
+ 	int ret;
+@@ -2170,7 +2177,6 @@ static struct phy_driver at803x_driver[]
+ 	.phy_id_mask		= QCA8K_PHY_ID_MASK,
+ 	.name			= "Qualcomm Atheros 8337 internal PHY",
+ 	/* PHY_GBIT_FEATURES */
+-	.link_change_notify	= qca83xx_link_change_notify,
+ 	.probe			= at803x_probe,
+ 	.flags			= PHY_IS_INTERNAL,
+ 	.config_init		= qca83xx_config_init,
+@@ -2178,7 +2184,7 @@ static struct phy_driver at803x_driver[]
+ 	.get_sset_count		= qca83xx_get_sset_count,
+ 	.get_strings		= qca83xx_get_strings,
+ 	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca83xx_suspend,
++	.suspend		= qca8337_suspend,
+ 	.resume			= qca83xx_resume,
+ }, {
+ 	/* QCA8327-A from switch QCA8327-AL1A */
+@@ -2189,12 +2195,12 @@ static struct phy_driver at803x_driver[]
+ 	.link_change_notify	= qca83xx_link_change_notify,
+ 	.probe			= at803x_probe,
+ 	.flags			= PHY_IS_INTERNAL,
+-	.config_init		= qca83xx_config_init,
++	.config_init		= qca8327_config_init,
+ 	.soft_reset		= genphy_soft_reset,
+ 	.get_sset_count		= qca83xx_get_sset_count,
+ 	.get_strings		= qca83xx_get_strings,
+ 	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca83xx_suspend,
++	.suspend		= qca8327_suspend,
+ 	.resume			= qca83xx_resume,
+ }, {
+ 	/* QCA8327-B from switch QCA8327-BL1A */
+@@ -2205,12 +2211,12 @@ static struct phy_driver at803x_driver[]
+ 	.link_change_notify	= qca83xx_link_change_notify,
+ 	.probe			= at803x_probe,
+ 	.flags			= PHY_IS_INTERNAL,
+-	.config_init		= qca83xx_config_init,
++	.config_init		= qca8327_config_init,
+ 	.soft_reset		= genphy_soft_reset,
+ 	.get_sset_count		= qca83xx_get_sset_count,
+ 	.get_strings		= qca83xx_get_strings,
+ 	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca83xx_suspend,
++	.suspend		= qca8327_suspend,
+ 	.resume			= qca83xx_resume,
+ }, {
+ 	/* Qualcomm QCA8081 */

+ 94 - 0
target/linux/generic/backport-6.6/707-v6.8-05-net-phy-at803x-move-specific-DT-option-for-at8031-to.patch

@@ -0,0 +1,94 @@
+From 900eef75cc5018e149c52fe305c9c3fe424c52a7 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:52 +0100
+Subject: [PATCH 05/13] net: phy: at803x: move specific DT option for at8031 to
+ specific probe
+
+Move specific DT options for at8031 to specific probe to tidy things up
+and make at803x_parse_dt more generic.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 55 ++++++++++++++++++++++------------------
+ 1 file changed, 31 insertions(+), 24 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -825,30 +825,6 @@ static int at803x_parse_dt(struct phy_de
+ 		}
+ 	}
+ 
+-	/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+-	 * options.
+-	 */
+-	if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+-		if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+-			priv->flags |= AT803X_KEEP_PLL_ENABLED;
+-
+-		ret = at8031_register_regulators(phydev);
+-		if (ret < 0)
+-			return ret;
+-
+-		ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
+-							 "vddio");
+-		if (ret) {
+-			phydev_err(phydev, "failed to get VDDIO regulator\n");
+-			return ret;
+-		}
+-
+-		/* Only AR8031/8033 support 1000Base-X for SFP modules */
+-		ret = phy_sfp_probe(phydev, &at803x_sfp_ops);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -1582,6 +1558,30 @@ static int at803x_cable_test_start(struc
+ 	return 0;
+ }
+ 
++static int at8031_parse_dt(struct phy_device *phydev)
++{
++	struct device_node *node = phydev->mdio.dev.of_node;
++	struct at803x_priv *priv = phydev->priv;
++	int ret;
++
++	if (of_property_read_bool(node, "qca,keep-pll-enabled"))
++		priv->flags |= AT803X_KEEP_PLL_ENABLED;
++
++	ret = at8031_register_regulators(phydev);
++	if (ret < 0)
++		return ret;
++
++	ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
++						 "vddio");
++	if (ret) {
++		phydev_err(phydev, "failed to get VDDIO regulator\n");
++		return ret;
++	}
++
++	/* Only AR8031/8033 support 1000Base-X for SFP modules */
++	return phy_sfp_probe(phydev, &at803x_sfp_ops);
++}
++
+ static int at8031_probe(struct phy_device *phydev)
+ {
+ 	int ret;
+@@ -1590,6 +1590,13 @@ static int at8031_probe(struct phy_devic
+ 	if (ret)
+ 		return ret;
+ 
++	/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
++	 * options.
++	 */
++	ret = at8031_parse_dt(phydev);
++	if (ret)
++		return ret;
++
+ 	/* Disable WoL in 1588 register which is enabled
+ 	 * by default
+ 	 */

+ 78 - 0
target/linux/generic/backport-6.6/707-v6.8-06-net-phy-at803x-move-specific-at8031-probe-mode-check.patch

@@ -0,0 +1,78 @@
+From 25d2ba94005fac18fe68878cddff59a67e115554 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:53 +0100
+Subject: [PATCH 06/13] net: phy: at803x: move specific at8031 probe mode check
+ to dedicated probe
+
+Move specific at8031 probe mode check to dedicated probe to make
+at803x_probe more generic and keep code tidy.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 39 +++++++++++++++++++--------------------
+ 1 file changed, 19 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -844,26 +844,6 @@ static int at803x_probe(struct phy_devic
+ 	if (ret)
+ 		return ret;
+ 
+-	if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+-		int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+-		int mode_cfg;
+-
+-		if (ccr < 0)
+-			return ccr;
+-		mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+-
+-		switch (mode_cfg) {
+-		case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+-		case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+-			priv->is_1000basex = true;
+-			fallthrough;
+-		case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+-		case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+-			priv->is_fiber = true;
+-			break;
+-		}
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -1584,6 +1564,9 @@ static int at8031_parse_dt(struct phy_de
+ 
+ static int at8031_probe(struct phy_device *phydev)
+ {
++	struct at803x_priv *priv = phydev->priv;
++	int mode_cfg;
++	int ccr;
+ 	int ret;
+ 
+ 	ret = at803x_probe(phydev);
+@@ -1597,6 +1580,22 @@ static int at8031_probe(struct phy_devic
+ 	if (ret)
+ 		return ret;
+ 
++	ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
++	if (ccr < 0)
++		return ccr;
++	mode_cfg = ccr & AT803X_MODE_CFG_MASK;
++
++	switch (mode_cfg) {
++	case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
++	case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
++		priv->is_1000basex = true;
++		fallthrough;
++	case AT803X_MODE_CFG_FX100_RGMII_50OHM:
++	case AT803X_MODE_CFG_FX100_RGMII_75OHM:
++		priv->is_fiber = true;
++		break;
++	}
++
+ 	/* Disable WoL in 1588 register which is enabled
+ 	 * by default
+ 	 */

+ 86 - 0
target/linux/generic/backport-6.6/707-v6.8-07-net-phy-at803x-move-specific-at8031-config_init-to-d.patch

@@ -0,0 +1,86 @@
+From 3ae3bc426eaf57ca8f53d75777d9a5ef779bc7b7 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:54 +0100
+Subject: [PATCH 07/13] net: phy: at803x: move specific at8031 config_init to
+ dedicated function
+
+Move specific at8031 config_init to dedicated function to make
+at803x_config_init more generic and tidy things up.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 45 ++++++++++++++++++++++------------------
+ 1 file changed, 25 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -951,27 +951,8 @@ static int at803x_hibernation_mode_confi
+ 
+ static int at803x_config_init(struct phy_device *phydev)
+ {
+-	struct at803x_priv *priv = phydev->priv;
+ 	int ret;
+ 
+-	if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+-		/* Some bootloaders leave the fiber page selected.
+-		 * Switch to the appropriate page (fiber or copper), as otherwise we
+-		 * read the PHY capabilities from the wrong page.
+-		 */
+-		phy_lock_mdio_bus(phydev);
+-		ret = at803x_write_page(phydev,
+-					priv->is_fiber ? AT803X_PAGE_FIBER :
+-							 AT803X_PAGE_COPPER);
+-		phy_unlock_mdio_bus(phydev);
+-		if (ret)
+-			return ret;
+-
+-		ret = at8031_pll_config(phydev);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+ 	/* The RX and TX delay default is:
+ 	 *   after HW reset: RX delay enabled and TX delay disabled
+ 	 *   after SW reset: RX delay enabled, while TX delay retains the
+@@ -1604,6 +1585,30 @@ static int at8031_probe(struct phy_devic
+ 			      AT803X_WOL_EN, 0);
+ }
+ 
++static int at8031_config_init(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	int ret;
++
++	/* Some bootloaders leave the fiber page selected.
++	 * Switch to the appropriate page (fiber or copper), as otherwise we
++	 * read the PHY capabilities from the wrong page.
++	 */
++	phy_lock_mdio_bus(phydev);
++	ret = at803x_write_page(phydev,
++				priv->is_fiber ? AT803X_PAGE_FIBER :
++						 AT803X_PAGE_COPPER);
++	phy_unlock_mdio_bus(phydev);
++	if (ret)
++		return ret;
++
++	ret = at8031_pll_config(phydev);
++	if (ret < 0)
++		return ret;
++
++	return at803x_config_init(phydev);
++}
++
+ static int qca83xx_config_init(struct phy_device *phydev)
+ {
+ 	u8 switch_revision;
+@@ -2113,7 +2118,7 @@ static struct phy_driver at803x_driver[]
+ 	.name			= "Qualcomm Atheros AR8031/AR8033",
+ 	.flags			= PHY_POLL_CABLE_TEST,
+ 	.probe			= at8031_probe,
+-	.config_init		= at803x_config_init,
++	.config_init		= at8031_config_init,
+ 	.config_aneg		= at803x_config_aneg,
+ 	.soft_reset		= genphy_soft_reset,
+ 	.set_wol		= at803x_set_wol,

+ 92 - 0
target/linux/generic/backport-6.6/707-v6.8-08-net-phy-at803x-move-specific-at8031-WOL-bits-to-dedi.patch

@@ -0,0 +1,92 @@
+From 27b89c9dc1b0393090d68d651b82f30ad2696baa Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:55 +0100
+Subject: [PATCH 08/13] net: phy: at803x: move specific at8031 WOL bits to
+ dedicated function
+
+Move specific at8031 WOL enable/disable to dedicated function to make
+at803x_set_wol more generic.
+
+This is needed in preparation for PHY driver split as qca8081 share the
+same function to toggle WOL settings.
+
+In this new implementation WOL module in at8031 is enabled after the
+generic interrupt is setup. This should not cause any problem as the
+WOL_INT has a separate implementation and only relay on MAC bits.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 42 ++++++++++++++++++++++++----------------
+ 1 file changed, 25 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -466,27 +466,11 @@ static int at803x_set_wol(struct phy_dev
+ 			phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+ 				      mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+ 
+-		/* Enable WOL function for 1588 */
+-		if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+-			ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-					     AT803X_PHY_MMD3_WOL_CTRL,
+-					     0, AT803X_WOL_EN);
+-			if (ret)
+-				return ret;
+-		}
+ 		/* Enable WOL interrupt */
+ 		ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+ 		if (ret)
+ 			return ret;
+ 	} else {
+-		/* Disable WoL function for 1588 */
+-		if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+-			ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-					     AT803X_PHY_MMD3_WOL_CTRL,
+-					     AT803X_WOL_EN, 0);
+-			if (ret)
+-				return ret;
+-		}
+ 		/* Disable WOL interrupt */
+ 		ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+ 		if (ret)
+@@ -1609,6 +1593,30 @@ static int at8031_config_init(struct phy
+ 	return at803x_config_init(phydev);
+ }
+ 
++static int at8031_set_wol(struct phy_device *phydev,
++			  struct ethtool_wolinfo *wol)
++{
++	int ret;
++
++	/* First setup MAC address and enable WOL interrupt */
++	ret = at803x_set_wol(phydev, wol);
++	if (ret)
++		return ret;
++
++	if (wol->wolopts & WAKE_MAGIC)
++		/* Enable WOL function for 1588 */
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     AT803X_PHY_MMD3_WOL_CTRL,
++				     0, AT803X_WOL_EN);
++	else
++		/* Disable WoL function for 1588 */
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     AT803X_PHY_MMD3_WOL_CTRL,
++				     AT803X_WOL_EN, 0);
++
++	return ret;
++}
++
+ static int qca83xx_config_init(struct phy_device *phydev)
+ {
+ 	u8 switch_revision;
+@@ -2121,7 +2129,7 @@ static struct phy_driver at803x_driver[]
+ 	.config_init		= at8031_config_init,
+ 	.config_aneg		= at803x_config_aneg,
+ 	.soft_reset		= genphy_soft_reset,
+-	.set_wol		= at803x_set_wol,
++	.set_wol		= at8031_set_wol,
+ 	.get_wol		= at803x_get_wol,
+ 	.suspend		= at803x_suspend,
+ 	.resume			= at803x_resume,

+ 78 - 0
target/linux/generic/backport-6.6/707-v6.8-09-net-phy-at803x-move-specific-at8031-config_intr-to-d.patch

@@ -0,0 +1,78 @@
+From 30dd62191d3dd97c08f7f9dc9ce77ffab457e4fb Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:56 +0100
+Subject: [PATCH 09/13] net: phy: at803x: move specific at8031 config_intr to
+ dedicated function
+
+Move specific at8031 config_intr bits to dedicated function to make
+at803x_config_initr more generic.
+
+This is needed in preparation for PHY driver split as qca8081 share the
+same function to setup interrupts.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 30 ++++++++++++++++++++++++------
+ 1 file changed, 24 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -990,7 +990,6 @@ static int at803x_ack_interrupt(struct p
+ 
+ static int at803x_config_intr(struct phy_device *phydev)
+ {
+-	struct at803x_priv *priv = phydev->priv;
+ 	int err;
+ 	int value;
+ 
+@@ -1007,10 +1006,6 @@ static int at803x_config_intr(struct phy
+ 		value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
+ 		value |= AT803X_INTR_ENABLE_LINK_FAIL;
+ 		value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+-		if (priv->is_fiber) {
+-			value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+-			value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+-		}
+ 
+ 		err = phy_write(phydev, AT803X_INTR_ENABLE, value);
+ 	} else {
+@@ -1617,6 +1612,29 @@ static int at8031_set_wol(struct phy_dev
+ 	return ret;
+ }
+ 
++static int at8031_config_intr(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	int err, value = 0;
++
++	if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
++	    priv->is_fiber) {
++		/* Clear any pending interrupts */
++		err = at803x_ack_interrupt(phydev);
++		if (err)
++			return err;
++
++		value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
++		value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
++
++		err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
++		if (err)
++			return err;
++	}
++
++	return at803x_config_intr(phydev);
++}
++
+ static int qca83xx_config_init(struct phy_device *phydev)
+ {
+ 	u8 switch_revision;
+@@ -2137,7 +2155,7 @@ static struct phy_driver at803x_driver[]
+ 	.write_page		= at803x_write_page,
+ 	.get_features		= at803x_get_features,
+ 	.read_status		= at803x_read_status,
+-	.config_intr		= at803x_config_intr,
++	.config_intr		= at8031_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.get_tunable		= at803x_get_tunable,
+ 	.set_tunable		= at803x_set_tunable,

+ 78 - 0
target/linux/generic/backport-6.6/707-v6.8-10-net-phy-at803x-make-at8031-related-DT-functions-name.patch

@@ -0,0 +1,78 @@
+From a5ab9d8e7ae0da8328ac1637a9755311508dc8ab Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:57 +0100
+Subject: [PATCH 10/13] net: phy: at803x: make at8031 related DT functions name
+ more specific
+
+Rename at8031 related DT function name to a more specific name
+referencing they are only related to at8031 and not to the generic
+at803x PHY family.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -583,7 +583,7 @@ static int at803x_resume(struct phy_devi
+ 	return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
+ }
+ 
+-static int at803x_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
++static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ 					    unsigned int selector)
+ {
+ 	struct phy_device *phydev = rdev_get_drvdata(rdev);
+@@ -596,7 +596,7 @@ static int at803x_rgmii_reg_set_voltage_
+ 					     AT803X_DEBUG_RGMII_1V8, 0);
+ }
+ 
+-static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
++static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+ {
+ 	struct phy_device *phydev = rdev_get_drvdata(rdev);
+ 	int val;
+@@ -610,8 +610,8 @@ static int at803x_rgmii_reg_get_voltage_
+ 
+ static const struct regulator_ops vddio_regulator_ops = {
+ 	.list_voltage = regulator_list_voltage_table,
+-	.set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
+-	.get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
++	.set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
++	.get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+ };
+ 
+ static const unsigned int vddio_voltage_table[] = {
+@@ -666,7 +666,7 @@ static int at8031_register_regulators(st
+ 	return 0;
+ }
+ 
+-static int at803x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
++static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+ {
+ 	struct phy_device *phydev = upstream;
+ 	__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+@@ -710,10 +710,10 @@ static int at803x_sfp_insert(void *upstr
+ 	return 0;
+ }
+ 
+-static const struct sfp_upstream_ops at803x_sfp_ops = {
++static const struct sfp_upstream_ops at8031_sfp_ops = {
+ 	.attach = phy_sfp_attach,
+ 	.detach = phy_sfp_detach,
+-	.module_insert = at803x_sfp_insert,
++	.module_insert = at8031_sfp_insert,
+ };
+ 
+ static int at803x_parse_dt(struct phy_device *phydev)
+@@ -1519,7 +1519,7 @@ static int at8031_parse_dt(struct phy_de
+ 	}
+ 
+ 	/* Only AR8031/8033 support 1000Base-X for SFP modules */
+-	return phy_sfp_probe(phydev, &at803x_sfp_ops);
++	return phy_sfp_probe(phydev, &at8031_sfp_ops);
+ }
+ 
+ static int at8031_probe(struct phy_device *phydev)

+ 297 - 0
target/linux/generic/backport-6.6/707-v6.8-11-net-phy-at803x-move-at8031-functions-in-dedicated-se.patch

@@ -0,0 +1,297 @@
+From f932a6dc8bae0dae9645b5b1b4c65aed8a8acb2a Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:58 +0100
+Subject: [PATCH 11/13] net: phy: at803x: move at8031 functions in dedicated
+ section
+
+Move at8031 functions in dedicated section with dedicated at8031
+parse_dt and probe.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 266 +++++++++++++++++++--------------------
+ 1 file changed, 133 insertions(+), 133 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -583,139 +583,6 @@ static int at803x_resume(struct phy_devi
+ 	return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
+ }
+ 
+-static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+-					    unsigned int selector)
+-{
+-	struct phy_device *phydev = rdev_get_drvdata(rdev);
+-
+-	if (selector)
+-		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+-					     0, AT803X_DEBUG_RGMII_1V8);
+-	else
+-		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+-					     AT803X_DEBUG_RGMII_1V8, 0);
+-}
+-
+-static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+-{
+-	struct phy_device *phydev = rdev_get_drvdata(rdev);
+-	int val;
+-
+-	val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+-	if (val < 0)
+-		return val;
+-
+-	return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+-}
+-
+-static const struct regulator_ops vddio_regulator_ops = {
+-	.list_voltage = regulator_list_voltage_table,
+-	.set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+-	.get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+-};
+-
+-static const unsigned int vddio_voltage_table[] = {
+-	1500000,
+-	1800000,
+-};
+-
+-static const struct regulator_desc vddio_desc = {
+-	.name = "vddio",
+-	.of_match = of_match_ptr("vddio-regulator"),
+-	.n_voltages = ARRAY_SIZE(vddio_voltage_table),
+-	.volt_table = vddio_voltage_table,
+-	.ops = &vddio_regulator_ops,
+-	.type = REGULATOR_VOLTAGE,
+-	.owner = THIS_MODULE,
+-};
+-
+-static const struct regulator_ops vddh_regulator_ops = {
+-};
+-
+-static const struct regulator_desc vddh_desc = {
+-	.name = "vddh",
+-	.of_match = of_match_ptr("vddh-regulator"),
+-	.n_voltages = 1,
+-	.fixed_uV = 2500000,
+-	.ops = &vddh_regulator_ops,
+-	.type = REGULATOR_VOLTAGE,
+-	.owner = THIS_MODULE,
+-};
+-
+-static int at8031_register_regulators(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	struct device *dev = &phydev->mdio.dev;
+-	struct regulator_config config = { };
+-
+-	config.dev = dev;
+-	config.driver_data = phydev;
+-
+-	priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+-	if (IS_ERR(priv->vddio_rdev)) {
+-		phydev_err(phydev, "failed to register VDDIO regulator\n");
+-		return PTR_ERR(priv->vddio_rdev);
+-	}
+-
+-	priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+-	if (IS_ERR(priv->vddh_rdev)) {
+-		phydev_err(phydev, "failed to register VDDH regulator\n");
+-		return PTR_ERR(priv->vddh_rdev);
+-	}
+-
+-	return 0;
+-}
+-
+-static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+-{
+-	struct phy_device *phydev = upstream;
+-	__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+-	__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+-	DECLARE_PHY_INTERFACE_MASK(interfaces);
+-	phy_interface_t iface;
+-
+-	linkmode_zero(phy_support);
+-	phylink_set(phy_support, 1000baseX_Full);
+-	phylink_set(phy_support, 1000baseT_Full);
+-	phylink_set(phy_support, Autoneg);
+-	phylink_set(phy_support, Pause);
+-	phylink_set(phy_support, Asym_Pause);
+-
+-	linkmode_zero(sfp_support);
+-	sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+-	/* Some modules support 10G modes as well as others we support.
+-	 * Mask out non-supported modes so the correct interface is picked.
+-	 */
+-	linkmode_and(sfp_support, phy_support, sfp_support);
+-
+-	if (linkmode_empty(sfp_support)) {
+-		dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+-		return -EINVAL;
+-	}
+-
+-	iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+-
+-	/* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+-	 * interface for use with SFP modules.
+-	 * However, some copper modules detected as having a preferred SGMII
+-	 * interface do default to and function in 1000Base-X mode, so just
+-	 * print a warning and allow such modules, as they may have some chance
+-	 * of working.
+-	 */
+-	if (iface == PHY_INTERFACE_MODE_SGMII)
+-		dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+-	else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static const struct sfp_upstream_ops at8031_sfp_ops = {
+-	.attach = phy_sfp_attach,
+-	.detach = phy_sfp_detach,
+-	.module_insert = at8031_sfp_insert,
+-};
+-
+ static int at803x_parse_dt(struct phy_device *phydev)
+ {
+ 	struct device_node *node = phydev->mdio.dev.of_node;
+@@ -1498,6 +1365,139 @@ static int at803x_cable_test_start(struc
+ 	return 0;
+ }
+ 
++static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
++					    unsigned int selector)
++{
++	struct phy_device *phydev = rdev_get_drvdata(rdev);
++
++	if (selector)
++		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
++					     0, AT803X_DEBUG_RGMII_1V8);
++	else
++		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
++					     AT803X_DEBUG_RGMII_1V8, 0);
++}
++
++static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
++{
++	struct phy_device *phydev = rdev_get_drvdata(rdev);
++	int val;
++
++	val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
++	if (val < 0)
++		return val;
++
++	return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
++}
++
++static const struct regulator_ops vddio_regulator_ops = {
++	.list_voltage = regulator_list_voltage_table,
++	.set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
++	.get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
++};
++
++static const unsigned int vddio_voltage_table[] = {
++	1500000,
++	1800000,
++};
++
++static const struct regulator_desc vddio_desc = {
++	.name = "vddio",
++	.of_match = of_match_ptr("vddio-regulator"),
++	.n_voltages = ARRAY_SIZE(vddio_voltage_table),
++	.volt_table = vddio_voltage_table,
++	.ops = &vddio_regulator_ops,
++	.type = REGULATOR_VOLTAGE,
++	.owner = THIS_MODULE,
++};
++
++static const struct regulator_ops vddh_regulator_ops = {
++};
++
++static const struct regulator_desc vddh_desc = {
++	.name = "vddh",
++	.of_match = of_match_ptr("vddh-regulator"),
++	.n_voltages = 1,
++	.fixed_uV = 2500000,
++	.ops = &vddh_regulator_ops,
++	.type = REGULATOR_VOLTAGE,
++	.owner = THIS_MODULE,
++};
++
++static int at8031_register_regulators(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	struct device *dev = &phydev->mdio.dev;
++	struct regulator_config config = { };
++
++	config.dev = dev;
++	config.driver_data = phydev;
++
++	priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
++	if (IS_ERR(priv->vddio_rdev)) {
++		phydev_err(phydev, "failed to register VDDIO regulator\n");
++		return PTR_ERR(priv->vddio_rdev);
++	}
++
++	priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
++	if (IS_ERR(priv->vddh_rdev)) {
++		phydev_err(phydev, "failed to register VDDH regulator\n");
++		return PTR_ERR(priv->vddh_rdev);
++	}
++
++	return 0;
++}
++
++static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
++{
++	struct phy_device *phydev = upstream;
++	__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
++	__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
++	DECLARE_PHY_INTERFACE_MASK(interfaces);
++	phy_interface_t iface;
++
++	linkmode_zero(phy_support);
++	phylink_set(phy_support, 1000baseX_Full);
++	phylink_set(phy_support, 1000baseT_Full);
++	phylink_set(phy_support, Autoneg);
++	phylink_set(phy_support, Pause);
++	phylink_set(phy_support, Asym_Pause);
++
++	linkmode_zero(sfp_support);
++	sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
++	/* Some modules support 10G modes as well as others we support.
++	 * Mask out non-supported modes so the correct interface is picked.
++	 */
++	linkmode_and(sfp_support, phy_support, sfp_support);
++
++	if (linkmode_empty(sfp_support)) {
++		dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
++		return -EINVAL;
++	}
++
++	iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
++
++	/* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
++	 * interface for use with SFP modules.
++	 * However, some copper modules detected as having a preferred SGMII
++	 * interface do default to and function in 1000Base-X mode, so just
++	 * print a warning and allow such modules, as they may have some chance
++	 * of working.
++	 */
++	if (iface == PHY_INTERFACE_MODE_SGMII)
++		dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
++	else if (iface != PHY_INTERFACE_MODE_1000BASEX)
++		return -EINVAL;
++
++	return 0;
++}
++
++static const struct sfp_upstream_ops at8031_sfp_ops = {
++	.attach = phy_sfp_attach,
++	.detach = phy_sfp_detach,
++	.module_insert = at8031_sfp_insert,
++};
++
+ static int at8031_parse_dt(struct phy_device *phydev)
+ {
+ 	struct device_node *node = phydev->mdio.dev.of_node;

+ 114 - 0
target/linux/generic/backport-6.6/707-v6.8-12-net-phy-at803x-move-at8035-specific-DT-parse-to-dedi.patch

@@ -0,0 +1,114 @@
+From 21a2802a8365cfa82cc02187c1f95136d85592ad Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:51:59 +0100
+Subject: [PATCH 12/13] net: phy: at803x: move at8035 specific DT parse to
+ dedicated probe
+
+Move at8035 specific DT parse for clock out frequency to dedicated probe
+to make at803x probe function more generic.
+
+This is to tidy code and no behaviour change are intended.
+
+Detection logic is changed, we check if the clk 25m mask is set and if
+it's not zero, we assume the qca,clk-out-frequency property is set.
+
+The property is checked in the generic at803x_parse_dt called by
+at803x_probe.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 60 +++++++++++++++++++++++++++-------------
+ 1 file changed, 41 insertions(+), 19 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -638,23 +638,6 @@ static int at803x_parse_dt(struct phy_de
+ 
+ 		priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
+ 		priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
+-
+-		/* Fixup for the AR8030/AR8035. This chip has another mask and
+-		 * doesn't support the DSP reference. Eg. the lowest bit of the
+-		 * mask. The upper two bits select the same frequencies. Mask
+-		 * the lowest bit here.
+-		 *
+-		 * Warning:
+-		 *   There was no datasheet for the AR8030 available so this is
+-		 *   just a guess. But the AR8035 is listed as pin compatible
+-		 *   to the AR8030 so there might be a good chance it works on
+-		 *   the AR8030 too.
+-		 */
+-		if (phydev->drv->phy_id == ATH8030_PHY_ID ||
+-		    phydev->drv->phy_id == ATH8035_PHY_ID) {
+-			priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+-			priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+-		}
+ 	}
+ 
+ 	ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+@@ -1635,6 +1618,45 @@ static int at8031_config_intr(struct phy
+ 	return at803x_config_intr(phydev);
+ }
+ 
++static int at8035_parse_dt(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++
++	/* Mask is set by the generic at803x_parse_dt
++	 * if property is set. Assume property is set
++	 * with the mask not zero.
++	 */
++	if (priv->clk_25m_mask) {
++		/* Fixup for the AR8030/AR8035. This chip has another mask and
++		 * doesn't support the DSP reference. Eg. the lowest bit of the
++		 * mask. The upper two bits select the same frequencies. Mask
++		 * the lowest bit here.
++		 *
++		 * Warning:
++		 *   There was no datasheet for the AR8030 available so this is
++		 *   just a guess. But the AR8035 is listed as pin compatible
++		 *   to the AR8030 so there might be a good chance it works on
++		 *   the AR8030 too.
++		 */
++		priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
++		priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
++	}
++
++	return 0;
++}
++
++/* AR8030 and AR8035 shared the same special mask for clk_25m */
++static int at8035_probe(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = at803x_probe(phydev);
++	if (ret)
++		return ret;
++
++	return at8035_parse_dt(phydev);
++}
++
+ static int qca83xx_config_init(struct phy_device *phydev)
+ {
+ 	u8 switch_revision;
+@@ -2107,7 +2129,7 @@ static struct phy_driver at803x_driver[]
+ 	PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
+ 	.name			= "Qualcomm Atheros AR8035",
+ 	.flags			= PHY_POLL_CABLE_TEST,
+-	.probe			= at803x_probe,
++	.probe			= at8035_probe,
+ 	.config_aneg		= at803x_config_aneg,
+ 	.config_init		= at803x_config_init,
+ 	.soft_reset		= genphy_soft_reset,
+@@ -2128,7 +2150,7 @@ static struct phy_driver at803x_driver[]
+ 	.phy_id			= ATH8030_PHY_ID,
+ 	.name			= "Qualcomm Atheros AR8030",
+ 	.phy_id_mask		= AT8030_PHY_ID_MASK,
+-	.probe			= at803x_probe,
++	.probe			= at8035_probe,
+ 	.config_init		= at803x_config_init,
+ 	.link_change_notify	= at803x_link_change_notify,
+ 	.set_wol		= at803x_set_wol,

+ 219 - 0
target/linux/generic/backport-6.6/707-v6.8-13-net-phy-at803x-drop-specific-PHY-ID-check-from-cable.patch

@@ -0,0 +1,219 @@
+From ef9df47b449e32e06501a11272809be49019bdb6 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 8 Dec 2023 15:52:00 +0100
+Subject: [PATCH 13/13] net: phy: at803x: drop specific PHY ID check from cable
+ test functions
+
+Drop specific PHY ID check for cable test functions for at803x. This is
+done to make functions more generic. While at it better describe what
+the functions does by using more symbolic function names.
+
+PHYs that requires to set additional reg are moved to specific function
+calling the more generic one.
+
+cdt_start and cdt_wait_for_completion are changed to take an additional
+arg to pass specific values specific to the PHY.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 95 +++++++++++++++++++++-------------------
+ 1 file changed, 50 insertions(+), 45 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -1222,31 +1222,16 @@ static int at803x_cdt_fault_length(u16 s
+ 	return (dt * 824) / 10;
+ }
+ 
+-static int at803x_cdt_start(struct phy_device *phydev, int pair)
++static int at803x_cdt_start(struct phy_device *phydev,
++			    u32 cdt_start)
+ {
+-	u16 cdt;
+-
+-	/* qca8081 takes the different bit 15 to enable CDT test */
+-	if (phydev->drv->phy_id == QCA8081_PHY_ID)
+-		cdt = QCA808X_CDT_ENABLE_TEST |
+-			QCA808X_CDT_LENGTH_UNIT |
+-			QCA808X_CDT_INTER_CHECK_DIS;
+-	else
+-		cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+-			AT803X_CDT_ENABLE_TEST;
+-
+-	return phy_write(phydev, AT803X_CDT, cdt);
++	return phy_write(phydev, AT803X_CDT, cdt_start);
+ }
+ 
+-static int at803x_cdt_wait_for_completion(struct phy_device *phydev)
++static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
++					  u32 cdt_en)
+ {
+ 	int val, ret;
+-	u16 cdt_en;
+-
+-	if (phydev->drv->phy_id == QCA8081_PHY_ID)
+-		cdt_en = QCA808X_CDT_ENABLE_TEST;
+-	else
+-		cdt_en = AT803X_CDT_ENABLE_TEST;
+ 
+ 	/* One test run takes about 25ms */
+ 	ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+@@ -1266,11 +1251,13 @@ static int at803x_cable_test_one_pair(st
+ 	};
+ 	int ret, val;
+ 
+-	ret = at803x_cdt_start(phydev, pair);
++	val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
++	      AT803X_CDT_ENABLE_TEST;
++	ret = at803x_cdt_start(phydev, val);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = at803x_cdt_wait_for_completion(phydev);
++	ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1292,19 +1279,11 @@ static int at803x_cable_test_one_pair(st
+ }
+ 
+ static int at803x_cable_test_get_status(struct phy_device *phydev,
+-					bool *finished)
++					bool *finished, unsigned long pair_mask)
+ {
+-	unsigned long pair_mask;
+ 	int retries = 20;
+ 	int pair, ret;
+ 
+-	if (phydev->phy_id == ATH9331_PHY_ID ||
+-	    phydev->phy_id == ATH8032_PHY_ID ||
+-	    phydev->phy_id == QCA9561_PHY_ID)
+-		pair_mask = 0x3;
+-	else
+-		pair_mask = 0xf;
+-
+ 	*finished = false;
+ 
+ 	/* According to the datasheet the CDT can be performed when
+@@ -1331,7 +1310,7 @@ static int at803x_cable_test_get_status(
+ 	return 0;
+ }
+ 
+-static int at803x_cable_test_start(struct phy_device *phydev)
++static void at803x_cable_test_autoneg(struct phy_device *phydev)
+ {
+ 	/* Enable auto-negotiation, but advertise no capabilities, no link
+ 	 * will be established. A restart of the auto-negotiation is not
+@@ -1339,11 +1318,11 @@ static int at803x_cable_test_start(struc
+ 	 */
+ 	phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ 	phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+-	if (phydev->phy_id != ATH9331_PHY_ID &&
+-	    phydev->phy_id != ATH8032_PHY_ID &&
+-	    phydev->phy_id != QCA9561_PHY_ID)
+-		phy_write(phydev, MII_CTRL1000, 0);
++}
+ 
++static int at803x_cable_test_start(struct phy_device *phydev)
++{
++	at803x_cable_test_autoneg(phydev);
+ 	/* we do all the (time consuming) work later */
+ 	return 0;
+ }
+@@ -1618,6 +1597,29 @@ static int at8031_config_intr(struct phy
+ 	return at803x_config_intr(phydev);
+ }
+ 
++/* AR8031 and AR8035 share the same cable test get status reg */
++static int at8031_cable_test_get_status(struct phy_device *phydev,
++					bool *finished)
++{
++	return at803x_cable_test_get_status(phydev, finished, 0xf);
++}
++
++/* AR8031 and AR8035 share the same cable test start logic */
++static int at8031_cable_test_start(struct phy_device *phydev)
++{
++	at803x_cable_test_autoneg(phydev);
++	phy_write(phydev, MII_CTRL1000, 0);
++	/* we do all the (time consuming) work later */
++	return 0;
++}
++
++/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
++static int at8032_cable_test_get_status(struct phy_device *phydev,
++					bool *finished)
++{
++	return at803x_cable_test_get_status(phydev, finished, 0x3);
++}
++
+ static int at8035_parse_dt(struct phy_device *phydev)
+ {
+ 	struct at803x_priv *priv = phydev->priv;
+@@ -2041,11 +2043,14 @@ static int qca808x_cable_test_get_status
+ 
+ 	*finished = false;
+ 
+-	ret = at803x_cdt_start(phydev, 0);
++	val = QCA808X_CDT_ENABLE_TEST |
++	      QCA808X_CDT_LENGTH_UNIT |
++	      QCA808X_CDT_INTER_CHECK_DIS;
++	ret = at803x_cdt_start(phydev, val);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = at803x_cdt_wait_for_completion(phydev);
++	ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -2143,8 +2148,8 @@ static struct phy_driver at803x_driver[]
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.get_tunable		= at803x_get_tunable,
+ 	.set_tunable		= at803x_set_tunable,
+-	.cable_test_start	= at803x_cable_test_start,
+-	.cable_test_get_status	= at803x_cable_test_get_status,
++	.cable_test_start	= at8031_cable_test_start,
++	.cable_test_get_status	= at8031_cable_test_get_status,
+ }, {
+ 	/* Qualcomm Atheros AR8030 */
+ 	.phy_id			= ATH8030_PHY_ID,
+@@ -2181,8 +2186,8 @@ static struct phy_driver at803x_driver[]
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.get_tunable		= at803x_get_tunable,
+ 	.set_tunable		= at803x_set_tunable,
+-	.cable_test_start	= at803x_cable_test_start,
+-	.cable_test_get_status	= at803x_cable_test_get_status,
++	.cable_test_start	= at8031_cable_test_start,
++	.cable_test_get_status	= at8031_cable_test_get_status,
+ }, {
+ 	/* Qualcomm Atheros AR8032 */
+ 	PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+@@ -2197,7 +2202,7 @@ static struct phy_driver at803x_driver[]
+ 	.config_intr		= at803x_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.cable_test_start	= at803x_cable_test_start,
+-	.cable_test_get_status	= at803x_cable_test_get_status,
++	.cable_test_get_status	= at8032_cable_test_get_status,
+ }, {
+ 	/* ATHEROS AR9331 */
+ 	PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+@@ -2210,7 +2215,7 @@ static struct phy_driver at803x_driver[]
+ 	.config_intr		= at803x_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.cable_test_start	= at803x_cable_test_start,
+-	.cable_test_get_status	= at803x_cable_test_get_status,
++	.cable_test_get_status	= at8032_cable_test_get_status,
+ 	.read_status		= at803x_read_status,
+ 	.soft_reset		= genphy_soft_reset,
+ 	.config_aneg		= at803x_config_aneg,
+@@ -2226,7 +2231,7 @@ static struct phy_driver at803x_driver[]
+ 	.config_intr		= at803x_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.cable_test_start	= at803x_cable_test_start,
+-	.cable_test_get_status	= at803x_cable_test_get_status,
++	.cable_test_get_status	= at8032_cable_test_get_status,
+ 	.read_status		= at803x_read_status,
+ 	.soft_reset		= genphy_soft_reset,
+ 	.config_aneg		= at803x_config_aneg,

+ 116 - 0
target/linux/generic/backport-6.6/708-v6.8-01-net-phy-at803x-move-specific-qca808x-config_aneg-to-.patch

@@ -0,0 +1,116 @@
+From 8e732f1c6f2dc5e18f766d0f1b11df9db2dd044a Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 14 Dec 2023 01:44:31 +0100
+Subject: [PATCH 1/2] net: phy: at803x: move specific qca808x config_aneg to
+ dedicated function
+
+Move specific qca808x config_aneg to dedicated function to permit easier
+split of qca808x portion from at803x driver.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 66 ++++++++++++++++++++++++----------------
+ 1 file changed, 40 insertions(+), 26 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -1045,9 +1045,8 @@ static int at803x_config_mdix(struct phy
+ 			  FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
+ }
+ 
+-static int at803x_config_aneg(struct phy_device *phydev)
++static int at803x_prepare_config_aneg(struct phy_device *phydev)
+ {
+-	struct at803x_priv *priv = phydev->priv;
+ 	int ret;
+ 
+ 	ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
+@@ -1064,33 +1063,22 @@ static int at803x_config_aneg(struct phy
+ 			return ret;
+ 	}
+ 
+-	if (priv->is_1000basex)
+-		return genphy_c37_config_aneg(phydev);
+-
+-	/* Do not restart auto-negotiation by setting ret to 0 defautly,
+-	 * when calling __genphy_config_aneg later.
+-	 */
+-	ret = 0;
+-
+-	if (phydev->drv->phy_id == QCA8081_PHY_ID) {
+-		int phy_ctrl = 0;
++	return 0;
++}
+ 
+-		/* The reg MII_BMCR also needs to be configured for force mode, the
+-		 * genphy_config_aneg is also needed.
+-		 */
+-		if (phydev->autoneg == AUTONEG_DISABLE)
+-			genphy_c45_pma_setup_forced(phydev);
++static int at803x_config_aneg(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	int ret;
+ 
+-		if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+-			phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
++	ret = at803x_prepare_config_aneg(phydev);
++	if (ret)
++		return ret;
+ 
+-		ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+-				MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+-		if (ret < 0)
+-			return ret;
+-	}
++	if (priv->is_1000basex)
++		return genphy_c37_config_aneg(phydev);
+ 
+-	return __genphy_config_aneg(phydev, ret);
++	return genphy_config_aneg(phydev);
+ }
+ 
+ static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+@@ -2118,6 +2106,32 @@ static int qca808x_get_features(struct p
+ 	return 0;
+ }
+ 
++static int qca808x_config_aneg(struct phy_device *phydev)
++{
++	int phy_ctrl = 0;
++	int ret;
++
++	ret = at803x_prepare_config_aneg(phydev);
++	if (ret)
++		return ret;
++
++	/* The reg MII_BMCR also needs to be configured for force mode, the
++	 * genphy_config_aneg is also needed.
++	 */
++	if (phydev->autoneg == AUTONEG_DISABLE)
++		genphy_c45_pma_setup_forced(phydev);
++
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
++		phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
++
++	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
++				     MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
++	if (ret < 0)
++		return ret;
++
++	return __genphy_config_aneg(phydev, ret);
++}
++
+ static void qca808x_link_change_notify(struct phy_device *phydev)
+ {
+ 	/* Assert interface sgmii fifo on link down, deassert it on link up,
+@@ -2295,7 +2309,7 @@ static struct phy_driver at803x_driver[]
+ 	.set_wol		= at803x_set_wol,
+ 	.get_wol		= at803x_get_wol,
+ 	.get_features		= qca808x_get_features,
+-	.config_aneg		= at803x_config_aneg,
++	.config_aneg		= qca808x_config_aneg,
+ 	.suspend		= genphy_suspend,
+ 	.resume			= genphy_resume,
+ 	.read_status		= qca808x_read_status,

+ 97 - 0
target/linux/generic/backport-6.6/708-v6.8-02-net-phy-at803x-make-read-specific-status-function-mo.patch

@@ -0,0 +1,97 @@
+From 38eb804e8458ba181a03a0498ce4bf84eebd1931 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 14 Dec 2023 01:44:32 +0100
+Subject: [PATCH 2/2] net: phy: at803x: make read specific status function more
+ generic
+
+Rework read specific status function to be more generic. The function
+apply different speed mask based on the PHY ID. Make it more generic by
+adding an additional arg to pass the specific speed (ss) mask and use
+the provided mask to parse the speed value.
+
+This is needed to permit an easier deatch of qca808x code from the
+at803x driver.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -301,6 +301,11 @@ static struct at803x_hw_stat qca83xx_hw_
+ 	{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+ };
+ 
++struct at803x_ss_mask {
++	u16 speed_mask;
++	u8 speed_shift;
++};
++
+ struct at803x_priv {
+ 	int flags;
+ 	u16 clk_25m_reg;
+@@ -921,7 +926,8 @@ static void at803x_link_change_notify(st
+ 	}
+ }
+ 
+-static int at803x_read_specific_status(struct phy_device *phydev)
++static int at803x_read_specific_status(struct phy_device *phydev,
++				       struct at803x_ss_mask ss_mask)
+ {
+ 	int ss;
+ 
+@@ -940,11 +946,8 @@ static int at803x_read_specific_status(s
+ 		if (sfc < 0)
+ 			return sfc;
+ 
+-		/* qca8081 takes the different bits for speed value from at803x */
+-		if (phydev->drv->phy_id == QCA8081_PHY_ID)
+-			speed = FIELD_GET(QCA808X_SS_SPEED_MASK, ss);
+-		else
+-			speed = FIELD_GET(AT803X_SS_SPEED_MASK, ss);
++		speed = ss & ss_mask.speed_mask;
++		speed >>= ss_mask.speed_shift;
+ 
+ 		switch (speed) {
+ 		case AT803X_SS_SPEED_10:
+@@ -989,6 +992,7 @@ static int at803x_read_specific_status(s
+ static int at803x_read_status(struct phy_device *phydev)
+ {
+ 	struct at803x_priv *priv = phydev->priv;
++	struct at803x_ss_mask ss_mask = { 0 };
+ 	int err, old_link = phydev->link;
+ 
+ 	if (priv->is_1000basex)
+@@ -1012,7 +1016,9 @@ static int at803x_read_status(struct phy
+ 	if (err < 0)
+ 		return err;
+ 
+-	err = at803x_read_specific_status(phydev);
++	ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
++	ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
++	err = at803x_read_specific_status(phydev, ss_mask);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -1869,6 +1875,7 @@ static int qca808x_config_init(struct ph
+ 
+ static int qca808x_read_status(struct phy_device *phydev)
+ {
++	struct at803x_ss_mask ss_mask = { 0 };
+ 	int ret;
+ 
+ 	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+@@ -1882,7 +1889,10 @@ static int qca808x_read_status(struct ph
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = at803x_read_specific_status(phydev);
++	/* qca8081 takes the different bits for speed value from at803x */
++	ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
++	ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
++	ret = at803x_read_specific_status(phydev, ss_mask);
+ 	if (ret < 0)
+ 		return ret;
+ 

+ 27 - 0
target/linux/generic/backport-6.6/709-v6.8-01-net-phy-at803x-remove-extra-space-after-cast.patch

@@ -0,0 +1,27 @@
+From fc9d7264ddc32eaa647d6bfcdc25cdf9f786fde0 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Mon, 18 Dec 2023 00:27:39 +0100
+Subject: [PATCH 1/2] net: phy: at803x: remove extra space after cast
+
+Remove extra space after cast as reported by checkpatch to keep code
+clean.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ drivers/net/phy/at803x.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -462,7 +462,7 @@ static int at803x_set_wol(struct phy_dev
+ 		if (!ndev)
+ 			return -ENODEV;
+ 
+-		mac = (const u8 *) ndev->dev_addr;
++		mac = (const u8 *)ndev->dev_addr;
+ 
+ 		if (!is_valid_ether_addr(mac))
+ 			return -EINVAL;

+ 38 - 0
target/linux/generic/backport-6.6/709-v6.8-02-net-phy-at803x-replace-msleep-1-with-usleep_range.patch

@@ -0,0 +1,38 @@
+From 3ab5720881a924fb6405d9e6a3b09f1026467c47 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Mon, 18 Dec 2023 00:25:08 +0100
+Subject: [PATCH 2/2] net: phy: at803x: replace msleep(1) with usleep_range
+
+Replace msleep(1) with usleep_range as suggested by timers-howto guide.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ drivers/net/phy/at803x.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -916,9 +916,9 @@ static void at803x_link_change_notify(st
+ 		at803x_context_save(phydev, &context);
+ 
+ 		phy_device_reset(phydev, 1);
+-		msleep(1);
++		usleep_range(1000, 2000);
+ 		phy_device_reset(phydev, 0);
+-		msleep(1);
++		usleep_range(1000, 2000);
+ 
+ 		at803x_context_restore(phydev, &context);
+ 
+@@ -1733,7 +1733,7 @@ static int qca83xx_resume(struct phy_dev
+ 	if (ret)
+ 		return ret;
+ 
+-	msleep(1);
++	usleep_range(1000, 2000);
+ 
+ 	return 0;
+ }

+ 152 - 0
target/linux/generic/backport-6.6/710-v6.8-net-phy-at803x-better-align-function-varibles-to-ope.patch

@@ -0,0 +1,152 @@
+From 7961ef1fa10ec35ad6923fb5751877116e4b035b Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Tue, 19 Dec 2023 21:21:24 +0100
+Subject: [PATCH] net: phy: at803x: better align function varibles to open
+ parenthesis
+
+Better align function variables to open parenthesis as suggested by
+checkpatch script for qca808x function to make code cleaner.
+
+For cable_test_get_status function some additional rework was needed to
+handle too long functions.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 67 ++++++++++++++++++++++------------------
+ 1 file changed, 37 insertions(+), 30 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -1781,27 +1781,27 @@ static int qca808x_phy_fast_retrain_conf
+ 		return ret;
+ 
+ 	phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
+-			QCA808X_TOP_OPTION1_DATA);
++		      QCA808X_TOP_OPTION1_DATA);
+ 	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
+-			QCA808X_MSE_THRESHOLD_20DB_VALUE);
++		      QCA808X_MSE_THRESHOLD_20DB_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
+-			QCA808X_MSE_THRESHOLD_17DB_VALUE);
++		      QCA808X_MSE_THRESHOLD_17DB_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
+-			QCA808X_MSE_THRESHOLD_27DB_VALUE);
++		      QCA808X_MSE_THRESHOLD_27DB_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
+-			QCA808X_MSE_THRESHOLD_28DB_VALUE);
++		      QCA808X_MSE_THRESHOLD_28DB_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
+-			QCA808X_MMD3_DEBUG_1_VALUE);
++		      QCA808X_MMD3_DEBUG_1_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
+-			QCA808X_MMD3_DEBUG_4_VALUE);
++		      QCA808X_MMD3_DEBUG_4_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
+-			QCA808X_MMD3_DEBUG_5_VALUE);
++		      QCA808X_MMD3_DEBUG_5_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
+-			QCA808X_MMD3_DEBUG_3_VALUE);
++		      QCA808X_MMD3_DEBUG_3_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
+-			QCA808X_MMD3_DEBUG_6_VALUE);
++		      QCA808X_MMD3_DEBUG_6_VALUE);
+ 	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
+-			QCA808X_MMD3_DEBUG_2_VALUE);
++		      QCA808X_MMD3_DEBUG_2_VALUE);
+ 
+ 	return 0;
+ }
+@@ -1838,13 +1838,14 @@ static int qca808x_config_init(struct ph
+ 
+ 	/* Active adc&vga on 802.3az for the link 1000M and 100M */
+ 	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+-			QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
++			     QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+ 	if (ret)
+ 		return ret;
+ 
+ 	/* Adjust the threshold on 802.3az for the link 1000M */
+ 	ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+-			QCA808X_PHY_MMD3_AZ_TRAINING_CTRL, QCA808X_MMD3_AZ_TRAINING_VAL);
++			    QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
++			    QCA808X_MMD3_AZ_TRAINING_VAL);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1870,7 +1871,8 @@ static int qca808x_config_init(struct ph
+ 
+ 	/* Configure adc threshold as 100mv for the link 10M */
+ 	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
+-			QCA808X_ADC_THRESHOLD_MASK, QCA808X_ADC_THRESHOLD_100MV);
++				     QCA808X_ADC_THRESHOLD_MASK,
++				     QCA808X_ADC_THRESHOLD_100MV);
+ }
+ 
+ static int qca808x_read_status(struct phy_device *phydev)
+@@ -1883,7 +1885,7 @@ static int qca808x_read_status(struct ph
+ 		return ret;
+ 
+ 	linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
+-			ret & MDIO_AN_10GBT_STAT_LP2_5G);
++			 ret & MDIO_AN_10GBT_STAT_LP2_5G);
+ 
+ 	ret = genphy_read_status(phydev);
+ 	if (ret)
+@@ -1913,7 +1915,7 @@ static int qca808x_read_status(struct ph
+ 		 */
+ 		if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ 			if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
+-					qca808x_is_prefer_master(phydev)) {
++			    qca808x_is_prefer_master(phydev)) {
+ 				qca808x_phy_ms_seed_enable(phydev, false);
+ 			} else {
+ 				qca808x_phy_ms_seed_enable(phydev, true);
+@@ -2070,18 +2072,22 @@ static int qca808x_cable_test_get_status
+ 	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
+ 				qca808x_cable_test_result_trans(pair_d));
+ 
+-	if (qca808x_cdt_fault_length_valid(pair_a))
+-		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+-				qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A));
+-	if (qca808x_cdt_fault_length_valid(pair_b))
+-		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B,
+-				qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B));
+-	if (qca808x_cdt_fault_length_valid(pair_c))
+-		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C,
+-				qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C));
+-	if (qca808x_cdt_fault_length_valid(pair_d))
+-		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D,
+-				qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D));
++	if (qca808x_cdt_fault_length_valid(pair_a)) {
++		val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A);
++		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
++	}
++	if (qca808x_cdt_fault_length_valid(pair_b)) {
++		val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B);
++		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
++	}
++	if (qca808x_cdt_fault_length_valid(pair_c)) {
++		val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C);
++		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
++	}
++	if (qca808x_cdt_fault_length_valid(pair_d)) {
++		val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D);
++		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
++	}
+ 
+ 	*finished = true;
+ 
+@@ -2148,8 +2154,9 @@ static void qca808x_link_change_notify(s
+ 	 * the interface device address is always phy address added by 1.
+ 	 */
+ 	mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
+-			MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+-			QCA8081_PHY_FIFO_RSTN, phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
++				   MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
++				   QCA8081_PHY_FIFO_RSTN,
++				   phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+ }
+ 
+ static struct phy_driver at803x_driver[] = {

+ 62 - 0
target/linux/generic/backport-6.6/711-v6.8-01-net-phy-at803x-generalize-cdt-fault-length-function.patch

@@ -0,0 +1,62 @@
+From 22eb276098da820d9440fad22901f9b74ed4d659 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 4 Jan 2024 22:30:38 +0100
+Subject: [PATCH 1/4] net: phy: at803x: generalize cdt fault length function
+
+Generalize cable test fault length function since they all base on the
+same magic values (already reverse engineered to understand the meaning
+of it) to have consistenct values on every PHY.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Simon Horman <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -1192,10 +1192,8 @@ static bool at803x_cdt_fault_length_vali
+ 	return false;
+ }
+ 
+-static int at803x_cdt_fault_length(u16 status)
++static int at803x_cdt_fault_length(int dt)
+ {
+-	int dt;
+-
+ 	/* According to the datasheet the distance to the fault is
+ 	 * DELTA_TIME * 0.824 meters.
+ 	 *
+@@ -1211,8 +1209,6 @@ static int at803x_cdt_fault_length(u16 s
+ 	 * With a VF of 0.69 we get the factor 0.824 mentioned in the
+ 	 * datasheet.
+ 	 */
+-	dt = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, status);
+-
+ 	return (dt * 824) / 10;
+ }
+ 
+@@ -1265,9 +1261,11 @@ static int at803x_cable_test_one_pair(st
+ 	ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ 				at803x_cable_test_result_trans(val));
+ 
+-	if (at803x_cdt_fault_length_valid(val))
++	if (at803x_cdt_fault_length_valid(val)) {
++		val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
+ 		ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ 					      at803x_cdt_fault_length(val));
++	}
+ 
+ 	return 1;
+ }
+@@ -1992,7 +1990,8 @@ static int qca808x_cdt_fault_length(stru
+ 	if (val < 0)
+ 		return val;
+ 
+-	return (FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val) * 824) / 10;
++	val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val);
++	return at803x_cdt_fault_length(val);
+ }
+ 
+ static int qca808x_cable_test_start(struct phy_device *phydev)

+ 118 - 0
target/linux/generic/backport-6.6/711-v6.8-02-net-phy-at803x-refactor-qca808x-cable-test-get-statu.patch

@@ -0,0 +1,118 @@
+From e0e9ada1df6133513249861c1d91c1dbefd9383b Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 4 Jan 2024 22:30:39 +0100
+Subject: [PATCH 2/4] net: phy: at803x: refactor qca808x cable test get status
+ function
+
+Refactor qca808x cable test get status function to remove code
+duplication and clean things up.
+
+The same logic is applied to each pair hence it can be generalized and
+moved to a common function.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Simon Horman <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 80 ++++++++++++++++++++++++----------------
+ 1 file changed, 49 insertions(+), 31 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -2035,10 +2035,43 @@ static int qca808x_cable_test_start(stru
+ 	return 0;
+ }
+ 
++static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
++					      u16 status)
++{
++	u16 pair_code;
++	int length;
++
++	switch (pair) {
++	case ETHTOOL_A_CABLE_PAIR_A:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_B:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_C:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_D:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	ethnl_cable_test_result(phydev, pair,
++				qca808x_cable_test_result_trans(pair_code));
++
++	if (qca808x_cdt_fault_length_valid(pair_code)) {
++		length = qca808x_cdt_fault_length(phydev, pair);
++		ethnl_cable_test_fault_length(phydev, pair, length);
++	}
++
++	return 0;
++}
++
+ static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+ {
+ 	int ret, val;
+-	int pair_a, pair_b, pair_c, pair_d;
+ 
+ 	*finished = false;
+ 
+@@ -2057,36 +2090,21 @@ static int qca808x_cable_test_get_status
+ 	if (val < 0)
+ 		return val;
+ 
+-	pair_a = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, val);
+-	pair_b = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, val);
+-	pair_c = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, val);
+-	pair_d = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, val);
+-
+-	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+-				qca808x_cable_test_result_trans(pair_a));
+-	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+-				qca808x_cable_test_result_trans(pair_b));
+-	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
+-				qca808x_cable_test_result_trans(pair_c));
+-	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
+-				qca808x_cable_test_result_trans(pair_d));
+-
+-	if (qca808x_cdt_fault_length_valid(pair_a)) {
+-		val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A);
+-		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+-	}
+-	if (qca808x_cdt_fault_length_valid(pair_b)) {
+-		val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B);
+-		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+-	}
+-	if (qca808x_cdt_fault_length_valid(pair_c)) {
+-		val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C);
+-		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+-	}
+-	if (qca808x_cdt_fault_length_valid(pair_d)) {
+-		val = qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D);
+-		ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+-	}
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
++	if (ret)
++		return ret;
+ 
+ 	*finished = true;
+ 

+ 182 - 0
target/linux/generic/backport-6.6/711-v6.8-03-net-phy-at803x-add-support-for-cdt-cross-short-test-.patch

@@ -0,0 +1,182 @@
+From ea73e5ea442ee2aade67b1fb1233ccb3cbea2ceb Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 4 Jan 2024 22:30:40 +0100
+Subject: [PATCH 3/4] net: phy: at803x: add support for cdt cross short test
+ for qca808x
+
+QCA808x PHY Family supports Cable Diagnostic Test also for Cross Pair
+Short.
+
+Add all the define to make enable and support these additional tests.
+
+Cross Short test was previously disabled by default, this is now changed
+and enabled by default. In this mode, the mask changed a bit and length
+is shifted based on the fault condition.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Simon Horman <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 86 ++++++++++++++++++++++++++++++++--------
+ 1 file changed, 69 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -254,6 +254,7 @@
+ 
+ #define QCA808X_CDT_ENABLE_TEST			BIT(15)
+ #define QCA808X_CDT_INTER_CHECK_DIS		BIT(13)
++#define QCA808X_CDT_STATUS			BIT(11)
+ #define QCA808X_CDT_LENGTH_UNIT			BIT(10)
+ 
+ #define QCA808X_MMD3_CDT_STATUS			0x8064
+@@ -261,16 +262,44 @@
+ #define QCA808X_MMD3_CDT_DIAG_PAIR_B		0x8066
+ #define QCA808X_MMD3_CDT_DIAG_PAIR_C		0x8067
+ #define QCA808X_MMD3_CDT_DIAG_PAIR_D		0x8068
+-#define QCA808X_CDT_DIAG_LENGTH			GENMASK(7, 0)
++#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT	GENMASK(15, 8)
++#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT	GENMASK(7, 0)
+ 
+ #define QCA808X_CDT_CODE_PAIR_A			GENMASK(15, 12)
+ #define QCA808X_CDT_CODE_PAIR_B			GENMASK(11, 8)
+ #define QCA808X_CDT_CODE_PAIR_C			GENMASK(7, 4)
+ #define QCA808X_CDT_CODE_PAIR_D			GENMASK(3, 0)
+-#define QCA808X_CDT_STATUS_STAT_FAIL		0
+-#define QCA808X_CDT_STATUS_STAT_NORMAL		1
+-#define QCA808X_CDT_STATUS_STAT_OPEN		2
+-#define QCA808X_CDT_STATUS_STAT_SHORT		3
++
++#define QCA808X_CDT_STATUS_STAT_TYPE		GENMASK(1, 0)
++#define QCA808X_CDT_STATUS_STAT_FAIL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
++#define QCA808X_CDT_STATUS_STAT_NORMAL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
++#define QCA808X_CDT_STATUS_STAT_SAME_OPEN	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
++#define QCA808X_CDT_STATUS_STAT_SAME_SHORT	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
++
++#define QCA808X_CDT_STATUS_STAT_MDI		GENMASK(3, 2)
++#define QCA808X_CDT_STATUS_STAT_MDI1		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
++#define QCA808X_CDT_STATUS_STAT_MDI2		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
++#define QCA808X_CDT_STATUS_STAT_MDI3		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
++
++/* NORMAL are MDI with type set to 0 */
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI1
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI1)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI1)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI2
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI2)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI2)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI3
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI3)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI3)
++
++/* Added for reference of existence but should be handled by wait_for_completion already */
++#define QCA808X_CDT_STATUS_STAT_BUSY		(BIT(1) | BIT(3))
+ 
+ /* QCA808X 1G chip type */
+ #define QCA808X_PHY_MMD7_CHIP_TYPE		0x901d
+@@ -1941,8 +1970,17 @@ static int qca808x_soft_reset(struct phy
+ static bool qca808x_cdt_fault_length_valid(int cdt_code)
+ {
+ 	switch (cdt_code) {
+-	case QCA808X_CDT_STATUS_STAT_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_OPEN:
++	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ 		return true;
+ 	default:
+ 		return false;
+@@ -1954,17 +1992,28 @@ static int qca808x_cable_test_result_tra
+ 	switch (cdt_code) {
+ 	case QCA808X_CDT_STATUS_STAT_NORMAL:
+ 		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+-	case QCA808X_CDT_STATUS_STAT_SHORT:
++	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ 		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+-	case QCA808X_CDT_STATUS_STAT_OPEN:
++	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ 		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
++		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ 	case QCA808X_CDT_STATUS_STAT_FAIL:
+ 	default:
+ 		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ 	}
+ }
+ 
+-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair)
++static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
++				    int result)
+ {
+ 	int val;
+ 	u32 cdt_length_reg = 0;
+@@ -1990,7 +2039,11 @@ static int qca808x_cdt_fault_length(stru
+ 	if (val < 0)
+ 		return val;
+ 
+-	val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val);
++	if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
++		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
++	else
++		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
++
+ 	return at803x_cdt_fault_length(val);
+ }
+ 
+@@ -2038,8 +2091,8 @@ static int qca808x_cable_test_start(stru
+ static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+ 					      u16 status)
+ {
++	int length, result;
+ 	u16 pair_code;
+-	int length;
+ 
+ 	switch (pair) {
+ 	case ETHTOOL_A_CABLE_PAIR_A:
+@@ -2058,11 +2111,11 @@ static int qca808x_cable_test_get_pair_s
+ 		return -EINVAL;
+ 	}
+ 
+-	ethnl_cable_test_result(phydev, pair,
+-				qca808x_cable_test_result_trans(pair_code));
++	result = qca808x_cable_test_result_trans(pair_code);
++	ethnl_cable_test_result(phydev, pair, result);
+ 
+ 	if (qca808x_cdt_fault_length_valid(pair_code)) {
+-		length = qca808x_cdt_fault_length(phydev, pair);
++		length = qca808x_cdt_fault_length(phydev, pair, result);
+ 		ethnl_cable_test_fault_length(phydev, pair, length);
+ 	}
+ 
+@@ -2076,8 +2129,7 @@ static int qca808x_cable_test_get_status
+ 	*finished = false;
+ 
+ 	val = QCA808X_CDT_ENABLE_TEST |
+-	      QCA808X_CDT_LENGTH_UNIT |
+-	      QCA808X_CDT_INTER_CHECK_DIS;
++	      QCA808X_CDT_LENGTH_UNIT;
+ 	ret = at803x_cdt_start(phydev, val);
+ 	if (ret)
+ 		return ret;

+ 62 - 0
target/linux/generic/backport-6.6/711-v6.8-04-net-phy-at803x-make-read_status-more-generic.patch

@@ -0,0 +1,62 @@
+From c34d9452d4e5d98a655d7b625e85466320885416 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 4 Jan 2024 22:30:41 +0100
+Subject: [PATCH 4/4] net: phy: at803x: make read_status more generic
+
+Make read_status more generic in preparation on moving it to shared
+library as other PHY Family Driver will have the exact same
+implementation.
+
+The only specific part was a check for AR8031/33 if 1000basex was used.
+The check is moved to a dedicated function specific for those PHYs.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Simon Horman <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/at803x.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -1020,13 +1020,9 @@ static int at803x_read_specific_status(s
+ 
+ static int at803x_read_status(struct phy_device *phydev)
+ {
+-	struct at803x_priv *priv = phydev->priv;
+ 	struct at803x_ss_mask ss_mask = { 0 };
+ 	int err, old_link = phydev->link;
+ 
+-	if (priv->is_1000basex)
+-		return genphy_c37_read_status(phydev);
+-
+ 	/* Update the link, but return if there was an error */
+ 	err = genphy_update_link(phydev);
+ 	if (err)
+@@ -1618,6 +1614,17 @@ static int at8031_config_intr(struct phy
+ 	return at803x_config_intr(phydev);
+ }
+ 
++/* AR8031 and AR8033 share the same read status logic */
++static int at8031_read_status(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++
++	if (priv->is_1000basex)
++		return genphy_c37_read_status(phydev);
++
++	return at803x_read_status(phydev);
++}
++
+ /* AR8031 and AR8035 share the same cable test get status reg */
+ static int at8031_cable_test_get_status(struct phy_device *phydev,
+ 					bool *finished)
+@@ -2281,7 +2288,7 @@ static struct phy_driver at803x_driver[]
+ 	.read_page		= at803x_read_page,
+ 	.write_page		= at803x_write_page,
+ 	.get_features		= at803x_get_features,
+-	.read_status		= at803x_read_status,
++	.read_status		= at8031_read_status,
+ 	.config_intr		= at8031_config_intr,
+ 	.handle_interrupt	= at803x_handle_interrupt,
+ 	.get_tunable		= at803x_get_tunable,

+ 408 - 0
target/linux/generic/backport-6.6/712-v6.9-net-phy-at803x-add-LED-support-for-qca808x.patch

@@ -0,0 +1,408 @@
+From 7196062b64ee470b91015f3d2e82d225948258ea Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 25 Jan 2024 21:37:01 +0100
+Subject: [PATCH 5/5] net: phy: at803x: add LED support for qca808x
+
+Add LED support for QCA8081 PHY.
+
+Documentation for this LEDs PHY is very scarce even with NDA access
+to Documentation for OEMs. Only the blink pattern are documented and are
+very confusing most of the time. No documentation is present about
+forcing the LED on/off or to always blink.
+
+Those settings were reversed by poking the regs and trying to find the
+correct bits to trigger these modes. Some bits mode are not clear and
+maybe the documentation option are not 100% correct. For the sake of LED
+support the reversed option are enough to add support for current LED
+APIs.
+
+Supported HW control modes are:
+- tx
+- rx
+- link_10
+- link_100
+- link_1000
+- link_2500
+- half_duplex
+- full_duplex
+
+Also add support for LED polarity set to set LED polarity to active
+high or low. QSDK sets this value to high by default but PHY reset value
+doesn't have this enabled by default.
+
+QSDK also sets 2 additional bits but their usage is not clear, info about
+this is added in the header. It was verified that for correct function
+of the LED if active high is needed, only BIT 6 is needed.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ drivers/net/phy/at803x.c | 327 +++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 327 insertions(+)
+
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -301,6 +301,87 @@
+ /* Added for reference of existence but should be handled by wait_for_completion already */
+ #define QCA808X_CDT_STATUS_STAT_BUSY		(BIT(1) | BIT(3))
+ 
++#define QCA808X_MMD7_LED_GLOBAL			0x8073
++#define QCA808X_LED_BLINK_1			GENMASK(11, 6)
++#define QCA808X_LED_BLINK_2			GENMASK(5, 0)
++/* Values are the same for both BLINK_1 and BLINK_2 */
++#define QCA808X_LED_BLINK_FREQ_MASK		GENMASK(5, 3)
++#define QCA808X_LED_BLINK_FREQ_2HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
++#define QCA808X_LED_BLINK_FREQ_4HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
++#define QCA808X_LED_BLINK_FREQ_8HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
++#define QCA808X_LED_BLINK_FREQ_16HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
++#define QCA808X_LED_BLINK_FREQ_32HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
++#define QCA808X_LED_BLINK_FREQ_64HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
++#define QCA808X_LED_BLINK_FREQ_128HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
++#define QCA808X_LED_BLINK_FREQ_256HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
++#define QCA808X_LED_BLINK_DUTY_MASK		GENMASK(2, 0)
++#define QCA808X_LED_BLINK_DUTY_50_50		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
++#define QCA808X_LED_BLINK_DUTY_75_25		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
++#define QCA808X_LED_BLINK_DUTY_25_75		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
++#define QCA808X_LED_BLINK_DUTY_33_67		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
++#define QCA808X_LED_BLINK_DUTY_67_33		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
++#define QCA808X_LED_BLINK_DUTY_17_83		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
++#define QCA808X_LED_BLINK_DUTY_83_17		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
++#define QCA808X_LED_BLINK_DUTY_8_92		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
++
++#define QCA808X_MMD7_LED2_CTRL			0x8074
++#define QCA808X_MMD7_LED2_FORCE_CTRL		0x8075
++#define QCA808X_MMD7_LED1_CTRL			0x8076
++#define QCA808X_MMD7_LED1_FORCE_CTRL		0x8077
++#define QCA808X_MMD7_LED0_CTRL			0x8078
++#define QCA808X_MMD7_LED_CTRL(x)		(0x8078 - ((x) * 2))
++
++/* LED hw control pattern is the same for every LED */
++#define QCA808X_LED_PATTERN_MASK		GENMASK(15, 0)
++#define QCA808X_LED_SPEED2500_ON		BIT(15)
++#define QCA808X_LED_SPEED2500_BLINK		BIT(14)
++/* Follow blink trigger even if duplex or speed condition doesn't match */
++#define QCA808X_LED_BLINK_CHECK_BYPASS		BIT(13)
++#define QCA808X_LED_FULL_DUPLEX_ON		BIT(12)
++#define QCA808X_LED_HALF_DUPLEX_ON		BIT(11)
++#define QCA808X_LED_TX_BLINK			BIT(10)
++#define QCA808X_LED_RX_BLINK			BIT(9)
++#define QCA808X_LED_TX_ON_10MS			BIT(8)
++#define QCA808X_LED_RX_ON_10MS			BIT(7)
++#define QCA808X_LED_SPEED1000_ON		BIT(6)
++#define QCA808X_LED_SPEED100_ON			BIT(5)
++#define QCA808X_LED_SPEED10_ON			BIT(4)
++#define QCA808X_LED_COLLISION_BLINK		BIT(3)
++#define QCA808X_LED_SPEED1000_BLINK		BIT(2)
++#define QCA808X_LED_SPEED100_BLINK		BIT(1)
++#define QCA808X_LED_SPEED10_BLINK		BIT(0)
++
++#define QCA808X_MMD7_LED0_FORCE_CTRL		0x8079
++#define QCA808X_MMD7_LED_FORCE_CTRL(x)		(0x8079 - ((x) * 2))
++
++/* LED force ctrl is the same for every LED
++ * No documentation exist for this, not even internal one
++ * with NDA as QCOM gives only info about configuring
++ * hw control pattern rules and doesn't indicate any way
++ * to force the LED to specific mode.
++ * These define comes from reverse and testing and maybe
++ * lack of some info or some info are not entirely correct.
++ * For the basic LED control and hw control these finding
++ * are enough to support LED control in all the required APIs.
++ *
++ * On doing some comparison with implementation with qca807x,
++ * it was found that it's 1:1 equal to it and confirms all the
++ * reverse done. It was also found further specification with the
++ * force mode and the blink modes.
++ */
++#define QCA808X_LED_FORCE_EN			BIT(15)
++#define QCA808X_LED_FORCE_MODE_MASK		GENMASK(14, 13)
++#define QCA808X_LED_FORCE_BLINK_1		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
++#define QCA808X_LED_FORCE_BLINK_2		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
++#define QCA808X_LED_FORCE_ON			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
++#define QCA808X_LED_FORCE_OFF			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
++
++#define QCA808X_MMD7_LED_POLARITY_CTRL		0x901a
++/* QSDK sets by default 0x46 to this reg that sets BIT 6 for
++ * LED to active high. It's not clear what BIT 3 and BIT 4 does.
++ */
++#define QCA808X_LED_ACTIVE_HIGH			BIT(6)
++
+ /* QCA808X 1G chip type */
+ #define QCA808X_PHY_MMD7_CHIP_TYPE		0x901d
+ #define QCA808X_PHY_CHIP_TYPE_1G		BIT(0)
+@@ -346,6 +427,7 @@ struct at803x_priv {
+ 	struct regulator_dev *vddio_rdev;
+ 	struct regulator_dev *vddh_rdev;
+ 	u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
++	int led_polarity_mode;
+ };
+ 
+ struct at803x_context {
+@@ -706,6 +788,9 @@ static int at803x_probe(struct phy_devic
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
++	/* Init LED polarity mode to -1 */
++	priv->led_polarity_mode = -1;
++
+ 	phydev->priv = priv;
+ 
+ 	ret = at803x_parse_dt(phydev);
+@@ -2235,6 +2320,242 @@ static void qca808x_link_change_notify(s
+ 				   phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+ }
+ 
++static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
++				    u16 *offload_trigger)
++{
++	/* Parsing specific to netdev trigger */
++	if (test_bit(TRIGGER_NETDEV_TX, &rules))
++		*offload_trigger |= QCA808X_LED_TX_BLINK;
++	if (test_bit(TRIGGER_NETDEV_RX, &rules))
++		*offload_trigger |= QCA808X_LED_RX_BLINK;
++	if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED10_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED100_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED1000_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED2500_ON;
++	if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
++		*offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
++	if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
++		*offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
++
++	if (rules && !*offload_trigger)
++		return -EOPNOTSUPP;
++
++	/* Enable BLINK_CHECK_BYPASS by default to make the LED
++	 * blink even with duplex or speed mode not enabled.
++	 */
++	*offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
++
++	return 0;
++}
++
++static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
++				  QCA808X_LED_FORCE_EN);
++}
++
++static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
++				       unsigned long rules)
++{
++	u16 offload_trigger = 0;
++
++	if (index > 2)
++		return -EINVAL;
++
++	return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
++}
++
++static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
++				      unsigned long rules)
++{
++	u16 reg, offload_trigger = 0;
++	int ret;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
++	if (ret)
++		return ret;
++
++	ret = qca808x_led_hw_control_enable(phydev, index);
++	if (ret)
++		return ret;
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			      QCA808X_LED_PATTERN_MASK,
++			      offload_trigger);
++}
++
++static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++	int val;
++
++	if (index > 2)
++		return false;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
++
++	return !(val & QCA808X_LED_FORCE_EN);
++}
++
++static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
++				      unsigned long *rules)
++{
++	u16 reg;
++	int val;
++
++	if (index > 2)
++		return -EINVAL;
++
++	/* Check if we have hw control enabled */
++	if (qca808x_led_hw_control_status(phydev, index))
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
++	if (val & QCA808X_LED_TX_BLINK)
++		set_bit(TRIGGER_NETDEV_TX, rules);
++	if (val & QCA808X_LED_RX_BLINK)
++		set_bit(TRIGGER_NETDEV_RX, rules);
++	if (val & QCA808X_LED_SPEED10_ON)
++		set_bit(TRIGGER_NETDEV_LINK_10, rules);
++	if (val & QCA808X_LED_SPEED100_ON)
++		set_bit(TRIGGER_NETDEV_LINK_100, rules);
++	if (val & QCA808X_LED_SPEED1000_ON)
++		set_bit(TRIGGER_NETDEV_LINK_1000, rules);
++	if (val & QCA808X_LED_SPEED2500_ON)
++		set_bit(TRIGGER_NETDEV_LINK_2500, rules);
++	if (val & QCA808X_LED_HALF_DUPLEX_ON)
++		set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
++	if (val & QCA808X_LED_FULL_DUPLEX_ON)
++		set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
++
++	return 0;
++}
++
++static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
++				  QCA808X_LED_PATTERN_MASK);
++}
++
++static int qca808x_led_brightness_set(struct phy_device *phydev,
++				      u8 index, enum led_brightness value)
++{
++	u16 reg;
++	int ret;
++
++	if (index > 2)
++		return -EINVAL;
++
++	if (!value) {
++		ret = qca808x_led_hw_control_reset(phydev, index);
++		if (ret)
++			return ret;
++	}
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			      QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
++			      QCA808X_LED_FORCE_EN | value ? QCA808X_LED_FORCE_ON :
++							     QCA808X_LED_FORCE_OFF);
++}
++
++static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
++				 unsigned long *delay_on,
++				 unsigned long *delay_off)
++{
++	int ret;
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	/* Set blink to 50% off, 50% on at 4Hz by default */
++	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
++			     QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
++			     QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
++	if (ret)
++		return ret;
++
++	/* We use BLINK_1 for normal blinking */
++	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
++			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
++	if (ret)
++		return ret;
++
++	/* We set blink to 4Hz, aka 250ms */
++	*delay_on = 250 / 2;
++	*delay_off = 250 / 2;
++
++	return 0;
++}
++
++static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
++				    unsigned long modes)
++{
++	struct at803x_priv *priv = phydev->priv;
++	bool active_low = false;
++	u32 mode;
++
++	for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
++		switch (mode) {
++		case PHY_LED_ACTIVE_LOW:
++			active_low = true;
++			break;
++		default:
++			return -EINVAL;
++		}
++	}
++
++	/* PHY polarity is global and can't be set per LED.
++	 * To detect this, check if last requested polarity mode
++	 * match the new one.
++	 */
++	if (priv->led_polarity_mode >= 0 &&
++	    priv->led_polarity_mode != active_low) {
++		phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
++		return -EINVAL;
++	}
++
++	/* Save the last PHY polarity mode */
++	priv->led_polarity_mode = active_low;
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN,
++			      QCA808X_MMD7_LED_POLARITY_CTRL,
++			      QCA808X_LED_ACTIVE_HIGH,
++			      active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
++}
++
+ static struct phy_driver at803x_driver[] = {
+ {
+ 	/* Qualcomm Atheros AR8035 */
+@@ -2411,6 +2732,12 @@ static struct phy_driver at803x_driver[]
+ 	.cable_test_start	= qca808x_cable_test_start,
+ 	.cable_test_get_status	= qca808x_cable_test_get_status,
+ 	.link_change_notify	= qca808x_link_change_notify,
++	.led_brightness_set	= qca808x_led_brightness_set,
++	.led_blink_set		= qca808x_led_blink_set,
++	.led_hw_is_supported	= qca808x_led_hw_is_supported,
++	.led_hw_control_set	= qca808x_led_hw_control_set,
++	.led_hw_control_get	= qca808x_led_hw_control_get,
++	.led_polarity_set	= qca808x_led_polarity_set,
+ }, };
+ 
+ module_phy_driver(at803x_driver);

+ 5598 - 0
target/linux/generic/backport-6.6/713-v6.9-01-net-phy-move-at803x-PHY-driver-to-dedicated-director.patch

@@ -0,0 +1,5598 @@
+From 9e56ff53b4115875667760445b028357848b4748 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Mon, 29 Jan 2024 15:15:19 +0100
+Subject: [PATCH 1/5] net: phy: move at803x PHY driver to dedicated directory
+
+In preparation for addition of other Qcom PHY and to tidy things up,
+move the at803x PHY driver to dedicated directory.
+
+The same order in the Kconfig selection is saved.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ drivers/net/phy/Kconfig             | 7 +------
+ drivers/net/phy/Makefile            | 2 +-
+ drivers/net/phy/qcom/Kconfig        | 7 +++++++
+ drivers/net/phy/qcom/Makefile       | 2 ++
+ drivers/net/phy/{ => qcom}/at803x.c | 0
+ 5 files changed, 11 insertions(+), 7 deletions(-)
+ create mode 100644 drivers/net/phy/qcom/Kconfig
+ create mode 100644 drivers/net/phy/qcom/Makefile
+ rename drivers/net/phy/{ => qcom}/at803x.c (100%)
+
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -318,12 +318,7 @@ config NCN26000_PHY
+ 	  Currently supports the NCN26000 10BASE-T1S Industrial PHY
+ 	  with MII interface.
+ 
+-config AT803X_PHY
+-	tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
+-	depends on REGULATOR
+-	help
+-	  Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
+-	  QCA8337(Internal qca8k PHY) model
++source "drivers/net/phy/qcom/Kconfig"
+ 
+ config QSEMI_PHY
+ 	tristate "Quality Semiconductor PHYs"
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -36,7 +36,6 @@ obj-$(CONFIG_ADIN_PHY)		+= adin.o
+ obj-$(CONFIG_ADIN1100_PHY)	+= adin1100.o
+ obj-$(CONFIG_AMD_PHY)		+= amd.o
+ obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia/
+-obj-$(CONFIG_AT803X_PHY)	+= at803x.o
+ obj-$(CONFIG_AX88796B_PHY)	+= ax88796b.o
+ obj-$(CONFIG_BCM54140_PHY)	+= bcm54140.o
+ obj-$(CONFIG_BCM63XX_PHY)	+= bcm63xx.o
+@@ -82,6 +81,7 @@ obj-$(CONFIG_NCN26000_PHY)	+= ncn26000.o
+ obj-$(CONFIG_NXP_C45_TJA11XX_PHY)	+= nxp-c45-tja11xx.o
+ obj-$(CONFIG_NXP_CBTX_PHY)	+= nxp-cbtx.o
+ obj-$(CONFIG_NXP_TJA11XX_PHY)	+= nxp-tja11xx.o
++obj-y				+= qcom/
+ obj-$(CONFIG_QSEMI_PHY)		+= qsemi.o
+ obj-$(CONFIG_REALTEK_PHY)	+= realtek.o
+ obj-$(CONFIG_RENESAS_PHY)	+= uPD60620.o
+--- /dev/null
++++ b/drivers/net/phy/qcom/Kconfig
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0-only
++config AT803X_PHY
++	tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
++	depends on REGULATOR
++	help
++	  Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
++	  QCA8337(Internal qca8k PHY) model
+--- /dev/null
++++ b/drivers/net/phy/qcom/Makefile
+@@ -0,0 +1,2 @@
++# SPDX-License-Identifier: GPL-2.0
++obj-$(CONFIG_AT803X_PHY)	+= at803x.o
+--- a/drivers/net/phy/at803x.c
++++ /dev/null
+@@ -1,2759 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * drivers/net/phy/at803x.c
+- *
+- * Driver for Qualcomm Atheros AR803x PHY
+- *
+- * Author: Matus Ujhelyi <[email protected]>
+- */
+-
+-#include <linux/phy.h>
+-#include <linux/module.h>
+-#include <linux/string.h>
+-#include <linux/netdevice.h>
+-#include <linux/etherdevice.h>
+-#include <linux/ethtool_netlink.h>
+-#include <linux/bitfield.h>
+-#include <linux/regulator/of_regulator.h>
+-#include <linux/regulator/driver.h>
+-#include <linux/regulator/consumer.h>
+-#include <linux/of.h>
+-#include <linux/phylink.h>
+-#include <linux/sfp.h>
+-#include <dt-bindings/net/qca-ar803x.h>
+-
+-#define AT803X_SPECIFIC_FUNCTION_CONTROL	0x10
+-#define AT803X_SFC_ASSERT_CRS			BIT(11)
+-#define AT803X_SFC_FORCE_LINK			BIT(10)
+-#define AT803X_SFC_MDI_CROSSOVER_MODE_M		GENMASK(6, 5)
+-#define AT803X_SFC_AUTOMATIC_CROSSOVER		0x3
+-#define AT803X_SFC_MANUAL_MDIX			0x1
+-#define AT803X_SFC_MANUAL_MDI			0x0
+-#define AT803X_SFC_SQE_TEST			BIT(2)
+-#define AT803X_SFC_POLARITY_REVERSAL		BIT(1)
+-#define AT803X_SFC_DISABLE_JABBER		BIT(0)
+-
+-#define AT803X_SPECIFIC_STATUS			0x11
+-#define AT803X_SS_SPEED_MASK			GENMASK(15, 14)
+-#define AT803X_SS_SPEED_1000			2
+-#define AT803X_SS_SPEED_100			1
+-#define AT803X_SS_SPEED_10			0
+-#define AT803X_SS_DUPLEX			BIT(13)
+-#define AT803X_SS_SPEED_DUPLEX_RESOLVED		BIT(11)
+-#define AT803X_SS_MDIX				BIT(6)
+-
+-#define QCA808X_SS_SPEED_MASK			GENMASK(9, 7)
+-#define QCA808X_SS_SPEED_2500			4
+-
+-#define AT803X_INTR_ENABLE			0x12
+-#define AT803X_INTR_ENABLE_AUTONEG_ERR		BIT(15)
+-#define AT803X_INTR_ENABLE_SPEED_CHANGED	BIT(14)
+-#define AT803X_INTR_ENABLE_DUPLEX_CHANGED	BIT(13)
+-#define AT803X_INTR_ENABLE_PAGE_RECEIVED	BIT(12)
+-#define AT803X_INTR_ENABLE_LINK_FAIL		BIT(11)
+-#define AT803X_INTR_ENABLE_LINK_SUCCESS		BIT(10)
+-#define AT803X_INTR_ENABLE_LINK_FAIL_BX		BIT(8)
+-#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX	BIT(7)
+-#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE	BIT(5)
+-#define AT803X_INTR_ENABLE_POLARITY_CHANGED	BIT(1)
+-#define AT803X_INTR_ENABLE_WOL			BIT(0)
+-
+-#define AT803X_INTR_STATUS			0x13
+-
+-#define AT803X_SMART_SPEED			0x14
+-#define AT803X_SMART_SPEED_ENABLE		BIT(5)
+-#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK	GENMASK(4, 2)
+-#define AT803X_SMART_SPEED_BYPASS_TIMER		BIT(1)
+-#define AT803X_CDT				0x16
+-#define AT803X_CDT_MDI_PAIR_MASK		GENMASK(9, 8)
+-#define AT803X_CDT_ENABLE_TEST			BIT(0)
+-#define AT803X_CDT_STATUS			0x1c
+-#define AT803X_CDT_STATUS_STAT_NORMAL		0
+-#define AT803X_CDT_STATUS_STAT_SHORT		1
+-#define AT803X_CDT_STATUS_STAT_OPEN		2
+-#define AT803X_CDT_STATUS_STAT_FAIL		3
+-#define AT803X_CDT_STATUS_STAT_MASK		GENMASK(9, 8)
+-#define AT803X_CDT_STATUS_DELTA_TIME_MASK	GENMASK(7, 0)
+-#define AT803X_LED_CONTROL			0x18
+-
+-#define AT803X_PHY_MMD3_WOL_CTRL		0x8012
+-#define AT803X_WOL_EN				BIT(5)
+-#define AT803X_LOC_MAC_ADDR_0_15_OFFSET		0x804C
+-#define AT803X_LOC_MAC_ADDR_16_31_OFFSET	0x804B
+-#define AT803X_LOC_MAC_ADDR_32_47_OFFSET	0x804A
+-#define AT803X_REG_CHIP_CONFIG			0x1f
+-#define AT803X_BT_BX_REG_SEL			0x8000
+-
+-#define AT803X_DEBUG_ADDR			0x1D
+-#define AT803X_DEBUG_DATA			0x1E
+-
+-#define AT803X_MODE_CFG_MASK			0x0F
+-#define AT803X_MODE_CFG_BASET_RGMII		0x00
+-#define AT803X_MODE_CFG_BASET_SGMII		0x01
+-#define AT803X_MODE_CFG_BX1000_RGMII_50OHM	0x02
+-#define AT803X_MODE_CFG_BX1000_RGMII_75OHM	0x03
+-#define AT803X_MODE_CFG_BX1000_CONV_50OHM	0x04
+-#define AT803X_MODE_CFG_BX1000_CONV_75OHM	0x05
+-#define AT803X_MODE_CFG_FX100_RGMII_50OHM	0x06
+-#define AT803X_MODE_CFG_FX100_CONV_50OHM	0x07
+-#define AT803X_MODE_CFG_RGMII_AUTO_MDET		0x0B
+-#define AT803X_MODE_CFG_FX100_RGMII_75OHM	0x0E
+-#define AT803X_MODE_CFG_FX100_CONV_75OHM	0x0F
+-
+-#define AT803X_PSSR				0x11	/*PHY-Specific Status Register*/
+-#define AT803X_PSSR_MR_AN_COMPLETE		0x0200
+-
+-#define AT803X_DEBUG_ANALOG_TEST_CTRL		0x00
+-#define QCA8327_DEBUG_MANU_CTRL_EN		BIT(2)
+-#define QCA8337_DEBUG_MANU_CTRL_EN		GENMASK(3, 2)
+-#define AT803X_DEBUG_RX_CLK_DLY_EN		BIT(15)
+-
+-#define AT803X_DEBUG_SYSTEM_CTRL_MODE		0x05
+-#define AT803X_DEBUG_TX_CLK_DLY_EN		BIT(8)
+-
+-#define AT803X_DEBUG_REG_HIB_CTRL		0x0b
+-#define   AT803X_DEBUG_HIB_CTRL_SEL_RST_80U	BIT(10)
+-#define   AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE	BIT(13)
+-#define   AT803X_DEBUG_HIB_CTRL_PS_HIB_EN	BIT(15)
+-
+-#define AT803X_DEBUG_REG_3C			0x3C
+-
+-#define AT803X_DEBUG_REG_GREEN			0x3D
+-#define   AT803X_DEBUG_GATE_CLK_IN1000		BIT(6)
+-
+-#define AT803X_DEBUG_REG_1F			0x1F
+-#define AT803X_DEBUG_PLL_ON			BIT(2)
+-#define AT803X_DEBUG_RGMII_1V8			BIT(3)
+-
+-#define MDIO_AZ_DEBUG				0x800D
+-
+-/* AT803x supports either the XTAL input pad, an internal PLL or the
+- * DSP as clock reference for the clock output pad. The XTAL reference
+- * is only used for 25 MHz output, all other frequencies need the PLL.
+- * The DSP as a clock reference is used in synchronous ethernet
+- * applications.
+- *
+- * By default the PLL is only enabled if there is a link. Otherwise
+- * the PHY will go into low power state and disabled the PLL. You can
+- * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
+- * enabled.
+- */
+-#define AT803X_MMD7_CLK25M			0x8016
+-#define AT803X_CLK_OUT_MASK			GENMASK(4, 2)
+-#define AT803X_CLK_OUT_25MHZ_XTAL		0
+-#define AT803X_CLK_OUT_25MHZ_DSP		1
+-#define AT803X_CLK_OUT_50MHZ_PLL		2
+-#define AT803X_CLK_OUT_50MHZ_DSP		3
+-#define AT803X_CLK_OUT_62_5MHZ_PLL		4
+-#define AT803X_CLK_OUT_62_5MHZ_DSP		5
+-#define AT803X_CLK_OUT_125MHZ_PLL		6
+-#define AT803X_CLK_OUT_125MHZ_DSP		7
+-
+-/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
+- * but doesn't support choosing between XTAL/PLL and DSP.
+- */
+-#define AT8035_CLK_OUT_MASK			GENMASK(4, 3)
+-
+-#define AT803X_CLK_OUT_STRENGTH_MASK		GENMASK(8, 7)
+-#define AT803X_CLK_OUT_STRENGTH_FULL		0
+-#define AT803X_CLK_OUT_STRENGTH_HALF		1
+-#define AT803X_CLK_OUT_STRENGTH_QUARTER		2
+-
+-#define AT803X_DEFAULT_DOWNSHIFT		5
+-#define AT803X_MIN_DOWNSHIFT			2
+-#define AT803X_MAX_DOWNSHIFT			9
+-
+-#define AT803X_MMD3_SMARTEEE_CTL1		0x805b
+-#define AT803X_MMD3_SMARTEEE_CTL2		0x805c
+-#define AT803X_MMD3_SMARTEEE_CTL3		0x805d
+-#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN	BIT(8)
+-
+-#define ATH9331_PHY_ID				0x004dd041
+-#define ATH8030_PHY_ID				0x004dd076
+-#define ATH8031_PHY_ID				0x004dd074
+-#define ATH8032_PHY_ID				0x004dd023
+-#define ATH8035_PHY_ID				0x004dd072
+-#define AT8030_PHY_ID_MASK			0xffffffef
+-
+-#define QCA8081_PHY_ID				0x004dd101
+-
+-#define QCA8327_A_PHY_ID			0x004dd033
+-#define QCA8327_B_PHY_ID			0x004dd034
+-#define QCA8337_PHY_ID				0x004dd036
+-#define QCA9561_PHY_ID				0x004dd042
+-#define QCA8K_PHY_ID_MASK			0xffffffff
+-
+-#define QCA8K_DEVFLAGS_REVISION_MASK		GENMASK(2, 0)
+-
+-#define AT803X_PAGE_FIBER			0
+-#define AT803X_PAGE_COPPER			1
+-
+-/* don't turn off internal PLL */
+-#define AT803X_KEEP_PLL_ENABLED			BIT(0)
+-#define AT803X_DISABLE_SMARTEEE			BIT(1)
+-
+-/* disable hibernation mode */
+-#define AT803X_DISABLE_HIBERNATION_MODE		BIT(2)
+-
+-/* ADC threshold */
+-#define QCA808X_PHY_DEBUG_ADC_THRESHOLD		0x2c80
+-#define QCA808X_ADC_THRESHOLD_MASK		GENMASK(7, 0)
+-#define QCA808X_ADC_THRESHOLD_80MV		0
+-#define QCA808X_ADC_THRESHOLD_100MV		0xf0
+-#define QCA808X_ADC_THRESHOLD_200MV		0x0f
+-#define QCA808X_ADC_THRESHOLD_300MV		0xff
+-
+-/* CLD control */
+-#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7		0x8007
+-#define QCA808X_8023AZ_AFE_CTRL_MASK		GENMASK(8, 4)
+-#define QCA808X_8023AZ_AFE_EN			0x90
+-
+-/* AZ control */
+-#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL	0x8008
+-#define QCA808X_MMD3_AZ_TRAINING_VAL		0x1c32
+-
+-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB	0x8014
+-#define QCA808X_MSE_THRESHOLD_20DB_VALUE	0x529
+-
+-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB	0x800E
+-#define QCA808X_MSE_THRESHOLD_17DB_VALUE	0x341
+-
+-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB	0x801E
+-#define QCA808X_MSE_THRESHOLD_27DB_VALUE	0x419
+-
+-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB	0x8020
+-#define QCA808X_MSE_THRESHOLD_28DB_VALUE	0x341
+-
+-#define QCA808X_PHY_MMD7_TOP_OPTION1		0x901c
+-#define QCA808X_TOP_OPTION1_DATA		0x0
+-
+-#define QCA808X_PHY_MMD3_DEBUG_1		0xa100
+-#define QCA808X_MMD3_DEBUG_1_VALUE		0x9203
+-#define QCA808X_PHY_MMD3_DEBUG_2		0xa101
+-#define QCA808X_MMD3_DEBUG_2_VALUE		0x48ad
+-#define QCA808X_PHY_MMD3_DEBUG_3		0xa103
+-#define QCA808X_MMD3_DEBUG_3_VALUE		0x1698
+-#define QCA808X_PHY_MMD3_DEBUG_4		0xa105
+-#define QCA808X_MMD3_DEBUG_4_VALUE		0x8001
+-#define QCA808X_PHY_MMD3_DEBUG_5		0xa106
+-#define QCA808X_MMD3_DEBUG_5_VALUE		0x1111
+-#define QCA808X_PHY_MMD3_DEBUG_6		0xa011
+-#define QCA808X_MMD3_DEBUG_6_VALUE		0x5f85
+-
+-/* master/slave seed config */
+-#define QCA808X_PHY_DEBUG_LOCAL_SEED		9
+-#define QCA808X_MASTER_SLAVE_SEED_ENABLE	BIT(1)
+-#define QCA808X_MASTER_SLAVE_SEED_CFG		GENMASK(12, 2)
+-#define QCA808X_MASTER_SLAVE_SEED_RANGE		0x32
+-
+-/* Hibernation yields lower power consumpiton in contrast with normal operation mode.
+- * when the copper cable is unplugged, the PHY enters into hibernation mode in about 10s.
+- */
+-#define QCA808X_DBG_AN_TEST			0xb
+-#define QCA808X_HIBERNATION_EN			BIT(15)
+-
+-#define QCA808X_CDT_ENABLE_TEST			BIT(15)
+-#define QCA808X_CDT_INTER_CHECK_DIS		BIT(13)
+-#define QCA808X_CDT_STATUS			BIT(11)
+-#define QCA808X_CDT_LENGTH_UNIT			BIT(10)
+-
+-#define QCA808X_MMD3_CDT_STATUS			0x8064
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_A		0x8065
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_B		0x8066
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_C		0x8067
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_D		0x8068
+-#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT	GENMASK(15, 8)
+-#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT	GENMASK(7, 0)
+-
+-#define QCA808X_CDT_CODE_PAIR_A			GENMASK(15, 12)
+-#define QCA808X_CDT_CODE_PAIR_B			GENMASK(11, 8)
+-#define QCA808X_CDT_CODE_PAIR_C			GENMASK(7, 4)
+-#define QCA808X_CDT_CODE_PAIR_D			GENMASK(3, 0)
+-
+-#define QCA808X_CDT_STATUS_STAT_TYPE		GENMASK(1, 0)
+-#define QCA808X_CDT_STATUS_STAT_FAIL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+-#define QCA808X_CDT_STATUS_STAT_NORMAL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+-#define QCA808X_CDT_STATUS_STAT_SAME_OPEN	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+-#define QCA808X_CDT_STATUS_STAT_SAME_SHORT	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+-
+-#define QCA808X_CDT_STATUS_STAT_MDI		GENMASK(3, 2)
+-#define QCA808X_CDT_STATUS_STAT_MDI1		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+-#define QCA808X_CDT_STATUS_STAT_MDI2		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+-#define QCA808X_CDT_STATUS_STAT_MDI3		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+-
+-/* NORMAL are MDI with type set to 0 */
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI1
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI1)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI1)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI2
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI2)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI2)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI3
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI3)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI3)
+-
+-/* Added for reference of existence but should be handled by wait_for_completion already */
+-#define QCA808X_CDT_STATUS_STAT_BUSY		(BIT(1) | BIT(3))
+-
+-#define QCA808X_MMD7_LED_GLOBAL			0x8073
+-#define QCA808X_LED_BLINK_1			GENMASK(11, 6)
+-#define QCA808X_LED_BLINK_2			GENMASK(5, 0)
+-/* Values are the same for both BLINK_1 and BLINK_2 */
+-#define QCA808X_LED_BLINK_FREQ_MASK		GENMASK(5, 3)
+-#define QCA808X_LED_BLINK_FREQ_2HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
+-#define QCA808X_LED_BLINK_FREQ_4HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
+-#define QCA808X_LED_BLINK_FREQ_8HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
+-#define QCA808X_LED_BLINK_FREQ_16HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
+-#define QCA808X_LED_BLINK_FREQ_32HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
+-#define QCA808X_LED_BLINK_FREQ_64HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
+-#define QCA808X_LED_BLINK_FREQ_128HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
+-#define QCA808X_LED_BLINK_FREQ_256HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
+-#define QCA808X_LED_BLINK_DUTY_MASK		GENMASK(2, 0)
+-#define QCA808X_LED_BLINK_DUTY_50_50		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
+-#define QCA808X_LED_BLINK_DUTY_75_25		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
+-#define QCA808X_LED_BLINK_DUTY_25_75		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
+-#define QCA808X_LED_BLINK_DUTY_33_67		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
+-#define QCA808X_LED_BLINK_DUTY_67_33		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
+-#define QCA808X_LED_BLINK_DUTY_17_83		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
+-#define QCA808X_LED_BLINK_DUTY_83_17		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
+-#define QCA808X_LED_BLINK_DUTY_8_92		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
+-
+-#define QCA808X_MMD7_LED2_CTRL			0x8074
+-#define QCA808X_MMD7_LED2_FORCE_CTRL		0x8075
+-#define QCA808X_MMD7_LED1_CTRL			0x8076
+-#define QCA808X_MMD7_LED1_FORCE_CTRL		0x8077
+-#define QCA808X_MMD7_LED0_CTRL			0x8078
+-#define QCA808X_MMD7_LED_CTRL(x)		(0x8078 - ((x) * 2))
+-
+-/* LED hw control pattern is the same for every LED */
+-#define QCA808X_LED_PATTERN_MASK		GENMASK(15, 0)
+-#define QCA808X_LED_SPEED2500_ON		BIT(15)
+-#define QCA808X_LED_SPEED2500_BLINK		BIT(14)
+-/* Follow blink trigger even if duplex or speed condition doesn't match */
+-#define QCA808X_LED_BLINK_CHECK_BYPASS		BIT(13)
+-#define QCA808X_LED_FULL_DUPLEX_ON		BIT(12)
+-#define QCA808X_LED_HALF_DUPLEX_ON		BIT(11)
+-#define QCA808X_LED_TX_BLINK			BIT(10)
+-#define QCA808X_LED_RX_BLINK			BIT(9)
+-#define QCA808X_LED_TX_ON_10MS			BIT(8)
+-#define QCA808X_LED_RX_ON_10MS			BIT(7)
+-#define QCA808X_LED_SPEED1000_ON		BIT(6)
+-#define QCA808X_LED_SPEED100_ON			BIT(5)
+-#define QCA808X_LED_SPEED10_ON			BIT(4)
+-#define QCA808X_LED_COLLISION_BLINK		BIT(3)
+-#define QCA808X_LED_SPEED1000_BLINK		BIT(2)
+-#define QCA808X_LED_SPEED100_BLINK		BIT(1)
+-#define QCA808X_LED_SPEED10_BLINK		BIT(0)
+-
+-#define QCA808X_MMD7_LED0_FORCE_CTRL		0x8079
+-#define QCA808X_MMD7_LED_FORCE_CTRL(x)		(0x8079 - ((x) * 2))
+-
+-/* LED force ctrl is the same for every LED
+- * No documentation exist for this, not even internal one
+- * with NDA as QCOM gives only info about configuring
+- * hw control pattern rules and doesn't indicate any way
+- * to force the LED to specific mode.
+- * These define comes from reverse and testing and maybe
+- * lack of some info or some info are not entirely correct.
+- * For the basic LED control and hw control these finding
+- * are enough to support LED control in all the required APIs.
+- *
+- * On doing some comparison with implementation with qca807x,
+- * it was found that it's 1:1 equal to it and confirms all the
+- * reverse done. It was also found further specification with the
+- * force mode and the blink modes.
+- */
+-#define QCA808X_LED_FORCE_EN			BIT(15)
+-#define QCA808X_LED_FORCE_MODE_MASK		GENMASK(14, 13)
+-#define QCA808X_LED_FORCE_BLINK_1		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
+-#define QCA808X_LED_FORCE_BLINK_2		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
+-#define QCA808X_LED_FORCE_ON			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
+-#define QCA808X_LED_FORCE_OFF			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
+-
+-#define QCA808X_MMD7_LED_POLARITY_CTRL		0x901a
+-/* QSDK sets by default 0x46 to this reg that sets BIT 6 for
+- * LED to active high. It's not clear what BIT 3 and BIT 4 does.
+- */
+-#define QCA808X_LED_ACTIVE_HIGH			BIT(6)
+-
+-/* QCA808X 1G chip type */
+-#define QCA808X_PHY_MMD7_CHIP_TYPE		0x901d
+-#define QCA808X_PHY_CHIP_TYPE_1G		BIT(0)
+-
+-#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL	0x9072
+-#define QCA8081_PHY_FIFO_RSTN			BIT(11)
+-
+-MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
+-MODULE_AUTHOR("Matus Ujhelyi");
+-MODULE_LICENSE("GPL");
+-
+-enum stat_access_type {
+-	PHY,
+-	MMD
+-};
+-
+-struct at803x_hw_stat {
+-	const char *string;
+-	u8 reg;
+-	u32 mask;
+-	enum stat_access_type access_type;
+-};
+-
+-static struct at803x_hw_stat qca83xx_hw_stats[] = {
+-	{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+-	{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+-	{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+-};
+-
+-struct at803x_ss_mask {
+-	u16 speed_mask;
+-	u8 speed_shift;
+-};
+-
+-struct at803x_priv {
+-	int flags;
+-	u16 clk_25m_reg;
+-	u16 clk_25m_mask;
+-	u8 smarteee_lpi_tw_1g;
+-	u8 smarteee_lpi_tw_100m;
+-	bool is_fiber;
+-	bool is_1000basex;
+-	struct regulator_dev *vddio_rdev;
+-	struct regulator_dev *vddh_rdev;
+-	u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+-	int led_polarity_mode;
+-};
+-
+-struct at803x_context {
+-	u16 bmcr;
+-	u16 advertise;
+-	u16 control1000;
+-	u16 int_enable;
+-	u16 smart_speed;
+-	u16 led_control;
+-};
+-
+-static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
+-{
+-	int ret;
+-
+-	ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+-	if (ret < 0)
+-		return ret;
+-
+-	return phy_write(phydev, AT803X_DEBUG_DATA, data);
+-}
+-
+-static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
+-{
+-	int ret;
+-
+-	ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+-	if (ret < 0)
+-		return ret;
+-
+-	return phy_read(phydev, AT803X_DEBUG_DATA);
+-}
+-
+-static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+-				 u16 clear, u16 set)
+-{
+-	u16 val;
+-	int ret;
+-
+-	ret = at803x_debug_reg_read(phydev, reg);
+-	if (ret < 0)
+-		return ret;
+-
+-	val = ret & 0xffff;
+-	val &= ~clear;
+-	val |= set;
+-
+-	return phy_write(phydev, AT803X_DEBUG_DATA, val);
+-}
+-
+-static int at803x_write_page(struct phy_device *phydev, int page)
+-{
+-	int mask;
+-	int set;
+-
+-	if (page == AT803X_PAGE_COPPER) {
+-		set = AT803X_BT_BX_REG_SEL;
+-		mask = 0;
+-	} else {
+-		set = 0;
+-		mask = AT803X_BT_BX_REG_SEL;
+-	}
+-
+-	return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
+-}
+-
+-static int at803x_read_page(struct phy_device *phydev)
+-{
+-	int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+-
+-	if (ccr < 0)
+-		return ccr;
+-
+-	if (ccr & AT803X_BT_BX_REG_SEL)
+-		return AT803X_PAGE_COPPER;
+-
+-	return AT803X_PAGE_FIBER;
+-}
+-
+-static int at803x_enable_rx_delay(struct phy_device *phydev)
+-{
+-	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
+-				     AT803X_DEBUG_RX_CLK_DLY_EN);
+-}
+-
+-static int at803x_enable_tx_delay(struct phy_device *phydev)
+-{
+-	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
+-				     AT803X_DEBUG_TX_CLK_DLY_EN);
+-}
+-
+-static int at803x_disable_rx_delay(struct phy_device *phydev)
+-{
+-	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+-				     AT803X_DEBUG_RX_CLK_DLY_EN, 0);
+-}
+-
+-static int at803x_disable_tx_delay(struct phy_device *phydev)
+-{
+-	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
+-				     AT803X_DEBUG_TX_CLK_DLY_EN, 0);
+-}
+-
+-/* save relevant PHY registers to private copy */
+-static void at803x_context_save(struct phy_device *phydev,
+-				struct at803x_context *context)
+-{
+-	context->bmcr = phy_read(phydev, MII_BMCR);
+-	context->advertise = phy_read(phydev, MII_ADVERTISE);
+-	context->control1000 = phy_read(phydev, MII_CTRL1000);
+-	context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+-	context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+-	context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+-}
+-
+-/* restore relevant PHY registers from private copy */
+-static void at803x_context_restore(struct phy_device *phydev,
+-				   const struct at803x_context *context)
+-{
+-	phy_write(phydev, MII_BMCR, context->bmcr);
+-	phy_write(phydev, MII_ADVERTISE, context->advertise);
+-	phy_write(phydev, MII_CTRL1000, context->control1000);
+-	phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+-	phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+-	phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+-}
+-
+-static int at803x_set_wol(struct phy_device *phydev,
+-			  struct ethtool_wolinfo *wol)
+-{
+-	int ret, irq_enabled;
+-
+-	if (wol->wolopts & WAKE_MAGIC) {
+-		struct net_device *ndev = phydev->attached_dev;
+-		const u8 *mac;
+-		unsigned int i;
+-		static const unsigned int offsets[] = {
+-			AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+-			AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+-			AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+-		};
+-
+-		if (!ndev)
+-			return -ENODEV;
+-
+-		mac = (const u8 *)ndev->dev_addr;
+-
+-		if (!is_valid_ether_addr(mac))
+-			return -EINVAL;
+-
+-		for (i = 0; i < 3; i++)
+-			phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+-				      mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+-
+-		/* Enable WOL interrupt */
+-		ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+-		if (ret)
+-			return ret;
+-	} else {
+-		/* Disable WOL interrupt */
+-		ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	/* Clear WOL status */
+-	ret = phy_read(phydev, AT803X_INTR_STATUS);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Check if there are other interrupts except for WOL triggered when PHY is
+-	 * in interrupt mode, only the interrupts enabled by AT803X_INTR_ENABLE can
+-	 * be passed up to the interrupt PIN.
+-	 */
+-	irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+-	if (irq_enabled < 0)
+-		return irq_enabled;
+-
+-	irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
+-	if (ret & irq_enabled && !phy_polling_mode(phydev))
+-		phy_trigger_machine(phydev);
+-
+-	return 0;
+-}
+-
+-static void at803x_get_wol(struct phy_device *phydev,
+-			   struct ethtool_wolinfo *wol)
+-{
+-	int value;
+-
+-	wol->supported = WAKE_MAGIC;
+-	wol->wolopts = 0;
+-
+-	value = phy_read(phydev, AT803X_INTR_ENABLE);
+-	if (value < 0)
+-		return;
+-
+-	if (value & AT803X_INTR_ENABLE_WOL)
+-		wol->wolopts |= WAKE_MAGIC;
+-}
+-
+-static int qca83xx_get_sset_count(struct phy_device *phydev)
+-{
+-	return ARRAY_SIZE(qca83xx_hw_stats);
+-}
+-
+-static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
+-		strscpy(data + i * ETH_GSTRING_LEN,
+-			qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
+-	}
+-}
+-
+-static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
+-{
+-	struct at803x_hw_stat stat = qca83xx_hw_stats[i];
+-	struct at803x_priv *priv = phydev->priv;
+-	int val;
+-	u64 ret;
+-
+-	if (stat.access_type == MMD)
+-		val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
+-	else
+-		val = phy_read(phydev, stat.reg);
+-
+-	if (val < 0) {
+-		ret = U64_MAX;
+-	} else {
+-		val = val & stat.mask;
+-		priv->stats[i] += val;
+-		ret = priv->stats[i];
+-	}
+-
+-	return ret;
+-}
+-
+-static void qca83xx_get_stats(struct phy_device *phydev,
+-			      struct ethtool_stats *stats, u64 *data)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+-		data[i] = qca83xx_get_stat(phydev, i);
+-}
+-
+-static int at803x_suspend(struct phy_device *phydev)
+-{
+-	int value;
+-	int wol_enabled;
+-
+-	value = phy_read(phydev, AT803X_INTR_ENABLE);
+-	wol_enabled = value & AT803X_INTR_ENABLE_WOL;
+-
+-	if (wol_enabled)
+-		value = BMCR_ISOLATE;
+-	else
+-		value = BMCR_PDOWN;
+-
+-	phy_modify(phydev, MII_BMCR, 0, value);
+-
+-	return 0;
+-}
+-
+-static int at803x_resume(struct phy_device *phydev)
+-{
+-	return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
+-}
+-
+-static int at803x_parse_dt(struct phy_device *phydev)
+-{
+-	struct device_node *node = phydev->mdio.dev.of_node;
+-	struct at803x_priv *priv = phydev->priv;
+-	u32 freq, strength, tw;
+-	unsigned int sel;
+-	int ret;
+-
+-	if (!IS_ENABLED(CONFIG_OF_MDIO))
+-		return 0;
+-
+-	if (of_property_read_bool(node, "qca,disable-smarteee"))
+-		priv->flags |= AT803X_DISABLE_SMARTEEE;
+-
+-	if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
+-		priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
+-
+-	if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
+-		if (!tw || tw > 255) {
+-			phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
+-			return -EINVAL;
+-		}
+-		priv->smarteee_lpi_tw_1g = tw;
+-	}
+-
+-	if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
+-		if (!tw || tw > 255) {
+-			phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
+-			return -EINVAL;
+-		}
+-		priv->smarteee_lpi_tw_100m = tw;
+-	}
+-
+-	ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
+-	if (!ret) {
+-		switch (freq) {
+-		case 25000000:
+-			sel = AT803X_CLK_OUT_25MHZ_XTAL;
+-			break;
+-		case 50000000:
+-			sel = AT803X_CLK_OUT_50MHZ_PLL;
+-			break;
+-		case 62500000:
+-			sel = AT803X_CLK_OUT_62_5MHZ_PLL;
+-			break;
+-		case 125000000:
+-			sel = AT803X_CLK_OUT_125MHZ_PLL;
+-			break;
+-		default:
+-			phydev_err(phydev, "invalid qca,clk-out-frequency\n");
+-			return -EINVAL;
+-		}
+-
+-		priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
+-		priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
+-	}
+-
+-	ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+-	if (!ret) {
+-		priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
+-		switch (strength) {
+-		case AR803X_STRENGTH_FULL:
+-			priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
+-			break;
+-		case AR803X_STRENGTH_HALF:
+-			priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
+-			break;
+-		case AR803X_STRENGTH_QUARTER:
+-			priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
+-			break;
+-		default:
+-			phydev_err(phydev, "invalid qca,clk-out-strength\n");
+-			return -EINVAL;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int at803x_probe(struct phy_device *phydev)
+-{
+-	struct device *dev = &phydev->mdio.dev;
+-	struct at803x_priv *priv;
+-	int ret;
+-
+-	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
+-
+-	/* Init LED polarity mode to -1 */
+-	priv->led_polarity_mode = -1;
+-
+-	phydev->priv = priv;
+-
+-	ret = at803x_parse_dt(phydev);
+-	if (ret)
+-		return ret;
+-
+-	return 0;
+-}
+-
+-static int at803x_get_features(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	int err;
+-
+-	err = genphy_read_abilities(phydev);
+-	if (err)
+-		return err;
+-
+-	if (phydev->drv->phy_id != ATH8031_PHY_ID)
+-		return 0;
+-
+-	/* AR8031/AR8033 have different status registers
+-	 * for copper and fiber operation. However, the
+-	 * extended status register is the same for both
+-	 * operation modes.
+-	 *
+-	 * As a result of that, ESTATUS_1000_XFULL is set
+-	 * to 1 even when operating in copper TP mode.
+-	 *
+-	 * Remove this mode from the supported link modes
+-	 * when not operating in 1000BaseX mode.
+-	 */
+-	if (!priv->is_1000basex)
+-		linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+-				   phydev->supported);
+-
+-	return 0;
+-}
+-
+-static int at803x_smarteee_config(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	u16 mask = 0, val = 0;
+-	int ret;
+-
+-	if (priv->flags & AT803X_DISABLE_SMARTEEE)
+-		return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-				      AT803X_MMD3_SMARTEEE_CTL3,
+-				      AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
+-
+-	if (priv->smarteee_lpi_tw_1g) {
+-		mask |= 0xff00;
+-		val |= priv->smarteee_lpi_tw_1g << 8;
+-	}
+-	if (priv->smarteee_lpi_tw_100m) {
+-		mask |= 0x00ff;
+-		val |= priv->smarteee_lpi_tw_100m;
+-	}
+-	if (!mask)
+-		return 0;
+-
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
+-			     mask, val);
+-	if (ret)
+-		return ret;
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
+-			      AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
+-			      AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
+-}
+-
+-static int at803x_clk_out_config(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-
+-	if (!priv->clk_25m_mask)
+-		return 0;
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
+-			      priv->clk_25m_mask, priv->clk_25m_reg);
+-}
+-
+-static int at8031_pll_config(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-
+-	/* The default after hardware reset is PLL OFF. After a soft reset, the
+-	 * values are retained.
+-	 */
+-	if (priv->flags & AT803X_KEEP_PLL_ENABLED)
+-		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+-					     0, AT803X_DEBUG_PLL_ON);
+-	else
+-		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+-					     AT803X_DEBUG_PLL_ON, 0);
+-}
+-
+-static int at803x_hibernation_mode_config(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-
+-	/* The default after hardware reset is hibernation mode enabled. After
+-	 * software reset, the value is retained.
+-	 */
+-	if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+-		return 0;
+-
+-	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+-					 AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
+-}
+-
+-static int at803x_config_init(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* The RX and TX delay default is:
+-	 *   after HW reset: RX delay enabled and TX delay disabled
+-	 *   after SW reset: RX delay enabled, while TX delay retains the
+-	 *   value before reset.
+-	 */
+-	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+-		ret = at803x_enable_rx_delay(phydev);
+-	else
+-		ret = at803x_disable_rx_delay(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+-		ret = at803x_enable_tx_delay(phydev);
+-	else
+-		ret = at803x_disable_tx_delay(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = at803x_smarteee_config(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = at803x_clk_out_config(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = at803x_hibernation_mode_config(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Ar803x extended next page bit is enabled by default. Cisco
+-	 * multigig switches read this bit and attempt to negotiate 10Gbps
+-	 * rates even if the next page bit is disabled. This is incorrect
+-	 * behaviour but we still need to accommodate it. XNP is only needed
+-	 * for 10Gbps support, so disable XNP.
+-	 */
+-	return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
+-}
+-
+-static int at803x_ack_interrupt(struct phy_device *phydev)
+-{
+-	int err;
+-
+-	err = phy_read(phydev, AT803X_INTR_STATUS);
+-
+-	return (err < 0) ? err : 0;
+-}
+-
+-static int at803x_config_intr(struct phy_device *phydev)
+-{
+-	int err;
+-	int value;
+-
+-	value = phy_read(phydev, AT803X_INTR_ENABLE);
+-
+-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+-		/* Clear any pending interrupts */
+-		err = at803x_ack_interrupt(phydev);
+-		if (err)
+-			return err;
+-
+-		value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
+-		value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
+-		value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
+-		value |= AT803X_INTR_ENABLE_LINK_FAIL;
+-		value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+-
+-		err = phy_write(phydev, AT803X_INTR_ENABLE, value);
+-	} else {
+-		err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+-		if (err)
+-			return err;
+-
+-		/* Clear any pending interrupts */
+-		err = at803x_ack_interrupt(phydev);
+-	}
+-
+-	return err;
+-}
+-
+-static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+-{
+-	int irq_status, int_enabled;
+-
+-	irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+-	if (irq_status < 0) {
+-		phy_error(phydev);
+-		return IRQ_NONE;
+-	}
+-
+-	/* Read the current enabled interrupts */
+-	int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+-	if (int_enabled < 0) {
+-		phy_error(phydev);
+-		return IRQ_NONE;
+-	}
+-
+-	/* See if this was one of our enabled interrupts */
+-	if (!(irq_status & int_enabled))
+-		return IRQ_NONE;
+-
+-	phy_trigger_machine(phydev);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-static void at803x_link_change_notify(struct phy_device *phydev)
+-{
+-	/*
+-	 * Conduct a hardware reset for AT8030 every time a link loss is
+-	 * signalled. This is necessary to circumvent a hardware bug that
+-	 * occurs when the cable is unplugged while TX packets are pending
+-	 * in the FIFO. In such cases, the FIFO enters an error mode it
+-	 * cannot recover from by software.
+-	 */
+-	if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
+-		struct at803x_context context;
+-
+-		at803x_context_save(phydev, &context);
+-
+-		phy_device_reset(phydev, 1);
+-		usleep_range(1000, 2000);
+-		phy_device_reset(phydev, 0);
+-		usleep_range(1000, 2000);
+-
+-		at803x_context_restore(phydev, &context);
+-
+-		phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
+-	}
+-}
+-
+-static int at803x_read_specific_status(struct phy_device *phydev,
+-				       struct at803x_ss_mask ss_mask)
+-{
+-	int ss;
+-
+-	/* Read the AT8035 PHY-Specific Status register, which indicates the
+-	 * speed and duplex that the PHY is actually using, irrespective of
+-	 * whether we are in autoneg mode or not.
+-	 */
+-	ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+-	if (ss < 0)
+-		return ss;
+-
+-	if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+-		int sfc, speed;
+-
+-		sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
+-		if (sfc < 0)
+-			return sfc;
+-
+-		speed = ss & ss_mask.speed_mask;
+-		speed >>= ss_mask.speed_shift;
+-
+-		switch (speed) {
+-		case AT803X_SS_SPEED_10:
+-			phydev->speed = SPEED_10;
+-			break;
+-		case AT803X_SS_SPEED_100:
+-			phydev->speed = SPEED_100;
+-			break;
+-		case AT803X_SS_SPEED_1000:
+-			phydev->speed = SPEED_1000;
+-			break;
+-		case QCA808X_SS_SPEED_2500:
+-			phydev->speed = SPEED_2500;
+-			break;
+-		}
+-		if (ss & AT803X_SS_DUPLEX)
+-			phydev->duplex = DUPLEX_FULL;
+-		else
+-			phydev->duplex = DUPLEX_HALF;
+-
+-		if (ss & AT803X_SS_MDIX)
+-			phydev->mdix = ETH_TP_MDI_X;
+-		else
+-			phydev->mdix = ETH_TP_MDI;
+-
+-		switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
+-		case AT803X_SFC_MANUAL_MDI:
+-			phydev->mdix_ctrl = ETH_TP_MDI;
+-			break;
+-		case AT803X_SFC_MANUAL_MDIX:
+-			phydev->mdix_ctrl = ETH_TP_MDI_X;
+-			break;
+-		case AT803X_SFC_AUTOMATIC_CROSSOVER:
+-			phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+-			break;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int at803x_read_status(struct phy_device *phydev)
+-{
+-	struct at803x_ss_mask ss_mask = { 0 };
+-	int err, old_link = phydev->link;
+-
+-	/* Update the link, but return if there was an error */
+-	err = genphy_update_link(phydev);
+-	if (err)
+-		return err;
+-
+-	/* why bother the PHY if nothing can have changed */
+-	if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+-		return 0;
+-
+-	phydev->speed = SPEED_UNKNOWN;
+-	phydev->duplex = DUPLEX_UNKNOWN;
+-	phydev->pause = 0;
+-	phydev->asym_pause = 0;
+-
+-	err = genphy_read_lpa(phydev);
+-	if (err < 0)
+-		return err;
+-
+-	ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+-	ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+-	err = at803x_read_specific_status(phydev, ss_mask);
+-	if (err < 0)
+-		return err;
+-
+-	if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+-		phy_resolve_aneg_pause(phydev);
+-
+-	return 0;
+-}
+-
+-static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
+-{
+-	u16 val;
+-
+-	switch (ctrl) {
+-	case ETH_TP_MDI:
+-		val = AT803X_SFC_MANUAL_MDI;
+-		break;
+-	case ETH_TP_MDI_X:
+-		val = AT803X_SFC_MANUAL_MDIX;
+-		break;
+-	case ETH_TP_MDI_AUTO:
+-		val = AT803X_SFC_AUTOMATIC_CROSSOVER;
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
+-			  AT803X_SFC_MDI_CROSSOVER_MODE_M,
+-			  FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
+-}
+-
+-static int at803x_prepare_config_aneg(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Changes of the midx bits are disruptive to the normal operation;
+-	 * therefore any changes to these registers must be followed by a
+-	 * software reset to take effect.
+-	 */
+-	if (ret == 1) {
+-		ret = genphy_soft_reset(phydev);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-static int at803x_config_aneg(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	int ret;
+-
+-	ret = at803x_prepare_config_aneg(phydev);
+-	if (ret)
+-		return ret;
+-
+-	if (priv->is_1000basex)
+-		return genphy_c37_config_aneg(phydev);
+-
+-	return genphy_config_aneg(phydev);
+-}
+-
+-static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+-{
+-	int val;
+-
+-	val = phy_read(phydev, AT803X_SMART_SPEED);
+-	if (val < 0)
+-		return val;
+-
+-	if (val & AT803X_SMART_SPEED_ENABLE)
+-		*d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
+-	else
+-		*d = DOWNSHIFT_DEV_DISABLE;
+-
+-	return 0;
+-}
+-
+-static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
+-{
+-	u16 mask, set;
+-	int ret;
+-
+-	switch (cnt) {
+-	case DOWNSHIFT_DEV_DEFAULT_COUNT:
+-		cnt = AT803X_DEFAULT_DOWNSHIFT;
+-		fallthrough;
+-	case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
+-		set = AT803X_SMART_SPEED_ENABLE |
+-		      AT803X_SMART_SPEED_BYPASS_TIMER |
+-		      FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
+-		mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
+-		break;
+-	case DOWNSHIFT_DEV_DISABLE:
+-		set = 0;
+-		mask = AT803X_SMART_SPEED_ENABLE |
+-		       AT803X_SMART_SPEED_BYPASS_TIMER;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
+-
+-	/* After changing the smart speed settings, we need to perform a
+-	 * software reset, use phy_init_hw() to make sure we set the
+-	 * reapply any values which might got lost during software reset.
+-	 */
+-	if (ret == 1)
+-		ret = phy_init_hw(phydev);
+-
+-	return ret;
+-}
+-
+-static int at803x_get_tunable(struct phy_device *phydev,
+-			      struct ethtool_tunable *tuna, void *data)
+-{
+-	switch (tuna->id) {
+-	case ETHTOOL_PHY_DOWNSHIFT:
+-		return at803x_get_downshift(phydev, data);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-static int at803x_set_tunable(struct phy_device *phydev,
+-			      struct ethtool_tunable *tuna, const void *data)
+-{
+-	switch (tuna->id) {
+-	case ETHTOOL_PHY_DOWNSHIFT:
+-		return at803x_set_downshift(phydev, *(const u8 *)data);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-static int at803x_cable_test_result_trans(u16 status)
+-{
+-	switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+-	case AT803X_CDT_STATUS_STAT_NORMAL:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+-	case AT803X_CDT_STATUS_STAT_SHORT:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+-	case AT803X_CDT_STATUS_STAT_OPEN:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+-	case AT803X_CDT_STATUS_STAT_FAIL:
+-	default:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+-	}
+-}
+-
+-static bool at803x_cdt_test_failed(u16 status)
+-{
+-	return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
+-		AT803X_CDT_STATUS_STAT_FAIL;
+-}
+-
+-static bool at803x_cdt_fault_length_valid(u16 status)
+-{
+-	switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+-	case AT803X_CDT_STATUS_STAT_OPEN:
+-	case AT803X_CDT_STATUS_STAT_SHORT:
+-		return true;
+-	}
+-	return false;
+-}
+-
+-static int at803x_cdt_fault_length(int dt)
+-{
+-	/* According to the datasheet the distance to the fault is
+-	 * DELTA_TIME * 0.824 meters.
+-	 *
+-	 * The author suspect the correct formula is:
+-	 *
+-	 *   fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
+-	 *
+-	 * where c is the speed of light, VF is the velocity factor of
+-	 * the twisted pair cable, 125MHz the counter frequency and
+-	 * we need to divide by 2 because the hardware will measure the
+-	 * round trip time to the fault and back to the PHY.
+-	 *
+-	 * With a VF of 0.69 we get the factor 0.824 mentioned in the
+-	 * datasheet.
+-	 */
+-	return (dt * 824) / 10;
+-}
+-
+-static int at803x_cdt_start(struct phy_device *phydev,
+-			    u32 cdt_start)
+-{
+-	return phy_write(phydev, AT803X_CDT, cdt_start);
+-}
+-
+-static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+-					  u32 cdt_en)
+-{
+-	int val, ret;
+-
+-	/* One test run takes about 25ms */
+-	ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+-				    !(val & cdt_en),
+-				    30000, 100000, true);
+-
+-	return ret < 0 ? ret : 0;
+-}
+-
+-static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
+-{
+-	static const int ethtool_pair[] = {
+-		ETHTOOL_A_CABLE_PAIR_A,
+-		ETHTOOL_A_CABLE_PAIR_B,
+-		ETHTOOL_A_CABLE_PAIR_C,
+-		ETHTOOL_A_CABLE_PAIR_D,
+-	};
+-	int ret, val;
+-
+-	val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+-	      AT803X_CDT_ENABLE_TEST;
+-	ret = at803x_cdt_start(phydev, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
+-	if (ret)
+-		return ret;
+-
+-	val = phy_read(phydev, AT803X_CDT_STATUS);
+-	if (val < 0)
+-		return val;
+-
+-	if (at803x_cdt_test_failed(val))
+-		return 0;
+-
+-	ethnl_cable_test_result(phydev, ethtool_pair[pair],
+-				at803x_cable_test_result_trans(val));
+-
+-	if (at803x_cdt_fault_length_valid(val)) {
+-		val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
+-		ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+-					      at803x_cdt_fault_length(val));
+-	}
+-
+-	return 1;
+-}
+-
+-static int at803x_cable_test_get_status(struct phy_device *phydev,
+-					bool *finished, unsigned long pair_mask)
+-{
+-	int retries = 20;
+-	int pair, ret;
+-
+-	*finished = false;
+-
+-	/* According to the datasheet the CDT can be performed when
+-	 * there is no link partner or when the link partner is
+-	 * auto-negotiating. Starting the test will restart the AN
+-	 * automatically. It seems that doing this repeatedly we will
+-	 * get a slot where our link partner won't disturb our
+-	 * measurement.
+-	 */
+-	while (pair_mask && retries--) {
+-		for_each_set_bit(pair, &pair_mask, 4) {
+-			ret = at803x_cable_test_one_pair(phydev, pair);
+-			if (ret < 0)
+-				return ret;
+-			if (ret)
+-				clear_bit(pair, &pair_mask);
+-		}
+-		if (pair_mask)
+-			msleep(250);
+-	}
+-
+-	*finished = true;
+-
+-	return 0;
+-}
+-
+-static void at803x_cable_test_autoneg(struct phy_device *phydev)
+-{
+-	/* Enable auto-negotiation, but advertise no capabilities, no link
+-	 * will be established. A restart of the auto-negotiation is not
+-	 * required, because the cable test will automatically break the link.
+-	 */
+-	phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+-	phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+-}
+-
+-static int at803x_cable_test_start(struct phy_device *phydev)
+-{
+-	at803x_cable_test_autoneg(phydev);
+-	/* we do all the (time consuming) work later */
+-	return 0;
+-}
+-
+-static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+-					    unsigned int selector)
+-{
+-	struct phy_device *phydev = rdev_get_drvdata(rdev);
+-
+-	if (selector)
+-		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+-					     0, AT803X_DEBUG_RGMII_1V8);
+-	else
+-		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+-					     AT803X_DEBUG_RGMII_1V8, 0);
+-}
+-
+-static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+-{
+-	struct phy_device *phydev = rdev_get_drvdata(rdev);
+-	int val;
+-
+-	val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+-	if (val < 0)
+-		return val;
+-
+-	return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+-}
+-
+-static const struct regulator_ops vddio_regulator_ops = {
+-	.list_voltage = regulator_list_voltage_table,
+-	.set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+-	.get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+-};
+-
+-static const unsigned int vddio_voltage_table[] = {
+-	1500000,
+-	1800000,
+-};
+-
+-static const struct regulator_desc vddio_desc = {
+-	.name = "vddio",
+-	.of_match = of_match_ptr("vddio-regulator"),
+-	.n_voltages = ARRAY_SIZE(vddio_voltage_table),
+-	.volt_table = vddio_voltage_table,
+-	.ops = &vddio_regulator_ops,
+-	.type = REGULATOR_VOLTAGE,
+-	.owner = THIS_MODULE,
+-};
+-
+-static const struct regulator_ops vddh_regulator_ops = {
+-};
+-
+-static const struct regulator_desc vddh_desc = {
+-	.name = "vddh",
+-	.of_match = of_match_ptr("vddh-regulator"),
+-	.n_voltages = 1,
+-	.fixed_uV = 2500000,
+-	.ops = &vddh_regulator_ops,
+-	.type = REGULATOR_VOLTAGE,
+-	.owner = THIS_MODULE,
+-};
+-
+-static int at8031_register_regulators(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	struct device *dev = &phydev->mdio.dev;
+-	struct regulator_config config = { };
+-
+-	config.dev = dev;
+-	config.driver_data = phydev;
+-
+-	priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+-	if (IS_ERR(priv->vddio_rdev)) {
+-		phydev_err(phydev, "failed to register VDDIO regulator\n");
+-		return PTR_ERR(priv->vddio_rdev);
+-	}
+-
+-	priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+-	if (IS_ERR(priv->vddh_rdev)) {
+-		phydev_err(phydev, "failed to register VDDH regulator\n");
+-		return PTR_ERR(priv->vddh_rdev);
+-	}
+-
+-	return 0;
+-}
+-
+-static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+-{
+-	struct phy_device *phydev = upstream;
+-	__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+-	__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+-	DECLARE_PHY_INTERFACE_MASK(interfaces);
+-	phy_interface_t iface;
+-
+-	linkmode_zero(phy_support);
+-	phylink_set(phy_support, 1000baseX_Full);
+-	phylink_set(phy_support, 1000baseT_Full);
+-	phylink_set(phy_support, Autoneg);
+-	phylink_set(phy_support, Pause);
+-	phylink_set(phy_support, Asym_Pause);
+-
+-	linkmode_zero(sfp_support);
+-	sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+-	/* Some modules support 10G modes as well as others we support.
+-	 * Mask out non-supported modes so the correct interface is picked.
+-	 */
+-	linkmode_and(sfp_support, phy_support, sfp_support);
+-
+-	if (linkmode_empty(sfp_support)) {
+-		dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+-		return -EINVAL;
+-	}
+-
+-	iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+-
+-	/* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+-	 * interface for use with SFP modules.
+-	 * However, some copper modules detected as having a preferred SGMII
+-	 * interface do default to and function in 1000Base-X mode, so just
+-	 * print a warning and allow such modules, as they may have some chance
+-	 * of working.
+-	 */
+-	if (iface == PHY_INTERFACE_MODE_SGMII)
+-		dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+-	else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static const struct sfp_upstream_ops at8031_sfp_ops = {
+-	.attach = phy_sfp_attach,
+-	.detach = phy_sfp_detach,
+-	.module_insert = at8031_sfp_insert,
+-};
+-
+-static int at8031_parse_dt(struct phy_device *phydev)
+-{
+-	struct device_node *node = phydev->mdio.dev.of_node;
+-	struct at803x_priv *priv = phydev->priv;
+-	int ret;
+-
+-	if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+-		priv->flags |= AT803X_KEEP_PLL_ENABLED;
+-
+-	ret = at8031_register_regulators(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
+-						 "vddio");
+-	if (ret) {
+-		phydev_err(phydev, "failed to get VDDIO regulator\n");
+-		return ret;
+-	}
+-
+-	/* Only AR8031/8033 support 1000Base-X for SFP modules */
+-	return phy_sfp_probe(phydev, &at8031_sfp_ops);
+-}
+-
+-static int at8031_probe(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	int mode_cfg;
+-	int ccr;
+-	int ret;
+-
+-	ret = at803x_probe(phydev);
+-	if (ret)
+-		return ret;
+-
+-	/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+-	 * options.
+-	 */
+-	ret = at8031_parse_dt(phydev);
+-	if (ret)
+-		return ret;
+-
+-	ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+-	if (ccr < 0)
+-		return ccr;
+-	mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+-
+-	switch (mode_cfg) {
+-	case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+-	case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+-		priv->is_1000basex = true;
+-		fallthrough;
+-	case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+-	case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+-		priv->is_fiber = true;
+-		break;
+-	}
+-
+-	/* Disable WoL in 1588 register which is enabled
+-	 * by default
+-	 */
+-	return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-			      AT803X_PHY_MMD3_WOL_CTRL,
+-			      AT803X_WOL_EN, 0);
+-}
+-
+-static int at8031_config_init(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	int ret;
+-
+-	/* Some bootloaders leave the fiber page selected.
+-	 * Switch to the appropriate page (fiber or copper), as otherwise we
+-	 * read the PHY capabilities from the wrong page.
+-	 */
+-	phy_lock_mdio_bus(phydev);
+-	ret = at803x_write_page(phydev,
+-				priv->is_fiber ? AT803X_PAGE_FIBER :
+-						 AT803X_PAGE_COPPER);
+-	phy_unlock_mdio_bus(phydev);
+-	if (ret)
+-		return ret;
+-
+-	ret = at8031_pll_config(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	return at803x_config_init(phydev);
+-}
+-
+-static int at8031_set_wol(struct phy_device *phydev,
+-			  struct ethtool_wolinfo *wol)
+-{
+-	int ret;
+-
+-	/* First setup MAC address and enable WOL interrupt */
+-	ret = at803x_set_wol(phydev, wol);
+-	if (ret)
+-		return ret;
+-
+-	if (wol->wolopts & WAKE_MAGIC)
+-		/* Enable WOL function for 1588 */
+-		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-				     AT803X_PHY_MMD3_WOL_CTRL,
+-				     0, AT803X_WOL_EN);
+-	else
+-		/* Disable WoL function for 1588 */
+-		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+-				     AT803X_PHY_MMD3_WOL_CTRL,
+-				     AT803X_WOL_EN, 0);
+-
+-	return ret;
+-}
+-
+-static int at8031_config_intr(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	int err, value = 0;
+-
+-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
+-	    priv->is_fiber) {
+-		/* Clear any pending interrupts */
+-		err = at803x_ack_interrupt(phydev);
+-		if (err)
+-			return err;
+-
+-		value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+-		value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+-
+-		err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
+-		if (err)
+-			return err;
+-	}
+-
+-	return at803x_config_intr(phydev);
+-}
+-
+-/* AR8031 and AR8033 share the same read status logic */
+-static int at8031_read_status(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-
+-	if (priv->is_1000basex)
+-		return genphy_c37_read_status(phydev);
+-
+-	return at803x_read_status(phydev);
+-}
+-
+-/* AR8031 and AR8035 share the same cable test get status reg */
+-static int at8031_cable_test_get_status(struct phy_device *phydev,
+-					bool *finished)
+-{
+-	return at803x_cable_test_get_status(phydev, finished, 0xf);
+-}
+-
+-/* AR8031 and AR8035 share the same cable test start logic */
+-static int at8031_cable_test_start(struct phy_device *phydev)
+-{
+-	at803x_cable_test_autoneg(phydev);
+-	phy_write(phydev, MII_CTRL1000, 0);
+-	/* we do all the (time consuming) work later */
+-	return 0;
+-}
+-
+-/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
+-static int at8032_cable_test_get_status(struct phy_device *phydev,
+-					bool *finished)
+-{
+-	return at803x_cable_test_get_status(phydev, finished, 0x3);
+-}
+-
+-static int at8035_parse_dt(struct phy_device *phydev)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-
+-	/* Mask is set by the generic at803x_parse_dt
+-	 * if property is set. Assume property is set
+-	 * with the mask not zero.
+-	 */
+-	if (priv->clk_25m_mask) {
+-		/* Fixup for the AR8030/AR8035. This chip has another mask and
+-		 * doesn't support the DSP reference. Eg. the lowest bit of the
+-		 * mask. The upper two bits select the same frequencies. Mask
+-		 * the lowest bit here.
+-		 *
+-		 * Warning:
+-		 *   There was no datasheet for the AR8030 available so this is
+-		 *   just a guess. But the AR8035 is listed as pin compatible
+-		 *   to the AR8030 so there might be a good chance it works on
+-		 *   the AR8030 too.
+-		 */
+-		priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+-		priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+-	}
+-
+-	return 0;
+-}
+-
+-/* AR8030 and AR8035 shared the same special mask for clk_25m */
+-static int at8035_probe(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	ret = at803x_probe(phydev);
+-	if (ret)
+-		return ret;
+-
+-	return at8035_parse_dt(phydev);
+-}
+-
+-static int qca83xx_config_init(struct phy_device *phydev)
+-{
+-	u8 switch_revision;
+-
+-	switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
+-
+-	switch (switch_revision) {
+-	case 1:
+-		/* For 100M waveform */
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
+-		/* Turn on Gigabit clock */
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
+-		break;
+-
+-	case 2:
+-		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
+-		fallthrough;
+-	case 4:
+-		phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
+-		break;
+-	}
+-
+-	/* Following original QCA sourcecode set port to prefer master */
+-	phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+-
+-	return 0;
+-}
+-
+-static int qca8327_config_init(struct phy_device *phydev)
+-{
+-	/* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
+-	 * Disable on init and enable only with 100m speed following
+-	 * qca original source code.
+-	 */
+-	at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+-			      QCA8327_DEBUG_MANU_CTRL_EN, 0);
+-
+-	return qca83xx_config_init(phydev);
+-}
+-
+-static void qca83xx_link_change_notify(struct phy_device *phydev)
+-{
+-	/* Set DAC Amplitude adjustment to +6% for 100m on link running */
+-	if (phydev->state == PHY_RUNNING) {
+-		if (phydev->speed == SPEED_100)
+-			at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+-					      QCA8327_DEBUG_MANU_CTRL_EN,
+-					      QCA8327_DEBUG_MANU_CTRL_EN);
+-	} else {
+-		/* Reset DAC Amplitude adjustment */
+-		at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+-				      QCA8327_DEBUG_MANU_CTRL_EN, 0);
+-	}
+-}
+-
+-static int qca83xx_resume(struct phy_device *phydev)
+-{
+-	int ret, val;
+-
+-	/* Skip reset if not suspended */
+-	if (!phydev->suspended)
+-		return 0;
+-
+-	/* Reinit the port, reset values set by suspend */
+-	qca83xx_config_init(phydev);
+-
+-	/* Reset the port on port resume */
+-	phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+-
+-	/* On resume from suspend the switch execute a reset and
+-	 * restart auto-negotiation. Wait for reset to complete.
+-	 */
+-	ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+-				    50000, 600000, true);
+-	if (ret)
+-		return ret;
+-
+-	usleep_range(1000, 2000);
+-
+-	return 0;
+-}
+-
+-static int qca83xx_suspend(struct phy_device *phydev)
+-{
+-	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+-			      AT803X_DEBUG_GATE_CLK_IN1000, 0);
+-
+-	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+-			      AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+-			      AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+-
+-	return 0;
+-}
+-
+-static int qca8337_suspend(struct phy_device *phydev)
+-{
+-	/* Only QCA8337 support actual suspend. */
+-	genphy_suspend(phydev);
+-
+-	return qca83xx_suspend(phydev);
+-}
+-
+-static int qca8327_suspend(struct phy_device *phydev)
+-{
+-	u16 mask = 0;
+-
+-	/* QCA8327 cause port unreliability when phy suspend
+-	 * is set.
+-	 */
+-	mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+-	phy_modify(phydev, MII_BMCR, mask, 0);
+-
+-	return qca83xx_suspend(phydev);
+-}
+-
+-static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* Enable fast retrain */
+-	ret = genphy_c45_fast_retrain(phydev, true);
+-	if (ret)
+-		return ret;
+-
+-	phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
+-		      QCA808X_TOP_OPTION1_DATA);
+-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
+-		      QCA808X_MSE_THRESHOLD_20DB_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
+-		      QCA808X_MSE_THRESHOLD_17DB_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
+-		      QCA808X_MSE_THRESHOLD_27DB_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
+-		      QCA808X_MSE_THRESHOLD_28DB_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
+-		      QCA808X_MMD3_DEBUG_1_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
+-		      QCA808X_MMD3_DEBUG_4_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
+-		      QCA808X_MMD3_DEBUG_5_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
+-		      QCA808X_MMD3_DEBUG_3_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
+-		      QCA808X_MMD3_DEBUG_6_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
+-		      QCA808X_MMD3_DEBUG_2_VALUE);
+-
+-	return 0;
+-}
+-
+-static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
+-{
+-	u16 seed_value;
+-
+-	if (!enable)
+-		return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+-				QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
+-
+-	seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
+-	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+-			QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
+-			FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
+-			QCA808X_MASTER_SLAVE_SEED_ENABLE);
+-}
+-
+-static bool qca808x_is_prefer_master(struct phy_device *phydev)
+-{
+-	return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
+-		(phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
+-}
+-
+-static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
+-{
+-	return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+-}
+-
+-static int qca808x_config_init(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* Active adc&vga on 802.3az for the link 1000M and 100M */
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+-			     QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+-	if (ret)
+-		return ret;
+-
+-	/* Adjust the threshold on 802.3az for the link 1000M */
+-	ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+-			    QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
+-			    QCA808X_MMD3_AZ_TRAINING_VAL);
+-	if (ret)
+-		return ret;
+-
+-	if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+-		/* Config the fast retrain for the link 2500M */
+-		ret = qca808x_phy_fast_retrain_config(phydev);
+-		if (ret)
+-			return ret;
+-
+-		ret = genphy_read_master_slave(phydev);
+-		if (ret < 0)
+-			return ret;
+-
+-		if (!qca808x_is_prefer_master(phydev)) {
+-			/* Enable seed and configure lower ramdom seed to make phy
+-			 * linked as slave mode.
+-			 */
+-			ret = qca808x_phy_ms_seed_enable(phydev, true);
+-			if (ret)
+-				return ret;
+-		}
+-	}
+-
+-	/* Configure adc threshold as 100mv for the link 10M */
+-	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
+-				     QCA808X_ADC_THRESHOLD_MASK,
+-				     QCA808X_ADC_THRESHOLD_100MV);
+-}
+-
+-static int qca808x_read_status(struct phy_device *phydev)
+-{
+-	struct at803x_ss_mask ss_mask = { 0 };
+-	int ret;
+-
+-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+-	if (ret < 0)
+-		return ret;
+-
+-	linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
+-			 ret & MDIO_AN_10GBT_STAT_LP2_5G);
+-
+-	ret = genphy_read_status(phydev);
+-	if (ret)
+-		return ret;
+-
+-	/* qca8081 takes the different bits for speed value from at803x */
+-	ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
+-	ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
+-	ret = at803x_read_specific_status(phydev, ss_mask);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (phydev->link) {
+-		if (phydev->speed == SPEED_2500)
+-			phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+-		else
+-			phydev->interface = PHY_INTERFACE_MODE_SGMII;
+-	} else {
+-		/* generate seed as a lower random value to make PHY linked as SLAVE easily,
+-		 * except for master/slave configuration fault detected or the master mode
+-		 * preferred.
+-		 *
+-		 * the reason for not putting this code into the function link_change_notify is
+-		 * the corner case where the link partner is also the qca8081 PHY and the seed
+-		 * value is configured as the same value, the link can't be up and no link change
+-		 * occurs.
+-		 */
+-		if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+-			if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
+-			    qca808x_is_prefer_master(phydev)) {
+-				qca808x_phy_ms_seed_enable(phydev, false);
+-			} else {
+-				qca808x_phy_ms_seed_enable(phydev, true);
+-			}
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int qca808x_soft_reset(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	ret = genphy_soft_reset(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (qca808x_has_fast_retrain_or_slave_seed(phydev))
+-		ret = qca808x_phy_ms_seed_enable(phydev, true);
+-
+-	return ret;
+-}
+-
+-static bool qca808x_cdt_fault_length_valid(int cdt_code)
+-{
+-	switch (cdt_code) {
+-	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+-		return true;
+-	default:
+-		return false;
+-	}
+-}
+-
+-static int qca808x_cable_test_result_trans(int cdt_code)
+-{
+-	switch (cdt_code) {
+-	case QCA808X_CDT_STATUS_STAT_NORMAL:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+-	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+-	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+-	case QCA808X_CDT_STATUS_STAT_FAIL:
+-	default:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+-	}
+-}
+-
+-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+-				    int result)
+-{
+-	int val;
+-	u32 cdt_length_reg = 0;
+-
+-	switch (pair) {
+-	case ETHTOOL_A_CABLE_PAIR_A:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_B:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_C:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_D:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+-	if (val < 0)
+-		return val;
+-
+-	if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+-		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+-	else
+-		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+-
+-	return at803x_cdt_fault_length(val);
+-}
+-
+-static int qca808x_cable_test_start(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* perform CDT with the following configs:
+-	 * 1. disable hibernation.
+-	 * 2. force PHY working in MDI mode.
+-	 * 3. for PHY working in 1000BaseT.
+-	 * 4. configure the threshold.
+-	 */
+-
+-	ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = at803x_config_mdix(phydev, ETH_TP_MDI);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Force 1000base-T needs to configure PMA/PMD and MII_BMCR */
+-	phydev->duplex = DUPLEX_FULL;
+-	phydev->speed = SPEED_1000;
+-	ret = genphy_c45_pma_setup_forced(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = genphy_setup_forced(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* configure the thresholds for open, short, pair ok test */
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
+-
+-	return 0;
+-}
+-
+-static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+-					      u16 status)
+-{
+-	int length, result;
+-	u16 pair_code;
+-
+-	switch (pair) {
+-	case ETHTOOL_A_CABLE_PAIR_A:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_B:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_C:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_D:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	result = qca808x_cable_test_result_trans(pair_code);
+-	ethnl_cable_test_result(phydev, pair, result);
+-
+-	if (qca808x_cdt_fault_length_valid(pair_code)) {
+-		length = qca808x_cdt_fault_length(phydev, pair, result);
+-		ethnl_cable_test_fault_length(phydev, pair, length);
+-	}
+-
+-	return 0;
+-}
+-
+-static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+-{
+-	int ret, val;
+-
+-	*finished = false;
+-
+-	val = QCA808X_CDT_ENABLE_TEST |
+-	      QCA808X_CDT_LENGTH_UNIT;
+-	ret = at803x_cdt_start(phydev, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+-	if (ret)
+-		return ret;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+-	if (val < 0)
+-		return val;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+-	if (ret)
+-		return ret;
+-
+-	*finished = true;
+-
+-	return 0;
+-}
+-
+-static int qca808x_get_features(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	ret = genphy_c45_pma_read_abilities(phydev);
+-	if (ret)
+-		return ret;
+-
+-	/* The autoneg ability is not existed in bit3 of MMD7.1,
+-	 * but it is supported by qca808x PHY, so we add it here
+-	 * manually.
+-	 */
+-	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+-
+-	/* As for the qca8081 1G version chip, the 2500baseT ability is also
+-	 * existed in the bit0 of MMD1.21, we need to remove it manually if
+-	 * it is the qca8081 1G chip according to the bit0 of MMD7.0x901d.
+-	 */
+-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (QCA808X_PHY_CHIP_TYPE_1G & ret)
+-		linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+-
+-	return 0;
+-}
+-
+-static int qca808x_config_aneg(struct phy_device *phydev)
+-{
+-	int phy_ctrl = 0;
+-	int ret;
+-
+-	ret = at803x_prepare_config_aneg(phydev);
+-	if (ret)
+-		return ret;
+-
+-	/* The reg MII_BMCR also needs to be configured for force mode, the
+-	 * genphy_config_aneg is also needed.
+-	 */
+-	if (phydev->autoneg == AUTONEG_DISABLE)
+-		genphy_c45_pma_setup_forced(phydev);
+-
+-	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+-		phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+-
+-	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+-				     MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+-	if (ret < 0)
+-		return ret;
+-
+-	return __genphy_config_aneg(phydev, ret);
+-}
+-
+-static void qca808x_link_change_notify(struct phy_device *phydev)
+-{
+-	/* Assert interface sgmii fifo on link down, deassert it on link up,
+-	 * the interface device address is always phy address added by 1.
+-	 */
+-	mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
+-				   MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+-				   QCA8081_PHY_FIFO_RSTN,
+-				   phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+-}
+-
+-static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+-				    u16 *offload_trigger)
+-{
+-	/* Parsing specific to netdev trigger */
+-	if (test_bit(TRIGGER_NETDEV_TX, &rules))
+-		*offload_trigger |= QCA808X_LED_TX_BLINK;
+-	if (test_bit(TRIGGER_NETDEV_RX, &rules))
+-		*offload_trigger |= QCA808X_LED_RX_BLINK;
+-	if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+-		*offload_trigger |= QCA808X_LED_SPEED10_ON;
+-	if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+-		*offload_trigger |= QCA808X_LED_SPEED100_ON;
+-	if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+-		*offload_trigger |= QCA808X_LED_SPEED1000_ON;
+-	if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
+-		*offload_trigger |= QCA808X_LED_SPEED2500_ON;
+-	if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+-		*offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+-	if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+-		*offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+-
+-	if (rules && !*offload_trigger)
+-		return -EOPNOTSUPP;
+-
+-	/* Enable BLINK_CHECK_BYPASS by default to make the LED
+-	 * blink even with duplex or speed mode not enabled.
+-	 */
+-	*offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
+-
+-	return 0;
+-}
+-
+-static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+-{
+-	u16 reg;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+-
+-	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+-				  QCA808X_LED_FORCE_EN);
+-}
+-
+-static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+-				       unsigned long rules)
+-{
+-	u16 offload_trigger = 0;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+-}
+-
+-static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
+-				      unsigned long rules)
+-{
+-	u16 reg, offload_trigger = 0;
+-	int ret;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_CTRL(index);
+-
+-	ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_led_hw_control_enable(phydev, index);
+-	if (ret)
+-		return ret;
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+-			      QCA808X_LED_PATTERN_MASK,
+-			      offload_trigger);
+-}
+-
+-static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
+-{
+-	u16 reg;
+-	int val;
+-
+-	if (index > 2)
+-		return false;
+-
+-	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+-
+-	return !(val & QCA808X_LED_FORCE_EN);
+-}
+-
+-static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
+-				      unsigned long *rules)
+-{
+-	u16 reg;
+-	int val;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	/* Check if we have hw control enabled */
+-	if (qca808x_led_hw_control_status(phydev, index))
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_CTRL(index);
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+-	if (val & QCA808X_LED_TX_BLINK)
+-		set_bit(TRIGGER_NETDEV_TX, rules);
+-	if (val & QCA808X_LED_RX_BLINK)
+-		set_bit(TRIGGER_NETDEV_RX, rules);
+-	if (val & QCA808X_LED_SPEED10_ON)
+-		set_bit(TRIGGER_NETDEV_LINK_10, rules);
+-	if (val & QCA808X_LED_SPEED100_ON)
+-		set_bit(TRIGGER_NETDEV_LINK_100, rules);
+-	if (val & QCA808X_LED_SPEED1000_ON)
+-		set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+-	if (val & QCA808X_LED_SPEED2500_ON)
+-		set_bit(TRIGGER_NETDEV_LINK_2500, rules);
+-	if (val & QCA808X_LED_HALF_DUPLEX_ON)
+-		set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+-	if (val & QCA808X_LED_FULL_DUPLEX_ON)
+-		set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+-
+-	return 0;
+-}
+-
+-static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+-{
+-	u16 reg;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_CTRL(index);
+-
+-	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+-				  QCA808X_LED_PATTERN_MASK);
+-}
+-
+-static int qca808x_led_brightness_set(struct phy_device *phydev,
+-				      u8 index, enum led_brightness value)
+-{
+-	u16 reg;
+-	int ret;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	if (!value) {
+-		ret = qca808x_led_hw_control_reset(phydev, index);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+-			      QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+-			      QCA808X_LED_FORCE_EN | value ? QCA808X_LED_FORCE_ON :
+-							     QCA808X_LED_FORCE_OFF);
+-}
+-
+-static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
+-				 unsigned long *delay_on,
+-				 unsigned long *delay_off)
+-{
+-	int ret;
+-	u16 reg;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+-
+-	/* Set blink to 50% off, 50% on at 4Hz by default */
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
+-			     QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
+-			     QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
+-	if (ret)
+-		return ret;
+-
+-	/* We use BLINK_1 for normal blinking */
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+-			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+-			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
+-	if (ret)
+-		return ret;
+-
+-	/* We set blink to 4Hz, aka 250ms */
+-	*delay_on = 250 / 2;
+-	*delay_off = 250 / 2;
+-
+-	return 0;
+-}
+-
+-static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+-				    unsigned long modes)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	bool active_low = false;
+-	u32 mode;
+-
+-	for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+-		switch (mode) {
+-		case PHY_LED_ACTIVE_LOW:
+-			active_low = true;
+-			break;
+-		default:
+-			return -EINVAL;
+-		}
+-	}
+-
+-	/* PHY polarity is global and can't be set per LED.
+-	 * To detect this, check if last requested polarity mode
+-	 * match the new one.
+-	 */
+-	if (priv->led_polarity_mode >= 0 &&
+-	    priv->led_polarity_mode != active_low) {
+-		phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
+-		return -EINVAL;
+-	}
+-
+-	/* Save the last PHY polarity mode */
+-	priv->led_polarity_mode = active_low;
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_AN,
+-			      QCA808X_MMD7_LED_POLARITY_CTRL,
+-			      QCA808X_LED_ACTIVE_HIGH,
+-			      active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+-}
+-
+-static struct phy_driver at803x_driver[] = {
+-{
+-	/* Qualcomm Atheros AR8035 */
+-	PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
+-	.name			= "Qualcomm Atheros AR8035",
+-	.flags			= PHY_POLL_CABLE_TEST,
+-	.probe			= at8035_probe,
+-	.config_aneg		= at803x_config_aneg,
+-	.config_init		= at803x_config_init,
+-	.soft_reset		= genphy_soft_reset,
+-	.set_wol		= at803x_set_wol,
+-	.get_wol		= at803x_get_wol,
+-	.suspend		= at803x_suspend,
+-	.resume			= at803x_resume,
+-	/* PHY_GBIT_FEATURES */
+-	.read_status		= at803x_read_status,
+-	.config_intr		= at803x_config_intr,
+-	.handle_interrupt	= at803x_handle_interrupt,
+-	.get_tunable		= at803x_get_tunable,
+-	.set_tunable		= at803x_set_tunable,
+-	.cable_test_start	= at8031_cable_test_start,
+-	.cable_test_get_status	= at8031_cable_test_get_status,
+-}, {
+-	/* Qualcomm Atheros AR8030 */
+-	.phy_id			= ATH8030_PHY_ID,
+-	.name			= "Qualcomm Atheros AR8030",
+-	.phy_id_mask		= AT8030_PHY_ID_MASK,
+-	.probe			= at8035_probe,
+-	.config_init		= at803x_config_init,
+-	.link_change_notify	= at803x_link_change_notify,
+-	.set_wol		= at803x_set_wol,
+-	.get_wol		= at803x_get_wol,
+-	.suspend		= at803x_suspend,
+-	.resume			= at803x_resume,
+-	/* PHY_BASIC_FEATURES */
+-	.config_intr		= at803x_config_intr,
+-	.handle_interrupt	= at803x_handle_interrupt,
+-}, {
+-	/* Qualcomm Atheros AR8031/AR8033 */
+-	PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
+-	.name			= "Qualcomm Atheros AR8031/AR8033",
+-	.flags			= PHY_POLL_CABLE_TEST,
+-	.probe			= at8031_probe,
+-	.config_init		= at8031_config_init,
+-	.config_aneg		= at803x_config_aneg,
+-	.soft_reset		= genphy_soft_reset,
+-	.set_wol		= at8031_set_wol,
+-	.get_wol		= at803x_get_wol,
+-	.suspend		= at803x_suspend,
+-	.resume			= at803x_resume,
+-	.read_page		= at803x_read_page,
+-	.write_page		= at803x_write_page,
+-	.get_features		= at803x_get_features,
+-	.read_status		= at8031_read_status,
+-	.config_intr		= at8031_config_intr,
+-	.handle_interrupt	= at803x_handle_interrupt,
+-	.get_tunable		= at803x_get_tunable,
+-	.set_tunable		= at803x_set_tunable,
+-	.cable_test_start	= at8031_cable_test_start,
+-	.cable_test_get_status	= at8031_cable_test_get_status,
+-}, {
+-	/* Qualcomm Atheros AR8032 */
+-	PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+-	.name			= "Qualcomm Atheros AR8032",
+-	.probe			= at803x_probe,
+-	.flags			= PHY_POLL_CABLE_TEST,
+-	.config_init		= at803x_config_init,
+-	.link_change_notify	= at803x_link_change_notify,
+-	.suspend		= at803x_suspend,
+-	.resume			= at803x_resume,
+-	/* PHY_BASIC_FEATURES */
+-	.config_intr		= at803x_config_intr,
+-	.handle_interrupt	= at803x_handle_interrupt,
+-	.cable_test_start	= at803x_cable_test_start,
+-	.cable_test_get_status	= at8032_cable_test_get_status,
+-}, {
+-	/* ATHEROS AR9331 */
+-	PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+-	.name			= "Qualcomm Atheros AR9331 built-in PHY",
+-	.probe			= at803x_probe,
+-	.suspend		= at803x_suspend,
+-	.resume			= at803x_resume,
+-	.flags			= PHY_POLL_CABLE_TEST,
+-	/* PHY_BASIC_FEATURES */
+-	.config_intr		= at803x_config_intr,
+-	.handle_interrupt	= at803x_handle_interrupt,
+-	.cable_test_start	= at803x_cable_test_start,
+-	.cable_test_get_status	= at8032_cable_test_get_status,
+-	.read_status		= at803x_read_status,
+-	.soft_reset		= genphy_soft_reset,
+-	.config_aneg		= at803x_config_aneg,
+-}, {
+-	/* Qualcomm Atheros QCA9561 */
+-	PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+-	.name			= "Qualcomm Atheros QCA9561 built-in PHY",
+-	.probe			= at803x_probe,
+-	.suspend		= at803x_suspend,
+-	.resume			= at803x_resume,
+-	.flags			= PHY_POLL_CABLE_TEST,
+-	/* PHY_BASIC_FEATURES */
+-	.config_intr		= at803x_config_intr,
+-	.handle_interrupt	= at803x_handle_interrupt,
+-	.cable_test_start	= at803x_cable_test_start,
+-	.cable_test_get_status	= at8032_cable_test_get_status,
+-	.read_status		= at803x_read_status,
+-	.soft_reset		= genphy_soft_reset,
+-	.config_aneg		= at803x_config_aneg,
+-}, {
+-	/* QCA8337 */
+-	.phy_id			= QCA8337_PHY_ID,
+-	.phy_id_mask		= QCA8K_PHY_ID_MASK,
+-	.name			= "Qualcomm Atheros 8337 internal PHY",
+-	/* PHY_GBIT_FEATURES */
+-	.probe			= at803x_probe,
+-	.flags			= PHY_IS_INTERNAL,
+-	.config_init		= qca83xx_config_init,
+-	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= qca83xx_get_sset_count,
+-	.get_strings		= qca83xx_get_strings,
+-	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca8337_suspend,
+-	.resume			= qca83xx_resume,
+-}, {
+-	/* QCA8327-A from switch QCA8327-AL1A */
+-	.phy_id			= QCA8327_A_PHY_ID,
+-	.phy_id_mask		= QCA8K_PHY_ID_MASK,
+-	.name			= "Qualcomm Atheros 8327-A internal PHY",
+-	/* PHY_GBIT_FEATURES */
+-	.link_change_notify	= qca83xx_link_change_notify,
+-	.probe			= at803x_probe,
+-	.flags			= PHY_IS_INTERNAL,
+-	.config_init		= qca8327_config_init,
+-	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= qca83xx_get_sset_count,
+-	.get_strings		= qca83xx_get_strings,
+-	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca8327_suspend,
+-	.resume			= qca83xx_resume,
+-}, {
+-	/* QCA8327-B from switch QCA8327-BL1A */
+-	.phy_id			= QCA8327_B_PHY_ID,
+-	.phy_id_mask		= QCA8K_PHY_ID_MASK,
+-	.name			= "Qualcomm Atheros 8327-B internal PHY",
+-	/* PHY_GBIT_FEATURES */
+-	.link_change_notify	= qca83xx_link_change_notify,
+-	.probe			= at803x_probe,
+-	.flags			= PHY_IS_INTERNAL,
+-	.config_init		= qca8327_config_init,
+-	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= qca83xx_get_sset_count,
+-	.get_strings		= qca83xx_get_strings,
+-	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca8327_suspend,
+-	.resume			= qca83xx_resume,
+-}, {
+-	/* Qualcomm QCA8081 */
+-	PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+-	.name			= "Qualcomm QCA8081",
+-	.flags			= PHY_POLL_CABLE_TEST,
+-	.probe			= at803x_probe,
+-	.config_intr		= at803x_config_intr,
+-	.handle_interrupt	= at803x_handle_interrupt,
+-	.get_tunable		= at803x_get_tunable,
+-	.set_tunable		= at803x_set_tunable,
+-	.set_wol		= at803x_set_wol,
+-	.get_wol		= at803x_get_wol,
+-	.get_features		= qca808x_get_features,
+-	.config_aneg		= qca808x_config_aneg,
+-	.suspend		= genphy_suspend,
+-	.resume			= genphy_resume,
+-	.read_status		= qca808x_read_status,
+-	.config_init		= qca808x_config_init,
+-	.soft_reset		= qca808x_soft_reset,
+-	.cable_test_start	= qca808x_cable_test_start,
+-	.cable_test_get_status	= qca808x_cable_test_get_status,
+-	.link_change_notify	= qca808x_link_change_notify,
+-	.led_brightness_set	= qca808x_led_brightness_set,
+-	.led_blink_set		= qca808x_led_blink_set,
+-	.led_hw_is_supported	= qca808x_led_hw_is_supported,
+-	.led_hw_control_set	= qca808x_led_hw_control_set,
+-	.led_hw_control_get	= qca808x_led_hw_control_get,
+-	.led_polarity_set	= qca808x_led_polarity_set,
+-}, };
+-
+-module_phy_driver(at803x_driver);
+-
+-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+-	{ ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
+-	{ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
+-	{ }
+-};
+-
+-MODULE_DEVICE_TABLE(mdio, atheros_tbl);
+--- /dev/null
++++ b/drivers/net/phy/qcom/at803x.c
+@@ -0,0 +1,2759 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * drivers/net/phy/at803x.c
++ *
++ * Driver for Qualcomm Atheros AR803x PHY
++ *
++ * Author: Matus Ujhelyi <[email protected]>
++ */
++
++#include <linux/phy.h>
++#include <linux/module.h>
++#include <linux/string.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ethtool_netlink.h>
++#include <linux/bitfield.h>
++#include <linux/regulator/of_regulator.h>
++#include <linux/regulator/driver.h>
++#include <linux/regulator/consumer.h>
++#include <linux/of.h>
++#include <linux/phylink.h>
++#include <linux/sfp.h>
++#include <dt-bindings/net/qca-ar803x.h>
++
++#define AT803X_SPECIFIC_FUNCTION_CONTROL	0x10
++#define AT803X_SFC_ASSERT_CRS			BIT(11)
++#define AT803X_SFC_FORCE_LINK			BIT(10)
++#define AT803X_SFC_MDI_CROSSOVER_MODE_M		GENMASK(6, 5)
++#define AT803X_SFC_AUTOMATIC_CROSSOVER		0x3
++#define AT803X_SFC_MANUAL_MDIX			0x1
++#define AT803X_SFC_MANUAL_MDI			0x0
++#define AT803X_SFC_SQE_TEST			BIT(2)
++#define AT803X_SFC_POLARITY_REVERSAL		BIT(1)
++#define AT803X_SFC_DISABLE_JABBER		BIT(0)
++
++#define AT803X_SPECIFIC_STATUS			0x11
++#define AT803X_SS_SPEED_MASK			GENMASK(15, 14)
++#define AT803X_SS_SPEED_1000			2
++#define AT803X_SS_SPEED_100			1
++#define AT803X_SS_SPEED_10			0
++#define AT803X_SS_DUPLEX			BIT(13)
++#define AT803X_SS_SPEED_DUPLEX_RESOLVED		BIT(11)
++#define AT803X_SS_MDIX				BIT(6)
++
++#define QCA808X_SS_SPEED_MASK			GENMASK(9, 7)
++#define QCA808X_SS_SPEED_2500			4
++
++#define AT803X_INTR_ENABLE			0x12
++#define AT803X_INTR_ENABLE_AUTONEG_ERR		BIT(15)
++#define AT803X_INTR_ENABLE_SPEED_CHANGED	BIT(14)
++#define AT803X_INTR_ENABLE_DUPLEX_CHANGED	BIT(13)
++#define AT803X_INTR_ENABLE_PAGE_RECEIVED	BIT(12)
++#define AT803X_INTR_ENABLE_LINK_FAIL		BIT(11)
++#define AT803X_INTR_ENABLE_LINK_SUCCESS		BIT(10)
++#define AT803X_INTR_ENABLE_LINK_FAIL_BX		BIT(8)
++#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX	BIT(7)
++#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE	BIT(5)
++#define AT803X_INTR_ENABLE_POLARITY_CHANGED	BIT(1)
++#define AT803X_INTR_ENABLE_WOL			BIT(0)
++
++#define AT803X_INTR_STATUS			0x13
++
++#define AT803X_SMART_SPEED			0x14
++#define AT803X_SMART_SPEED_ENABLE		BIT(5)
++#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK	GENMASK(4, 2)
++#define AT803X_SMART_SPEED_BYPASS_TIMER		BIT(1)
++#define AT803X_CDT				0x16
++#define AT803X_CDT_MDI_PAIR_MASK		GENMASK(9, 8)
++#define AT803X_CDT_ENABLE_TEST			BIT(0)
++#define AT803X_CDT_STATUS			0x1c
++#define AT803X_CDT_STATUS_STAT_NORMAL		0
++#define AT803X_CDT_STATUS_STAT_SHORT		1
++#define AT803X_CDT_STATUS_STAT_OPEN		2
++#define AT803X_CDT_STATUS_STAT_FAIL		3
++#define AT803X_CDT_STATUS_STAT_MASK		GENMASK(9, 8)
++#define AT803X_CDT_STATUS_DELTA_TIME_MASK	GENMASK(7, 0)
++#define AT803X_LED_CONTROL			0x18
++
++#define AT803X_PHY_MMD3_WOL_CTRL		0x8012
++#define AT803X_WOL_EN				BIT(5)
++#define AT803X_LOC_MAC_ADDR_0_15_OFFSET		0x804C
++#define AT803X_LOC_MAC_ADDR_16_31_OFFSET	0x804B
++#define AT803X_LOC_MAC_ADDR_32_47_OFFSET	0x804A
++#define AT803X_REG_CHIP_CONFIG			0x1f
++#define AT803X_BT_BX_REG_SEL			0x8000
++
++#define AT803X_DEBUG_ADDR			0x1D
++#define AT803X_DEBUG_DATA			0x1E
++
++#define AT803X_MODE_CFG_MASK			0x0F
++#define AT803X_MODE_CFG_BASET_RGMII		0x00
++#define AT803X_MODE_CFG_BASET_SGMII		0x01
++#define AT803X_MODE_CFG_BX1000_RGMII_50OHM	0x02
++#define AT803X_MODE_CFG_BX1000_RGMII_75OHM	0x03
++#define AT803X_MODE_CFG_BX1000_CONV_50OHM	0x04
++#define AT803X_MODE_CFG_BX1000_CONV_75OHM	0x05
++#define AT803X_MODE_CFG_FX100_RGMII_50OHM	0x06
++#define AT803X_MODE_CFG_FX100_CONV_50OHM	0x07
++#define AT803X_MODE_CFG_RGMII_AUTO_MDET		0x0B
++#define AT803X_MODE_CFG_FX100_RGMII_75OHM	0x0E
++#define AT803X_MODE_CFG_FX100_CONV_75OHM	0x0F
++
++#define AT803X_PSSR				0x11	/*PHY-Specific Status Register*/
++#define AT803X_PSSR_MR_AN_COMPLETE		0x0200
++
++#define AT803X_DEBUG_ANALOG_TEST_CTRL		0x00
++#define QCA8327_DEBUG_MANU_CTRL_EN		BIT(2)
++#define QCA8337_DEBUG_MANU_CTRL_EN		GENMASK(3, 2)
++#define AT803X_DEBUG_RX_CLK_DLY_EN		BIT(15)
++
++#define AT803X_DEBUG_SYSTEM_CTRL_MODE		0x05
++#define AT803X_DEBUG_TX_CLK_DLY_EN		BIT(8)
++
++#define AT803X_DEBUG_REG_HIB_CTRL		0x0b
++#define   AT803X_DEBUG_HIB_CTRL_SEL_RST_80U	BIT(10)
++#define   AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE	BIT(13)
++#define   AT803X_DEBUG_HIB_CTRL_PS_HIB_EN	BIT(15)
++
++#define AT803X_DEBUG_REG_3C			0x3C
++
++#define AT803X_DEBUG_REG_GREEN			0x3D
++#define   AT803X_DEBUG_GATE_CLK_IN1000		BIT(6)
++
++#define AT803X_DEBUG_REG_1F			0x1F
++#define AT803X_DEBUG_PLL_ON			BIT(2)
++#define AT803X_DEBUG_RGMII_1V8			BIT(3)
++
++#define MDIO_AZ_DEBUG				0x800D
++
++/* AT803x supports either the XTAL input pad, an internal PLL or the
++ * DSP as clock reference for the clock output pad. The XTAL reference
++ * is only used for 25 MHz output, all other frequencies need the PLL.
++ * The DSP as a clock reference is used in synchronous ethernet
++ * applications.
++ *
++ * By default the PLL is only enabled if there is a link. Otherwise
++ * the PHY will go into low power state and disabled the PLL. You can
++ * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
++ * enabled.
++ */
++#define AT803X_MMD7_CLK25M			0x8016
++#define AT803X_CLK_OUT_MASK			GENMASK(4, 2)
++#define AT803X_CLK_OUT_25MHZ_XTAL		0
++#define AT803X_CLK_OUT_25MHZ_DSP		1
++#define AT803X_CLK_OUT_50MHZ_PLL		2
++#define AT803X_CLK_OUT_50MHZ_DSP		3
++#define AT803X_CLK_OUT_62_5MHZ_PLL		4
++#define AT803X_CLK_OUT_62_5MHZ_DSP		5
++#define AT803X_CLK_OUT_125MHZ_PLL		6
++#define AT803X_CLK_OUT_125MHZ_DSP		7
++
++/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
++ * but doesn't support choosing between XTAL/PLL and DSP.
++ */
++#define AT8035_CLK_OUT_MASK			GENMASK(4, 3)
++
++#define AT803X_CLK_OUT_STRENGTH_MASK		GENMASK(8, 7)
++#define AT803X_CLK_OUT_STRENGTH_FULL		0
++#define AT803X_CLK_OUT_STRENGTH_HALF		1
++#define AT803X_CLK_OUT_STRENGTH_QUARTER		2
++
++#define AT803X_DEFAULT_DOWNSHIFT		5
++#define AT803X_MIN_DOWNSHIFT			2
++#define AT803X_MAX_DOWNSHIFT			9
++
++#define AT803X_MMD3_SMARTEEE_CTL1		0x805b
++#define AT803X_MMD3_SMARTEEE_CTL2		0x805c
++#define AT803X_MMD3_SMARTEEE_CTL3		0x805d
++#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN	BIT(8)
++
++#define ATH9331_PHY_ID				0x004dd041
++#define ATH8030_PHY_ID				0x004dd076
++#define ATH8031_PHY_ID				0x004dd074
++#define ATH8032_PHY_ID				0x004dd023
++#define ATH8035_PHY_ID				0x004dd072
++#define AT8030_PHY_ID_MASK			0xffffffef
++
++#define QCA8081_PHY_ID				0x004dd101
++
++#define QCA8327_A_PHY_ID			0x004dd033
++#define QCA8327_B_PHY_ID			0x004dd034
++#define QCA8337_PHY_ID				0x004dd036
++#define QCA9561_PHY_ID				0x004dd042
++#define QCA8K_PHY_ID_MASK			0xffffffff
++
++#define QCA8K_DEVFLAGS_REVISION_MASK		GENMASK(2, 0)
++
++#define AT803X_PAGE_FIBER			0
++#define AT803X_PAGE_COPPER			1
++
++/* don't turn off internal PLL */
++#define AT803X_KEEP_PLL_ENABLED			BIT(0)
++#define AT803X_DISABLE_SMARTEEE			BIT(1)
++
++/* disable hibernation mode */
++#define AT803X_DISABLE_HIBERNATION_MODE		BIT(2)
++
++/* ADC threshold */
++#define QCA808X_PHY_DEBUG_ADC_THRESHOLD		0x2c80
++#define QCA808X_ADC_THRESHOLD_MASK		GENMASK(7, 0)
++#define QCA808X_ADC_THRESHOLD_80MV		0
++#define QCA808X_ADC_THRESHOLD_100MV		0xf0
++#define QCA808X_ADC_THRESHOLD_200MV		0x0f
++#define QCA808X_ADC_THRESHOLD_300MV		0xff
++
++/* CLD control */
++#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7		0x8007
++#define QCA808X_8023AZ_AFE_CTRL_MASK		GENMASK(8, 4)
++#define QCA808X_8023AZ_AFE_EN			0x90
++
++/* AZ control */
++#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL	0x8008
++#define QCA808X_MMD3_AZ_TRAINING_VAL		0x1c32
++
++#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB	0x8014
++#define QCA808X_MSE_THRESHOLD_20DB_VALUE	0x529
++
++#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB	0x800E
++#define QCA808X_MSE_THRESHOLD_17DB_VALUE	0x341
++
++#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB	0x801E
++#define QCA808X_MSE_THRESHOLD_27DB_VALUE	0x419
++
++#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB	0x8020
++#define QCA808X_MSE_THRESHOLD_28DB_VALUE	0x341
++
++#define QCA808X_PHY_MMD7_TOP_OPTION1		0x901c
++#define QCA808X_TOP_OPTION1_DATA		0x0
++
++#define QCA808X_PHY_MMD3_DEBUG_1		0xa100
++#define QCA808X_MMD3_DEBUG_1_VALUE		0x9203
++#define QCA808X_PHY_MMD3_DEBUG_2		0xa101
++#define QCA808X_MMD3_DEBUG_2_VALUE		0x48ad
++#define QCA808X_PHY_MMD3_DEBUG_3		0xa103
++#define QCA808X_MMD3_DEBUG_3_VALUE		0x1698
++#define QCA808X_PHY_MMD3_DEBUG_4		0xa105
++#define QCA808X_MMD3_DEBUG_4_VALUE		0x8001
++#define QCA808X_PHY_MMD3_DEBUG_5		0xa106
++#define QCA808X_MMD3_DEBUG_5_VALUE		0x1111
++#define QCA808X_PHY_MMD3_DEBUG_6		0xa011
++#define QCA808X_MMD3_DEBUG_6_VALUE		0x5f85
++
++/* master/slave seed config */
++#define QCA808X_PHY_DEBUG_LOCAL_SEED		9
++#define QCA808X_MASTER_SLAVE_SEED_ENABLE	BIT(1)
++#define QCA808X_MASTER_SLAVE_SEED_CFG		GENMASK(12, 2)
++#define QCA808X_MASTER_SLAVE_SEED_RANGE		0x32
++
++/* Hibernation yields lower power consumpiton in contrast with normal operation mode.
++ * when the copper cable is unplugged, the PHY enters into hibernation mode in about 10s.
++ */
++#define QCA808X_DBG_AN_TEST			0xb
++#define QCA808X_HIBERNATION_EN			BIT(15)
++
++#define QCA808X_CDT_ENABLE_TEST			BIT(15)
++#define QCA808X_CDT_INTER_CHECK_DIS		BIT(13)
++#define QCA808X_CDT_STATUS			BIT(11)
++#define QCA808X_CDT_LENGTH_UNIT			BIT(10)
++
++#define QCA808X_MMD3_CDT_STATUS			0x8064
++#define QCA808X_MMD3_CDT_DIAG_PAIR_A		0x8065
++#define QCA808X_MMD3_CDT_DIAG_PAIR_B		0x8066
++#define QCA808X_MMD3_CDT_DIAG_PAIR_C		0x8067
++#define QCA808X_MMD3_CDT_DIAG_PAIR_D		0x8068
++#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT	GENMASK(15, 8)
++#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT	GENMASK(7, 0)
++
++#define QCA808X_CDT_CODE_PAIR_A			GENMASK(15, 12)
++#define QCA808X_CDT_CODE_PAIR_B			GENMASK(11, 8)
++#define QCA808X_CDT_CODE_PAIR_C			GENMASK(7, 4)
++#define QCA808X_CDT_CODE_PAIR_D			GENMASK(3, 0)
++
++#define QCA808X_CDT_STATUS_STAT_TYPE		GENMASK(1, 0)
++#define QCA808X_CDT_STATUS_STAT_FAIL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
++#define QCA808X_CDT_STATUS_STAT_NORMAL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
++#define QCA808X_CDT_STATUS_STAT_SAME_OPEN	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
++#define QCA808X_CDT_STATUS_STAT_SAME_SHORT	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
++
++#define QCA808X_CDT_STATUS_STAT_MDI		GENMASK(3, 2)
++#define QCA808X_CDT_STATUS_STAT_MDI1		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
++#define QCA808X_CDT_STATUS_STAT_MDI2		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
++#define QCA808X_CDT_STATUS_STAT_MDI3		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
++
++/* NORMAL are MDI with type set to 0 */
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI1
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI1)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI1)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI2
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI2)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI2)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI3
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI3)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI3)
++
++/* Added for reference of existence but should be handled by wait_for_completion already */
++#define QCA808X_CDT_STATUS_STAT_BUSY		(BIT(1) | BIT(3))
++
++#define QCA808X_MMD7_LED_GLOBAL			0x8073
++#define QCA808X_LED_BLINK_1			GENMASK(11, 6)
++#define QCA808X_LED_BLINK_2			GENMASK(5, 0)
++/* Values are the same for both BLINK_1 and BLINK_2 */
++#define QCA808X_LED_BLINK_FREQ_MASK		GENMASK(5, 3)
++#define QCA808X_LED_BLINK_FREQ_2HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
++#define QCA808X_LED_BLINK_FREQ_4HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
++#define QCA808X_LED_BLINK_FREQ_8HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
++#define QCA808X_LED_BLINK_FREQ_16HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
++#define QCA808X_LED_BLINK_FREQ_32HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
++#define QCA808X_LED_BLINK_FREQ_64HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
++#define QCA808X_LED_BLINK_FREQ_128HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
++#define QCA808X_LED_BLINK_FREQ_256HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
++#define QCA808X_LED_BLINK_DUTY_MASK		GENMASK(2, 0)
++#define QCA808X_LED_BLINK_DUTY_50_50		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
++#define QCA808X_LED_BLINK_DUTY_75_25		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
++#define QCA808X_LED_BLINK_DUTY_25_75		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
++#define QCA808X_LED_BLINK_DUTY_33_67		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
++#define QCA808X_LED_BLINK_DUTY_67_33		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
++#define QCA808X_LED_BLINK_DUTY_17_83		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
++#define QCA808X_LED_BLINK_DUTY_83_17		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
++#define QCA808X_LED_BLINK_DUTY_8_92		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
++
++#define QCA808X_MMD7_LED2_CTRL			0x8074
++#define QCA808X_MMD7_LED2_FORCE_CTRL		0x8075
++#define QCA808X_MMD7_LED1_CTRL			0x8076
++#define QCA808X_MMD7_LED1_FORCE_CTRL		0x8077
++#define QCA808X_MMD7_LED0_CTRL			0x8078
++#define QCA808X_MMD7_LED_CTRL(x)		(0x8078 - ((x) * 2))
++
++/* LED hw control pattern is the same for every LED */
++#define QCA808X_LED_PATTERN_MASK		GENMASK(15, 0)
++#define QCA808X_LED_SPEED2500_ON		BIT(15)
++#define QCA808X_LED_SPEED2500_BLINK		BIT(14)
++/* Follow blink trigger even if duplex or speed condition doesn't match */
++#define QCA808X_LED_BLINK_CHECK_BYPASS		BIT(13)
++#define QCA808X_LED_FULL_DUPLEX_ON		BIT(12)
++#define QCA808X_LED_HALF_DUPLEX_ON		BIT(11)
++#define QCA808X_LED_TX_BLINK			BIT(10)
++#define QCA808X_LED_RX_BLINK			BIT(9)
++#define QCA808X_LED_TX_ON_10MS			BIT(8)
++#define QCA808X_LED_RX_ON_10MS			BIT(7)
++#define QCA808X_LED_SPEED1000_ON		BIT(6)
++#define QCA808X_LED_SPEED100_ON			BIT(5)
++#define QCA808X_LED_SPEED10_ON			BIT(4)
++#define QCA808X_LED_COLLISION_BLINK		BIT(3)
++#define QCA808X_LED_SPEED1000_BLINK		BIT(2)
++#define QCA808X_LED_SPEED100_BLINK		BIT(1)
++#define QCA808X_LED_SPEED10_BLINK		BIT(0)
++
++#define QCA808X_MMD7_LED0_FORCE_CTRL		0x8079
++#define QCA808X_MMD7_LED_FORCE_CTRL(x)		(0x8079 - ((x) * 2))
++
++/* LED force ctrl is the same for every LED
++ * No documentation exist for this, not even internal one
++ * with NDA as QCOM gives only info about configuring
++ * hw control pattern rules and doesn't indicate any way
++ * to force the LED to specific mode.
++ * These define comes from reverse and testing and maybe
++ * lack of some info or some info are not entirely correct.
++ * For the basic LED control and hw control these finding
++ * are enough to support LED control in all the required APIs.
++ *
++ * On doing some comparison with implementation with qca807x,
++ * it was found that it's 1:1 equal to it and confirms all the
++ * reverse done. It was also found further specification with the
++ * force mode and the blink modes.
++ */
++#define QCA808X_LED_FORCE_EN			BIT(15)
++#define QCA808X_LED_FORCE_MODE_MASK		GENMASK(14, 13)
++#define QCA808X_LED_FORCE_BLINK_1		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
++#define QCA808X_LED_FORCE_BLINK_2		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
++#define QCA808X_LED_FORCE_ON			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
++#define QCA808X_LED_FORCE_OFF			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
++
++#define QCA808X_MMD7_LED_POLARITY_CTRL		0x901a
++/* QSDK sets by default 0x46 to this reg that sets BIT 6 for
++ * LED to active high. It's not clear what BIT 3 and BIT 4 does.
++ */
++#define QCA808X_LED_ACTIVE_HIGH			BIT(6)
++
++/* QCA808X 1G chip type */
++#define QCA808X_PHY_MMD7_CHIP_TYPE		0x901d
++#define QCA808X_PHY_CHIP_TYPE_1G		BIT(0)
++
++#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL	0x9072
++#define QCA8081_PHY_FIFO_RSTN			BIT(11)
++
++MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
++MODULE_AUTHOR("Matus Ujhelyi");
++MODULE_LICENSE("GPL");
++
++enum stat_access_type {
++	PHY,
++	MMD
++};
++
++struct at803x_hw_stat {
++	const char *string;
++	u8 reg;
++	u32 mask;
++	enum stat_access_type access_type;
++};
++
++static struct at803x_hw_stat qca83xx_hw_stats[] = {
++	{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
++	{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
++	{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
++};
++
++struct at803x_ss_mask {
++	u16 speed_mask;
++	u8 speed_shift;
++};
++
++struct at803x_priv {
++	int flags;
++	u16 clk_25m_reg;
++	u16 clk_25m_mask;
++	u8 smarteee_lpi_tw_1g;
++	u8 smarteee_lpi_tw_100m;
++	bool is_fiber;
++	bool is_1000basex;
++	struct regulator_dev *vddio_rdev;
++	struct regulator_dev *vddh_rdev;
++	u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
++	int led_polarity_mode;
++};
++
++struct at803x_context {
++	u16 bmcr;
++	u16 advertise;
++	u16 control1000;
++	u16 int_enable;
++	u16 smart_speed;
++	u16 led_control;
++};
++
++static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
++{
++	int ret;
++
++	ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
++	if (ret < 0)
++		return ret;
++
++	return phy_write(phydev, AT803X_DEBUG_DATA, data);
++}
++
++static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
++{
++	int ret;
++
++	ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
++	if (ret < 0)
++		return ret;
++
++	return phy_read(phydev, AT803X_DEBUG_DATA);
++}
++
++static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
++				 u16 clear, u16 set)
++{
++	u16 val;
++	int ret;
++
++	ret = at803x_debug_reg_read(phydev, reg);
++	if (ret < 0)
++		return ret;
++
++	val = ret & 0xffff;
++	val &= ~clear;
++	val |= set;
++
++	return phy_write(phydev, AT803X_DEBUG_DATA, val);
++}
++
++static int at803x_write_page(struct phy_device *phydev, int page)
++{
++	int mask;
++	int set;
++
++	if (page == AT803X_PAGE_COPPER) {
++		set = AT803X_BT_BX_REG_SEL;
++		mask = 0;
++	} else {
++		set = 0;
++		mask = AT803X_BT_BX_REG_SEL;
++	}
++
++	return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
++}
++
++static int at803x_read_page(struct phy_device *phydev)
++{
++	int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
++
++	if (ccr < 0)
++		return ccr;
++
++	if (ccr & AT803X_BT_BX_REG_SEL)
++		return AT803X_PAGE_COPPER;
++
++	return AT803X_PAGE_FIBER;
++}
++
++static int at803x_enable_rx_delay(struct phy_device *phydev)
++{
++	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
++				     AT803X_DEBUG_RX_CLK_DLY_EN);
++}
++
++static int at803x_enable_tx_delay(struct phy_device *phydev)
++{
++	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
++				     AT803X_DEBUG_TX_CLK_DLY_EN);
++}
++
++static int at803x_disable_rx_delay(struct phy_device *phydev)
++{
++	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
++				     AT803X_DEBUG_RX_CLK_DLY_EN, 0);
++}
++
++static int at803x_disable_tx_delay(struct phy_device *phydev)
++{
++	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
++				     AT803X_DEBUG_TX_CLK_DLY_EN, 0);
++}
++
++/* save relevant PHY registers to private copy */
++static void at803x_context_save(struct phy_device *phydev,
++				struct at803x_context *context)
++{
++	context->bmcr = phy_read(phydev, MII_BMCR);
++	context->advertise = phy_read(phydev, MII_ADVERTISE);
++	context->control1000 = phy_read(phydev, MII_CTRL1000);
++	context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
++	context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
++	context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
++}
++
++/* restore relevant PHY registers from private copy */
++static void at803x_context_restore(struct phy_device *phydev,
++				   const struct at803x_context *context)
++{
++	phy_write(phydev, MII_BMCR, context->bmcr);
++	phy_write(phydev, MII_ADVERTISE, context->advertise);
++	phy_write(phydev, MII_CTRL1000, context->control1000);
++	phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
++	phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
++	phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
++}
++
++static int at803x_set_wol(struct phy_device *phydev,
++			  struct ethtool_wolinfo *wol)
++{
++	int ret, irq_enabled;
++
++	if (wol->wolopts & WAKE_MAGIC) {
++		struct net_device *ndev = phydev->attached_dev;
++		const u8 *mac;
++		unsigned int i;
++		static const unsigned int offsets[] = {
++			AT803X_LOC_MAC_ADDR_32_47_OFFSET,
++			AT803X_LOC_MAC_ADDR_16_31_OFFSET,
++			AT803X_LOC_MAC_ADDR_0_15_OFFSET,
++		};
++
++		if (!ndev)
++			return -ENODEV;
++
++		mac = (const u8 *)ndev->dev_addr;
++
++		if (!is_valid_ether_addr(mac))
++			return -EINVAL;
++
++		for (i = 0; i < 3; i++)
++			phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
++				      mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
++
++		/* Enable WOL interrupt */
++		ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
++		if (ret)
++			return ret;
++	} else {
++		/* Disable WOL interrupt */
++		ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
++		if (ret)
++			return ret;
++	}
++
++	/* Clear WOL status */
++	ret = phy_read(phydev, AT803X_INTR_STATUS);
++	if (ret < 0)
++		return ret;
++
++	/* Check if there are other interrupts except for WOL triggered when PHY is
++	 * in interrupt mode, only the interrupts enabled by AT803X_INTR_ENABLE can
++	 * be passed up to the interrupt PIN.
++	 */
++	irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
++	if (irq_enabled < 0)
++		return irq_enabled;
++
++	irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
++	if (ret & irq_enabled && !phy_polling_mode(phydev))
++		phy_trigger_machine(phydev);
++
++	return 0;
++}
++
++static void at803x_get_wol(struct phy_device *phydev,
++			   struct ethtool_wolinfo *wol)
++{
++	int value;
++
++	wol->supported = WAKE_MAGIC;
++	wol->wolopts = 0;
++
++	value = phy_read(phydev, AT803X_INTR_ENABLE);
++	if (value < 0)
++		return;
++
++	if (value & AT803X_INTR_ENABLE_WOL)
++		wol->wolopts |= WAKE_MAGIC;
++}
++
++static int qca83xx_get_sset_count(struct phy_device *phydev)
++{
++	return ARRAY_SIZE(qca83xx_hw_stats);
++}
++
++static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
++		strscpy(data + i * ETH_GSTRING_LEN,
++			qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
++	}
++}
++
++static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
++{
++	struct at803x_hw_stat stat = qca83xx_hw_stats[i];
++	struct at803x_priv *priv = phydev->priv;
++	int val;
++	u64 ret;
++
++	if (stat.access_type == MMD)
++		val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
++	else
++		val = phy_read(phydev, stat.reg);
++
++	if (val < 0) {
++		ret = U64_MAX;
++	} else {
++		val = val & stat.mask;
++		priv->stats[i] += val;
++		ret = priv->stats[i];
++	}
++
++	return ret;
++}
++
++static void qca83xx_get_stats(struct phy_device *phydev,
++			      struct ethtool_stats *stats, u64 *data)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
++		data[i] = qca83xx_get_stat(phydev, i);
++}
++
++static int at803x_suspend(struct phy_device *phydev)
++{
++	int value;
++	int wol_enabled;
++
++	value = phy_read(phydev, AT803X_INTR_ENABLE);
++	wol_enabled = value & AT803X_INTR_ENABLE_WOL;
++
++	if (wol_enabled)
++		value = BMCR_ISOLATE;
++	else
++		value = BMCR_PDOWN;
++
++	phy_modify(phydev, MII_BMCR, 0, value);
++
++	return 0;
++}
++
++static int at803x_resume(struct phy_device *phydev)
++{
++	return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
++}
++
++static int at803x_parse_dt(struct phy_device *phydev)
++{
++	struct device_node *node = phydev->mdio.dev.of_node;
++	struct at803x_priv *priv = phydev->priv;
++	u32 freq, strength, tw;
++	unsigned int sel;
++	int ret;
++
++	if (!IS_ENABLED(CONFIG_OF_MDIO))
++		return 0;
++
++	if (of_property_read_bool(node, "qca,disable-smarteee"))
++		priv->flags |= AT803X_DISABLE_SMARTEEE;
++
++	if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
++		priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
++
++	if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
++		if (!tw || tw > 255) {
++			phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
++			return -EINVAL;
++		}
++		priv->smarteee_lpi_tw_1g = tw;
++	}
++
++	if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
++		if (!tw || tw > 255) {
++			phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
++			return -EINVAL;
++		}
++		priv->smarteee_lpi_tw_100m = tw;
++	}
++
++	ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
++	if (!ret) {
++		switch (freq) {
++		case 25000000:
++			sel = AT803X_CLK_OUT_25MHZ_XTAL;
++			break;
++		case 50000000:
++			sel = AT803X_CLK_OUT_50MHZ_PLL;
++			break;
++		case 62500000:
++			sel = AT803X_CLK_OUT_62_5MHZ_PLL;
++			break;
++		case 125000000:
++			sel = AT803X_CLK_OUT_125MHZ_PLL;
++			break;
++		default:
++			phydev_err(phydev, "invalid qca,clk-out-frequency\n");
++			return -EINVAL;
++		}
++
++		priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
++		priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
++	}
++
++	ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
++	if (!ret) {
++		priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
++		switch (strength) {
++		case AR803X_STRENGTH_FULL:
++			priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
++			break;
++		case AR803X_STRENGTH_HALF:
++			priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
++			break;
++		case AR803X_STRENGTH_QUARTER:
++			priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
++			break;
++		default:
++			phydev_err(phydev, "invalid qca,clk-out-strength\n");
++			return -EINVAL;
++		}
++	}
++
++	return 0;
++}
++
++static int at803x_probe(struct phy_device *phydev)
++{
++	struct device *dev = &phydev->mdio.dev;
++	struct at803x_priv *priv;
++	int ret;
++
++	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	/* Init LED polarity mode to -1 */
++	priv->led_polarity_mode = -1;
++
++	phydev->priv = priv;
++
++	ret = at803x_parse_dt(phydev);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++static int at803x_get_features(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	int err;
++
++	err = genphy_read_abilities(phydev);
++	if (err)
++		return err;
++
++	if (phydev->drv->phy_id != ATH8031_PHY_ID)
++		return 0;
++
++	/* AR8031/AR8033 have different status registers
++	 * for copper and fiber operation. However, the
++	 * extended status register is the same for both
++	 * operation modes.
++	 *
++	 * As a result of that, ESTATUS_1000_XFULL is set
++	 * to 1 even when operating in copper TP mode.
++	 *
++	 * Remove this mode from the supported link modes
++	 * when not operating in 1000BaseX mode.
++	 */
++	if (!priv->is_1000basex)
++		linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
++				   phydev->supported);
++
++	return 0;
++}
++
++static int at803x_smarteee_config(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	u16 mask = 0, val = 0;
++	int ret;
++
++	if (priv->flags & AT803X_DISABLE_SMARTEEE)
++		return phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				      AT803X_MMD3_SMARTEEE_CTL3,
++				      AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
++
++	if (priv->smarteee_lpi_tw_1g) {
++		mask |= 0xff00;
++		val |= priv->smarteee_lpi_tw_1g << 8;
++	}
++	if (priv->smarteee_lpi_tw_100m) {
++		mask |= 0x00ff;
++		val |= priv->smarteee_lpi_tw_100m;
++	}
++	if (!mask)
++		return 0;
++
++	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
++			     mask, val);
++	if (ret)
++		return ret;
++
++	return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
++			      AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
++			      AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
++}
++
++static int at803x_clk_out_config(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++
++	if (!priv->clk_25m_mask)
++		return 0;
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
++			      priv->clk_25m_mask, priv->clk_25m_reg);
++}
++
++static int at8031_pll_config(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++
++	/* The default after hardware reset is PLL OFF. After a soft reset, the
++	 * values are retained.
++	 */
++	if (priv->flags & AT803X_KEEP_PLL_ENABLED)
++		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
++					     0, AT803X_DEBUG_PLL_ON);
++	else
++		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
++					     AT803X_DEBUG_PLL_ON, 0);
++}
++
++static int at803x_hibernation_mode_config(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++
++	/* The default after hardware reset is hibernation mode enabled. After
++	 * software reset, the value is retained.
++	 */
++	if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
++		return 0;
++
++	return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
++					 AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
++}
++
++static int at803x_config_init(struct phy_device *phydev)
++{
++	int ret;
++
++	/* The RX and TX delay default is:
++	 *   after HW reset: RX delay enabled and TX delay disabled
++	 *   after SW reset: RX delay enabled, while TX delay retains the
++	 *   value before reset.
++	 */
++	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
++	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
++		ret = at803x_enable_rx_delay(phydev);
++	else
++		ret = at803x_disable_rx_delay(phydev);
++	if (ret < 0)
++		return ret;
++
++	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
++	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
++		ret = at803x_enable_tx_delay(phydev);
++	else
++		ret = at803x_disable_tx_delay(phydev);
++	if (ret < 0)
++		return ret;
++
++	ret = at803x_smarteee_config(phydev);
++	if (ret < 0)
++		return ret;
++
++	ret = at803x_clk_out_config(phydev);
++	if (ret < 0)
++		return ret;
++
++	ret = at803x_hibernation_mode_config(phydev);
++	if (ret < 0)
++		return ret;
++
++	/* Ar803x extended next page bit is enabled by default. Cisco
++	 * multigig switches read this bit and attempt to negotiate 10Gbps
++	 * rates even if the next page bit is disabled. This is incorrect
++	 * behaviour but we still need to accommodate it. XNP is only needed
++	 * for 10Gbps support, so disable XNP.
++	 */
++	return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
++}
++
++static int at803x_ack_interrupt(struct phy_device *phydev)
++{
++	int err;
++
++	err = phy_read(phydev, AT803X_INTR_STATUS);
++
++	return (err < 0) ? err : 0;
++}
++
++static int at803x_config_intr(struct phy_device *phydev)
++{
++	int err;
++	int value;
++
++	value = phy_read(phydev, AT803X_INTR_ENABLE);
++
++	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
++		/* Clear any pending interrupts */
++		err = at803x_ack_interrupt(phydev);
++		if (err)
++			return err;
++
++		value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
++		value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
++		value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
++		value |= AT803X_INTR_ENABLE_LINK_FAIL;
++		value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
++
++		err = phy_write(phydev, AT803X_INTR_ENABLE, value);
++	} else {
++		err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
++		if (err)
++			return err;
++
++		/* Clear any pending interrupts */
++		err = at803x_ack_interrupt(phydev);
++	}
++
++	return err;
++}
++
++static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
++{
++	int irq_status, int_enabled;
++
++	irq_status = phy_read(phydev, AT803X_INTR_STATUS);
++	if (irq_status < 0) {
++		phy_error(phydev);
++		return IRQ_NONE;
++	}
++
++	/* Read the current enabled interrupts */
++	int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
++	if (int_enabled < 0) {
++		phy_error(phydev);
++		return IRQ_NONE;
++	}
++
++	/* See if this was one of our enabled interrupts */
++	if (!(irq_status & int_enabled))
++		return IRQ_NONE;
++
++	phy_trigger_machine(phydev);
++
++	return IRQ_HANDLED;
++}
++
++static void at803x_link_change_notify(struct phy_device *phydev)
++{
++	/*
++	 * Conduct a hardware reset for AT8030 every time a link loss is
++	 * signalled. This is necessary to circumvent a hardware bug that
++	 * occurs when the cable is unplugged while TX packets are pending
++	 * in the FIFO. In such cases, the FIFO enters an error mode it
++	 * cannot recover from by software.
++	 */
++	if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
++		struct at803x_context context;
++
++		at803x_context_save(phydev, &context);
++
++		phy_device_reset(phydev, 1);
++		usleep_range(1000, 2000);
++		phy_device_reset(phydev, 0);
++		usleep_range(1000, 2000);
++
++		at803x_context_restore(phydev, &context);
++
++		phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
++	}
++}
++
++static int at803x_read_specific_status(struct phy_device *phydev,
++				       struct at803x_ss_mask ss_mask)
++{
++	int ss;
++
++	/* Read the AT8035 PHY-Specific Status register, which indicates the
++	 * speed and duplex that the PHY is actually using, irrespective of
++	 * whether we are in autoneg mode or not.
++	 */
++	ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
++	if (ss < 0)
++		return ss;
++
++	if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
++		int sfc, speed;
++
++		sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
++		if (sfc < 0)
++			return sfc;
++
++		speed = ss & ss_mask.speed_mask;
++		speed >>= ss_mask.speed_shift;
++
++		switch (speed) {
++		case AT803X_SS_SPEED_10:
++			phydev->speed = SPEED_10;
++			break;
++		case AT803X_SS_SPEED_100:
++			phydev->speed = SPEED_100;
++			break;
++		case AT803X_SS_SPEED_1000:
++			phydev->speed = SPEED_1000;
++			break;
++		case QCA808X_SS_SPEED_2500:
++			phydev->speed = SPEED_2500;
++			break;
++		}
++		if (ss & AT803X_SS_DUPLEX)
++			phydev->duplex = DUPLEX_FULL;
++		else
++			phydev->duplex = DUPLEX_HALF;
++
++		if (ss & AT803X_SS_MDIX)
++			phydev->mdix = ETH_TP_MDI_X;
++		else
++			phydev->mdix = ETH_TP_MDI;
++
++		switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
++		case AT803X_SFC_MANUAL_MDI:
++			phydev->mdix_ctrl = ETH_TP_MDI;
++			break;
++		case AT803X_SFC_MANUAL_MDIX:
++			phydev->mdix_ctrl = ETH_TP_MDI_X;
++			break;
++		case AT803X_SFC_AUTOMATIC_CROSSOVER:
++			phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
++			break;
++		}
++	}
++
++	return 0;
++}
++
++static int at803x_read_status(struct phy_device *phydev)
++{
++	struct at803x_ss_mask ss_mask = { 0 };
++	int err, old_link = phydev->link;
++
++	/* Update the link, but return if there was an error */
++	err = genphy_update_link(phydev);
++	if (err)
++		return err;
++
++	/* why bother the PHY if nothing can have changed */
++	if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
++		return 0;
++
++	phydev->speed = SPEED_UNKNOWN;
++	phydev->duplex = DUPLEX_UNKNOWN;
++	phydev->pause = 0;
++	phydev->asym_pause = 0;
++
++	err = genphy_read_lpa(phydev);
++	if (err < 0)
++		return err;
++
++	ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
++	ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
++	err = at803x_read_specific_status(phydev, ss_mask);
++	if (err < 0)
++		return err;
++
++	if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
++		phy_resolve_aneg_pause(phydev);
++
++	return 0;
++}
++
++static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
++{
++	u16 val;
++
++	switch (ctrl) {
++	case ETH_TP_MDI:
++		val = AT803X_SFC_MANUAL_MDI;
++		break;
++	case ETH_TP_MDI_X:
++		val = AT803X_SFC_MANUAL_MDIX;
++		break;
++	case ETH_TP_MDI_AUTO:
++		val = AT803X_SFC_AUTOMATIC_CROSSOVER;
++		break;
++	default:
++		return 0;
++	}
++
++	return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
++			  AT803X_SFC_MDI_CROSSOVER_MODE_M,
++			  FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
++}
++
++static int at803x_prepare_config_aneg(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
++	if (ret < 0)
++		return ret;
++
++	/* Changes of the midx bits are disruptive to the normal operation;
++	 * therefore any changes to these registers must be followed by a
++	 * software reset to take effect.
++	 */
++	if (ret == 1) {
++		ret = genphy_soft_reset(phydev);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++
++static int at803x_config_aneg(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	int ret;
++
++	ret = at803x_prepare_config_aneg(phydev);
++	if (ret)
++		return ret;
++
++	if (priv->is_1000basex)
++		return genphy_c37_config_aneg(phydev);
++
++	return genphy_config_aneg(phydev);
++}
++
++static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
++{
++	int val;
++
++	val = phy_read(phydev, AT803X_SMART_SPEED);
++	if (val < 0)
++		return val;
++
++	if (val & AT803X_SMART_SPEED_ENABLE)
++		*d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
++	else
++		*d = DOWNSHIFT_DEV_DISABLE;
++
++	return 0;
++}
++
++static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
++{
++	u16 mask, set;
++	int ret;
++
++	switch (cnt) {
++	case DOWNSHIFT_DEV_DEFAULT_COUNT:
++		cnt = AT803X_DEFAULT_DOWNSHIFT;
++		fallthrough;
++	case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
++		set = AT803X_SMART_SPEED_ENABLE |
++		      AT803X_SMART_SPEED_BYPASS_TIMER |
++		      FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
++		mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
++		break;
++	case DOWNSHIFT_DEV_DISABLE:
++		set = 0;
++		mask = AT803X_SMART_SPEED_ENABLE |
++		       AT803X_SMART_SPEED_BYPASS_TIMER;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
++
++	/* After changing the smart speed settings, we need to perform a
++	 * software reset, use phy_init_hw() to make sure we set the
++	 * reapply any values which might got lost during software reset.
++	 */
++	if (ret == 1)
++		ret = phy_init_hw(phydev);
++
++	return ret;
++}
++
++static int at803x_get_tunable(struct phy_device *phydev,
++			      struct ethtool_tunable *tuna, void *data)
++{
++	switch (tuna->id) {
++	case ETHTOOL_PHY_DOWNSHIFT:
++		return at803x_get_downshift(phydev, data);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++static int at803x_set_tunable(struct phy_device *phydev,
++			      struct ethtool_tunable *tuna, const void *data)
++{
++	switch (tuna->id) {
++	case ETHTOOL_PHY_DOWNSHIFT:
++		return at803x_set_downshift(phydev, *(const u8 *)data);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++static int at803x_cable_test_result_trans(u16 status)
++{
++	switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
++	case AT803X_CDT_STATUS_STAT_NORMAL:
++		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
++	case AT803X_CDT_STATUS_STAT_SHORT:
++		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
++	case AT803X_CDT_STATUS_STAT_OPEN:
++		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
++	case AT803X_CDT_STATUS_STAT_FAIL:
++	default:
++		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
++	}
++}
++
++static bool at803x_cdt_test_failed(u16 status)
++{
++	return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
++		AT803X_CDT_STATUS_STAT_FAIL;
++}
++
++static bool at803x_cdt_fault_length_valid(u16 status)
++{
++	switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
++	case AT803X_CDT_STATUS_STAT_OPEN:
++	case AT803X_CDT_STATUS_STAT_SHORT:
++		return true;
++	}
++	return false;
++}
++
++static int at803x_cdt_fault_length(int dt)
++{
++	/* According to the datasheet the distance to the fault is
++	 * DELTA_TIME * 0.824 meters.
++	 *
++	 * The author suspect the correct formula is:
++	 *
++	 *   fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
++	 *
++	 * where c is the speed of light, VF is the velocity factor of
++	 * the twisted pair cable, 125MHz the counter frequency and
++	 * we need to divide by 2 because the hardware will measure the
++	 * round trip time to the fault and back to the PHY.
++	 *
++	 * With a VF of 0.69 we get the factor 0.824 mentioned in the
++	 * datasheet.
++	 */
++	return (dt * 824) / 10;
++}
++
++static int at803x_cdt_start(struct phy_device *phydev,
++			    u32 cdt_start)
++{
++	return phy_write(phydev, AT803X_CDT, cdt_start);
++}
++
++static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
++					  u32 cdt_en)
++{
++	int val, ret;
++
++	/* One test run takes about 25ms */
++	ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
++				    !(val & cdt_en),
++				    30000, 100000, true);
++
++	return ret < 0 ? ret : 0;
++}
++
++static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
++{
++	static const int ethtool_pair[] = {
++		ETHTOOL_A_CABLE_PAIR_A,
++		ETHTOOL_A_CABLE_PAIR_B,
++		ETHTOOL_A_CABLE_PAIR_C,
++		ETHTOOL_A_CABLE_PAIR_D,
++	};
++	int ret, val;
++
++	val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
++	      AT803X_CDT_ENABLE_TEST;
++	ret = at803x_cdt_start(phydev, val);
++	if (ret)
++		return ret;
++
++	ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
++	if (ret)
++		return ret;
++
++	val = phy_read(phydev, AT803X_CDT_STATUS);
++	if (val < 0)
++		return val;
++
++	if (at803x_cdt_test_failed(val))
++		return 0;
++
++	ethnl_cable_test_result(phydev, ethtool_pair[pair],
++				at803x_cable_test_result_trans(val));
++
++	if (at803x_cdt_fault_length_valid(val)) {
++		val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
++		ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
++					      at803x_cdt_fault_length(val));
++	}
++
++	return 1;
++}
++
++static int at803x_cable_test_get_status(struct phy_device *phydev,
++					bool *finished, unsigned long pair_mask)
++{
++	int retries = 20;
++	int pair, ret;
++
++	*finished = false;
++
++	/* According to the datasheet the CDT can be performed when
++	 * there is no link partner or when the link partner is
++	 * auto-negotiating. Starting the test will restart the AN
++	 * automatically. It seems that doing this repeatedly we will
++	 * get a slot where our link partner won't disturb our
++	 * measurement.
++	 */
++	while (pair_mask && retries--) {
++		for_each_set_bit(pair, &pair_mask, 4) {
++			ret = at803x_cable_test_one_pair(phydev, pair);
++			if (ret < 0)
++				return ret;
++			if (ret)
++				clear_bit(pair, &pair_mask);
++		}
++		if (pair_mask)
++			msleep(250);
++	}
++
++	*finished = true;
++
++	return 0;
++}
++
++static void at803x_cable_test_autoneg(struct phy_device *phydev)
++{
++	/* Enable auto-negotiation, but advertise no capabilities, no link
++	 * will be established. A restart of the auto-negotiation is not
++	 * required, because the cable test will automatically break the link.
++	 */
++	phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
++	phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
++}
++
++static int at803x_cable_test_start(struct phy_device *phydev)
++{
++	at803x_cable_test_autoneg(phydev);
++	/* we do all the (time consuming) work later */
++	return 0;
++}
++
++static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
++					    unsigned int selector)
++{
++	struct phy_device *phydev = rdev_get_drvdata(rdev);
++
++	if (selector)
++		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
++					     0, AT803X_DEBUG_RGMII_1V8);
++	else
++		return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
++					     AT803X_DEBUG_RGMII_1V8, 0);
++}
++
++static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
++{
++	struct phy_device *phydev = rdev_get_drvdata(rdev);
++	int val;
++
++	val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
++	if (val < 0)
++		return val;
++
++	return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
++}
++
++static const struct regulator_ops vddio_regulator_ops = {
++	.list_voltage = regulator_list_voltage_table,
++	.set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
++	.get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
++};
++
++static const unsigned int vddio_voltage_table[] = {
++	1500000,
++	1800000,
++};
++
++static const struct regulator_desc vddio_desc = {
++	.name = "vddio",
++	.of_match = of_match_ptr("vddio-regulator"),
++	.n_voltages = ARRAY_SIZE(vddio_voltage_table),
++	.volt_table = vddio_voltage_table,
++	.ops = &vddio_regulator_ops,
++	.type = REGULATOR_VOLTAGE,
++	.owner = THIS_MODULE,
++};
++
++static const struct regulator_ops vddh_regulator_ops = {
++};
++
++static const struct regulator_desc vddh_desc = {
++	.name = "vddh",
++	.of_match = of_match_ptr("vddh-regulator"),
++	.n_voltages = 1,
++	.fixed_uV = 2500000,
++	.ops = &vddh_regulator_ops,
++	.type = REGULATOR_VOLTAGE,
++	.owner = THIS_MODULE,
++};
++
++static int at8031_register_regulators(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	struct device *dev = &phydev->mdio.dev;
++	struct regulator_config config = { };
++
++	config.dev = dev;
++	config.driver_data = phydev;
++
++	priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
++	if (IS_ERR(priv->vddio_rdev)) {
++		phydev_err(phydev, "failed to register VDDIO regulator\n");
++		return PTR_ERR(priv->vddio_rdev);
++	}
++
++	priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
++	if (IS_ERR(priv->vddh_rdev)) {
++		phydev_err(phydev, "failed to register VDDH regulator\n");
++		return PTR_ERR(priv->vddh_rdev);
++	}
++
++	return 0;
++}
++
++static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
++{
++	struct phy_device *phydev = upstream;
++	__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
++	__ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
++	DECLARE_PHY_INTERFACE_MASK(interfaces);
++	phy_interface_t iface;
++
++	linkmode_zero(phy_support);
++	phylink_set(phy_support, 1000baseX_Full);
++	phylink_set(phy_support, 1000baseT_Full);
++	phylink_set(phy_support, Autoneg);
++	phylink_set(phy_support, Pause);
++	phylink_set(phy_support, Asym_Pause);
++
++	linkmode_zero(sfp_support);
++	sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
++	/* Some modules support 10G modes as well as others we support.
++	 * Mask out non-supported modes so the correct interface is picked.
++	 */
++	linkmode_and(sfp_support, phy_support, sfp_support);
++
++	if (linkmode_empty(sfp_support)) {
++		dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
++		return -EINVAL;
++	}
++
++	iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
++
++	/* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
++	 * interface for use with SFP modules.
++	 * However, some copper modules detected as having a preferred SGMII
++	 * interface do default to and function in 1000Base-X mode, so just
++	 * print a warning and allow such modules, as they may have some chance
++	 * of working.
++	 */
++	if (iface == PHY_INTERFACE_MODE_SGMII)
++		dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
++	else if (iface != PHY_INTERFACE_MODE_1000BASEX)
++		return -EINVAL;
++
++	return 0;
++}
++
++static const struct sfp_upstream_ops at8031_sfp_ops = {
++	.attach = phy_sfp_attach,
++	.detach = phy_sfp_detach,
++	.module_insert = at8031_sfp_insert,
++};
++
++static int at8031_parse_dt(struct phy_device *phydev)
++{
++	struct device_node *node = phydev->mdio.dev.of_node;
++	struct at803x_priv *priv = phydev->priv;
++	int ret;
++
++	if (of_property_read_bool(node, "qca,keep-pll-enabled"))
++		priv->flags |= AT803X_KEEP_PLL_ENABLED;
++
++	ret = at8031_register_regulators(phydev);
++	if (ret < 0)
++		return ret;
++
++	ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
++						 "vddio");
++	if (ret) {
++		phydev_err(phydev, "failed to get VDDIO regulator\n");
++		return ret;
++	}
++
++	/* Only AR8031/8033 support 1000Base-X for SFP modules */
++	return phy_sfp_probe(phydev, &at8031_sfp_ops);
++}
++
++static int at8031_probe(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	int mode_cfg;
++	int ccr;
++	int ret;
++
++	ret = at803x_probe(phydev);
++	if (ret)
++		return ret;
++
++	/* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
++	 * options.
++	 */
++	ret = at8031_parse_dt(phydev);
++	if (ret)
++		return ret;
++
++	ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
++	if (ccr < 0)
++		return ccr;
++	mode_cfg = ccr & AT803X_MODE_CFG_MASK;
++
++	switch (mode_cfg) {
++	case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
++	case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
++		priv->is_1000basex = true;
++		fallthrough;
++	case AT803X_MODE_CFG_FX100_RGMII_50OHM:
++	case AT803X_MODE_CFG_FX100_RGMII_75OHM:
++		priv->is_fiber = true;
++		break;
++	}
++
++	/* Disable WoL in 1588 register which is enabled
++	 * by default
++	 */
++	return phy_modify_mmd(phydev, MDIO_MMD_PCS,
++			      AT803X_PHY_MMD3_WOL_CTRL,
++			      AT803X_WOL_EN, 0);
++}
++
++static int at8031_config_init(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	int ret;
++
++	/* Some bootloaders leave the fiber page selected.
++	 * Switch to the appropriate page (fiber or copper), as otherwise we
++	 * read the PHY capabilities from the wrong page.
++	 */
++	phy_lock_mdio_bus(phydev);
++	ret = at803x_write_page(phydev,
++				priv->is_fiber ? AT803X_PAGE_FIBER :
++						 AT803X_PAGE_COPPER);
++	phy_unlock_mdio_bus(phydev);
++	if (ret)
++		return ret;
++
++	ret = at8031_pll_config(phydev);
++	if (ret < 0)
++		return ret;
++
++	return at803x_config_init(phydev);
++}
++
++static int at8031_set_wol(struct phy_device *phydev,
++			  struct ethtool_wolinfo *wol)
++{
++	int ret;
++
++	/* First setup MAC address and enable WOL interrupt */
++	ret = at803x_set_wol(phydev, wol);
++	if (ret)
++		return ret;
++
++	if (wol->wolopts & WAKE_MAGIC)
++		/* Enable WOL function for 1588 */
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     AT803X_PHY_MMD3_WOL_CTRL,
++				     0, AT803X_WOL_EN);
++	else
++		/* Disable WoL function for 1588 */
++		ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++				     AT803X_PHY_MMD3_WOL_CTRL,
++				     AT803X_WOL_EN, 0);
++
++	return ret;
++}
++
++static int at8031_config_intr(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++	int err, value = 0;
++
++	if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
++	    priv->is_fiber) {
++		/* Clear any pending interrupts */
++		err = at803x_ack_interrupt(phydev);
++		if (err)
++			return err;
++
++		value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
++		value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
++
++		err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
++		if (err)
++			return err;
++	}
++
++	return at803x_config_intr(phydev);
++}
++
++/* AR8031 and AR8033 share the same read status logic */
++static int at8031_read_status(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++
++	if (priv->is_1000basex)
++		return genphy_c37_read_status(phydev);
++
++	return at803x_read_status(phydev);
++}
++
++/* AR8031 and AR8035 share the same cable test get status reg */
++static int at8031_cable_test_get_status(struct phy_device *phydev,
++					bool *finished)
++{
++	return at803x_cable_test_get_status(phydev, finished, 0xf);
++}
++
++/* AR8031 and AR8035 share the same cable test start logic */
++static int at8031_cable_test_start(struct phy_device *phydev)
++{
++	at803x_cable_test_autoneg(phydev);
++	phy_write(phydev, MII_CTRL1000, 0);
++	/* we do all the (time consuming) work later */
++	return 0;
++}
++
++/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
++static int at8032_cable_test_get_status(struct phy_device *phydev,
++					bool *finished)
++{
++	return at803x_cable_test_get_status(phydev, finished, 0x3);
++}
++
++static int at8035_parse_dt(struct phy_device *phydev)
++{
++	struct at803x_priv *priv = phydev->priv;
++
++	/* Mask is set by the generic at803x_parse_dt
++	 * if property is set. Assume property is set
++	 * with the mask not zero.
++	 */
++	if (priv->clk_25m_mask) {
++		/* Fixup for the AR8030/AR8035. This chip has another mask and
++		 * doesn't support the DSP reference. Eg. the lowest bit of the
++		 * mask. The upper two bits select the same frequencies. Mask
++		 * the lowest bit here.
++		 *
++		 * Warning:
++		 *   There was no datasheet for the AR8030 available so this is
++		 *   just a guess. But the AR8035 is listed as pin compatible
++		 *   to the AR8030 so there might be a good chance it works on
++		 *   the AR8030 too.
++		 */
++		priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
++		priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
++	}
++
++	return 0;
++}
++
++/* AR8030 and AR8035 shared the same special mask for clk_25m */
++static int at8035_probe(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = at803x_probe(phydev);
++	if (ret)
++		return ret;
++
++	return at8035_parse_dt(phydev);
++}
++
++static int qca83xx_config_init(struct phy_device *phydev)
++{
++	u8 switch_revision;
++
++	switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
++
++	switch (switch_revision) {
++	case 1:
++		/* For 100M waveform */
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
++		/* Turn on Gigabit clock */
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
++		break;
++
++	case 2:
++		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
++		fallthrough;
++	case 4:
++		phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
++		break;
++	}
++
++	/* Following original QCA sourcecode set port to prefer master */
++	phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
++
++	return 0;
++}
++
++static int qca8327_config_init(struct phy_device *phydev)
++{
++	/* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
++	 * Disable on init and enable only with 100m speed following
++	 * qca original source code.
++	 */
++	at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
++			      QCA8327_DEBUG_MANU_CTRL_EN, 0);
++
++	return qca83xx_config_init(phydev);
++}
++
++static void qca83xx_link_change_notify(struct phy_device *phydev)
++{
++	/* Set DAC Amplitude adjustment to +6% for 100m on link running */
++	if (phydev->state == PHY_RUNNING) {
++		if (phydev->speed == SPEED_100)
++			at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
++					      QCA8327_DEBUG_MANU_CTRL_EN,
++					      QCA8327_DEBUG_MANU_CTRL_EN);
++	} else {
++		/* Reset DAC Amplitude adjustment */
++		at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
++				      QCA8327_DEBUG_MANU_CTRL_EN, 0);
++	}
++}
++
++static int qca83xx_resume(struct phy_device *phydev)
++{
++	int ret, val;
++
++	/* Skip reset if not suspended */
++	if (!phydev->suspended)
++		return 0;
++
++	/* Reinit the port, reset values set by suspend */
++	qca83xx_config_init(phydev);
++
++	/* Reset the port on port resume */
++	phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
++
++	/* On resume from suspend the switch execute a reset and
++	 * restart auto-negotiation. Wait for reset to complete.
++	 */
++	ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
++				    50000, 600000, true);
++	if (ret)
++		return ret;
++
++	usleep_range(1000, 2000);
++
++	return 0;
++}
++
++static int qca83xx_suspend(struct phy_device *phydev)
++{
++	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
++			      AT803X_DEBUG_GATE_CLK_IN1000, 0);
++
++	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
++			      AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
++			      AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
++
++	return 0;
++}
++
++static int qca8337_suspend(struct phy_device *phydev)
++{
++	/* Only QCA8337 support actual suspend. */
++	genphy_suspend(phydev);
++
++	return qca83xx_suspend(phydev);
++}
++
++static int qca8327_suspend(struct phy_device *phydev)
++{
++	u16 mask = 0;
++
++	/* QCA8327 cause port unreliability when phy suspend
++	 * is set.
++	 */
++	mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
++	phy_modify(phydev, MII_BMCR, mask, 0);
++
++	return qca83xx_suspend(phydev);
++}
++
++static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Enable fast retrain */
++	ret = genphy_c45_fast_retrain(phydev, true);
++	if (ret)
++		return ret;
++
++	phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
++		      QCA808X_TOP_OPTION1_DATA);
++	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
++		      QCA808X_MSE_THRESHOLD_20DB_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
++		      QCA808X_MSE_THRESHOLD_17DB_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
++		      QCA808X_MSE_THRESHOLD_27DB_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
++		      QCA808X_MSE_THRESHOLD_28DB_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
++		      QCA808X_MMD3_DEBUG_1_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
++		      QCA808X_MMD3_DEBUG_4_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
++		      QCA808X_MMD3_DEBUG_5_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
++		      QCA808X_MMD3_DEBUG_3_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
++		      QCA808X_MMD3_DEBUG_6_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
++		      QCA808X_MMD3_DEBUG_2_VALUE);
++
++	return 0;
++}
++
++static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
++{
++	u16 seed_value;
++
++	if (!enable)
++		return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
++				QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
++
++	seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
++	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
++			QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
++			FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
++			QCA808X_MASTER_SLAVE_SEED_ENABLE);
++}
++
++static bool qca808x_is_prefer_master(struct phy_device *phydev)
++{
++	return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
++		(phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
++}
++
++static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
++{
++	return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
++}
++
++static int qca808x_config_init(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Active adc&vga on 802.3az for the link 1000M and 100M */
++	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
++			     QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
++	if (ret)
++		return ret;
++
++	/* Adjust the threshold on 802.3az for the link 1000M */
++	ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
++			    QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
++			    QCA808X_MMD3_AZ_TRAINING_VAL);
++	if (ret)
++		return ret;
++
++	if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
++		/* Config the fast retrain for the link 2500M */
++		ret = qca808x_phy_fast_retrain_config(phydev);
++		if (ret)
++			return ret;
++
++		ret = genphy_read_master_slave(phydev);
++		if (ret < 0)
++			return ret;
++
++		if (!qca808x_is_prefer_master(phydev)) {
++			/* Enable seed and configure lower ramdom seed to make phy
++			 * linked as slave mode.
++			 */
++			ret = qca808x_phy_ms_seed_enable(phydev, true);
++			if (ret)
++				return ret;
++		}
++	}
++
++	/* Configure adc threshold as 100mv for the link 10M */
++	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
++				     QCA808X_ADC_THRESHOLD_MASK,
++				     QCA808X_ADC_THRESHOLD_100MV);
++}
++
++static int qca808x_read_status(struct phy_device *phydev)
++{
++	struct at803x_ss_mask ss_mask = { 0 };
++	int ret;
++
++	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
++	if (ret < 0)
++		return ret;
++
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
++			 ret & MDIO_AN_10GBT_STAT_LP2_5G);
++
++	ret = genphy_read_status(phydev);
++	if (ret)
++		return ret;
++
++	/* qca8081 takes the different bits for speed value from at803x */
++	ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
++	ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
++	ret = at803x_read_specific_status(phydev, ss_mask);
++	if (ret < 0)
++		return ret;
++
++	if (phydev->link) {
++		if (phydev->speed == SPEED_2500)
++			phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
++		else
++			phydev->interface = PHY_INTERFACE_MODE_SGMII;
++	} else {
++		/* generate seed as a lower random value to make PHY linked as SLAVE easily,
++		 * except for master/slave configuration fault detected or the master mode
++		 * preferred.
++		 *
++		 * the reason for not putting this code into the function link_change_notify is
++		 * the corner case where the link partner is also the qca8081 PHY and the seed
++		 * value is configured as the same value, the link can't be up and no link change
++		 * occurs.
++		 */
++		if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
++			if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
++			    qca808x_is_prefer_master(phydev)) {
++				qca808x_phy_ms_seed_enable(phydev, false);
++			} else {
++				qca808x_phy_ms_seed_enable(phydev, true);
++			}
++		}
++	}
++
++	return 0;
++}
++
++static int qca808x_soft_reset(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = genphy_soft_reset(phydev);
++	if (ret < 0)
++		return ret;
++
++	if (qca808x_has_fast_retrain_or_slave_seed(phydev))
++		ret = qca808x_phy_ms_seed_enable(phydev, true);
++
++	return ret;
++}
++
++static bool qca808x_cdt_fault_length_valid(int cdt_code)
++{
++	switch (cdt_code) {
++	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
++		return true;
++	default:
++		return false;
++	}
++}
++
++static int qca808x_cable_test_result_trans(int cdt_code)
++{
++	switch (cdt_code) {
++	case QCA808X_CDT_STATUS_STAT_NORMAL:
++		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
++	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
++		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
++	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
++		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
++		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
++	case QCA808X_CDT_STATUS_STAT_FAIL:
++	default:
++		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
++	}
++}
++
++static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
++				    int result)
++{
++	int val;
++	u32 cdt_length_reg = 0;
++
++	switch (pair) {
++	case ETHTOOL_A_CABLE_PAIR_A:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_B:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_C:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_D:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
++	if (val < 0)
++		return val;
++
++	if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
++		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
++	else
++		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
++
++	return at803x_cdt_fault_length(val);
++}
++
++static int qca808x_cable_test_start(struct phy_device *phydev)
++{
++	int ret;
++
++	/* perform CDT with the following configs:
++	 * 1. disable hibernation.
++	 * 2. force PHY working in MDI mode.
++	 * 3. for PHY working in 1000BaseT.
++	 * 4. configure the threshold.
++	 */
++
++	ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
++	if (ret < 0)
++		return ret;
++
++	ret = at803x_config_mdix(phydev, ETH_TP_MDI);
++	if (ret < 0)
++		return ret;
++
++	/* Force 1000base-T needs to configure PMA/PMD and MII_BMCR */
++	phydev->duplex = DUPLEX_FULL;
++	phydev->speed = SPEED_1000;
++	ret = genphy_c45_pma_setup_forced(phydev);
++	if (ret < 0)
++		return ret;
++
++	ret = genphy_setup_forced(phydev);
++	if (ret < 0)
++		return ret;
++
++	/* configure the thresholds for open, short, pair ok test */
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
++
++	return 0;
++}
++
++static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
++					      u16 status)
++{
++	int length, result;
++	u16 pair_code;
++
++	switch (pair) {
++	case ETHTOOL_A_CABLE_PAIR_A:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_B:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_C:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_D:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	result = qca808x_cable_test_result_trans(pair_code);
++	ethnl_cable_test_result(phydev, pair, result);
++
++	if (qca808x_cdt_fault_length_valid(pair_code)) {
++		length = qca808x_cdt_fault_length(phydev, pair, result);
++		ethnl_cable_test_fault_length(phydev, pair, length);
++	}
++
++	return 0;
++}
++
++static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
++{
++	int ret, val;
++
++	*finished = false;
++
++	val = QCA808X_CDT_ENABLE_TEST |
++	      QCA808X_CDT_LENGTH_UNIT;
++	ret = at803x_cdt_start(phydev, val);
++	if (ret)
++		return ret;
++
++	ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
++	if (ret)
++		return ret;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
++	if (val < 0)
++		return val;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
++	if (ret)
++		return ret;
++
++	*finished = true;
++
++	return 0;
++}
++
++static int qca808x_get_features(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = genphy_c45_pma_read_abilities(phydev);
++	if (ret)
++		return ret;
++
++	/* The autoneg ability is not existed in bit3 of MMD7.1,
++	 * but it is supported by qca808x PHY, so we add it here
++	 * manually.
++	 */
++	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
++
++	/* As for the qca8081 1G version chip, the 2500baseT ability is also
++	 * existed in the bit0 of MMD1.21, we need to remove it manually if
++	 * it is the qca8081 1G chip according to the bit0 of MMD7.0x901d.
++	 */
++	ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
++	if (ret < 0)
++		return ret;
++
++	if (QCA808X_PHY_CHIP_TYPE_1G & ret)
++		linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
++
++	return 0;
++}
++
++static int qca808x_config_aneg(struct phy_device *phydev)
++{
++	int phy_ctrl = 0;
++	int ret;
++
++	ret = at803x_prepare_config_aneg(phydev);
++	if (ret)
++		return ret;
++
++	/* The reg MII_BMCR also needs to be configured for force mode, the
++	 * genphy_config_aneg is also needed.
++	 */
++	if (phydev->autoneg == AUTONEG_DISABLE)
++		genphy_c45_pma_setup_forced(phydev);
++
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
++		phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
++
++	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
++				     MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
++	if (ret < 0)
++		return ret;
++
++	return __genphy_config_aneg(phydev, ret);
++}
++
++static void qca808x_link_change_notify(struct phy_device *phydev)
++{
++	/* Assert interface sgmii fifo on link down, deassert it on link up,
++	 * the interface device address is always phy address added by 1.
++	 */
++	mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
++				   MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
++				   QCA8081_PHY_FIFO_RSTN,
++				   phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
++}
++
++static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
++				    u16 *offload_trigger)
++{
++	/* Parsing specific to netdev trigger */
++	if (test_bit(TRIGGER_NETDEV_TX, &rules))
++		*offload_trigger |= QCA808X_LED_TX_BLINK;
++	if (test_bit(TRIGGER_NETDEV_RX, &rules))
++		*offload_trigger |= QCA808X_LED_RX_BLINK;
++	if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED10_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED100_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED1000_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED2500_ON;
++	if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
++		*offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
++	if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
++		*offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
++
++	if (rules && !*offload_trigger)
++		return -EOPNOTSUPP;
++
++	/* Enable BLINK_CHECK_BYPASS by default to make the LED
++	 * blink even with duplex or speed mode not enabled.
++	 */
++	*offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
++
++	return 0;
++}
++
++static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
++				  QCA808X_LED_FORCE_EN);
++}
++
++static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
++				       unsigned long rules)
++{
++	u16 offload_trigger = 0;
++
++	if (index > 2)
++		return -EINVAL;
++
++	return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
++}
++
++static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
++				      unsigned long rules)
++{
++	u16 reg, offload_trigger = 0;
++	int ret;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
++	if (ret)
++		return ret;
++
++	ret = qca808x_led_hw_control_enable(phydev, index);
++	if (ret)
++		return ret;
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			      QCA808X_LED_PATTERN_MASK,
++			      offload_trigger);
++}
++
++static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++	int val;
++
++	if (index > 2)
++		return false;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
++
++	return !(val & QCA808X_LED_FORCE_EN);
++}
++
++static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
++				      unsigned long *rules)
++{
++	u16 reg;
++	int val;
++
++	if (index > 2)
++		return -EINVAL;
++
++	/* Check if we have hw control enabled */
++	if (qca808x_led_hw_control_status(phydev, index))
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
++	if (val & QCA808X_LED_TX_BLINK)
++		set_bit(TRIGGER_NETDEV_TX, rules);
++	if (val & QCA808X_LED_RX_BLINK)
++		set_bit(TRIGGER_NETDEV_RX, rules);
++	if (val & QCA808X_LED_SPEED10_ON)
++		set_bit(TRIGGER_NETDEV_LINK_10, rules);
++	if (val & QCA808X_LED_SPEED100_ON)
++		set_bit(TRIGGER_NETDEV_LINK_100, rules);
++	if (val & QCA808X_LED_SPEED1000_ON)
++		set_bit(TRIGGER_NETDEV_LINK_1000, rules);
++	if (val & QCA808X_LED_SPEED2500_ON)
++		set_bit(TRIGGER_NETDEV_LINK_2500, rules);
++	if (val & QCA808X_LED_HALF_DUPLEX_ON)
++		set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
++	if (val & QCA808X_LED_FULL_DUPLEX_ON)
++		set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
++
++	return 0;
++}
++
++static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
++				  QCA808X_LED_PATTERN_MASK);
++}
++
++static int qca808x_led_brightness_set(struct phy_device *phydev,
++				      u8 index, enum led_brightness value)
++{
++	u16 reg;
++	int ret;
++
++	if (index > 2)
++		return -EINVAL;
++
++	if (!value) {
++		ret = qca808x_led_hw_control_reset(phydev, index);
++		if (ret)
++			return ret;
++	}
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			      QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
++			      QCA808X_LED_FORCE_EN | value ? QCA808X_LED_FORCE_ON :
++							     QCA808X_LED_FORCE_OFF);
++}
++
++static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
++				 unsigned long *delay_on,
++				 unsigned long *delay_off)
++{
++	int ret;
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	/* Set blink to 50% off, 50% on at 4Hz by default */
++	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
++			     QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
++			     QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
++	if (ret)
++		return ret;
++
++	/* We use BLINK_1 for normal blinking */
++	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
++			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
++	if (ret)
++		return ret;
++
++	/* We set blink to 4Hz, aka 250ms */
++	*delay_on = 250 / 2;
++	*delay_off = 250 / 2;
++
++	return 0;
++}
++
++static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
++				    unsigned long modes)
++{
++	struct at803x_priv *priv = phydev->priv;
++	bool active_low = false;
++	u32 mode;
++
++	for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
++		switch (mode) {
++		case PHY_LED_ACTIVE_LOW:
++			active_low = true;
++			break;
++		default:
++			return -EINVAL;
++		}
++	}
++
++	/* PHY polarity is global and can't be set per LED.
++	 * To detect this, check if last requested polarity mode
++	 * match the new one.
++	 */
++	if (priv->led_polarity_mode >= 0 &&
++	    priv->led_polarity_mode != active_low) {
++		phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
++		return -EINVAL;
++	}
++
++	/* Save the last PHY polarity mode */
++	priv->led_polarity_mode = active_low;
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN,
++			      QCA808X_MMD7_LED_POLARITY_CTRL,
++			      QCA808X_LED_ACTIVE_HIGH,
++			      active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
++}
++
++static struct phy_driver at803x_driver[] = {
++{
++	/* Qualcomm Atheros AR8035 */
++	PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
++	.name			= "Qualcomm Atheros AR8035",
++	.flags			= PHY_POLL_CABLE_TEST,
++	.probe			= at8035_probe,
++	.config_aneg		= at803x_config_aneg,
++	.config_init		= at803x_config_init,
++	.soft_reset		= genphy_soft_reset,
++	.set_wol		= at803x_set_wol,
++	.get_wol		= at803x_get_wol,
++	.suspend		= at803x_suspend,
++	.resume			= at803x_resume,
++	/* PHY_GBIT_FEATURES */
++	.read_status		= at803x_read_status,
++	.config_intr		= at803x_config_intr,
++	.handle_interrupt	= at803x_handle_interrupt,
++	.get_tunable		= at803x_get_tunable,
++	.set_tunable		= at803x_set_tunable,
++	.cable_test_start	= at8031_cable_test_start,
++	.cable_test_get_status	= at8031_cable_test_get_status,
++}, {
++	/* Qualcomm Atheros AR8030 */
++	.phy_id			= ATH8030_PHY_ID,
++	.name			= "Qualcomm Atheros AR8030",
++	.phy_id_mask		= AT8030_PHY_ID_MASK,
++	.probe			= at8035_probe,
++	.config_init		= at803x_config_init,
++	.link_change_notify	= at803x_link_change_notify,
++	.set_wol		= at803x_set_wol,
++	.get_wol		= at803x_get_wol,
++	.suspend		= at803x_suspend,
++	.resume			= at803x_resume,
++	/* PHY_BASIC_FEATURES */
++	.config_intr		= at803x_config_intr,
++	.handle_interrupt	= at803x_handle_interrupt,
++}, {
++	/* Qualcomm Atheros AR8031/AR8033 */
++	PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
++	.name			= "Qualcomm Atheros AR8031/AR8033",
++	.flags			= PHY_POLL_CABLE_TEST,
++	.probe			= at8031_probe,
++	.config_init		= at8031_config_init,
++	.config_aneg		= at803x_config_aneg,
++	.soft_reset		= genphy_soft_reset,
++	.set_wol		= at8031_set_wol,
++	.get_wol		= at803x_get_wol,
++	.suspend		= at803x_suspend,
++	.resume			= at803x_resume,
++	.read_page		= at803x_read_page,
++	.write_page		= at803x_write_page,
++	.get_features		= at803x_get_features,
++	.read_status		= at8031_read_status,
++	.config_intr		= at8031_config_intr,
++	.handle_interrupt	= at803x_handle_interrupt,
++	.get_tunable		= at803x_get_tunable,
++	.set_tunable		= at803x_set_tunable,
++	.cable_test_start	= at8031_cable_test_start,
++	.cable_test_get_status	= at8031_cable_test_get_status,
++}, {
++	/* Qualcomm Atheros AR8032 */
++	PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
++	.name			= "Qualcomm Atheros AR8032",
++	.probe			= at803x_probe,
++	.flags			= PHY_POLL_CABLE_TEST,
++	.config_init		= at803x_config_init,
++	.link_change_notify	= at803x_link_change_notify,
++	.suspend		= at803x_suspend,
++	.resume			= at803x_resume,
++	/* PHY_BASIC_FEATURES */
++	.config_intr		= at803x_config_intr,
++	.handle_interrupt	= at803x_handle_interrupt,
++	.cable_test_start	= at803x_cable_test_start,
++	.cable_test_get_status	= at8032_cable_test_get_status,
++}, {
++	/* ATHEROS AR9331 */
++	PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
++	.name			= "Qualcomm Atheros AR9331 built-in PHY",
++	.probe			= at803x_probe,
++	.suspend		= at803x_suspend,
++	.resume			= at803x_resume,
++	.flags			= PHY_POLL_CABLE_TEST,
++	/* PHY_BASIC_FEATURES */
++	.config_intr		= at803x_config_intr,
++	.handle_interrupt	= at803x_handle_interrupt,
++	.cable_test_start	= at803x_cable_test_start,
++	.cable_test_get_status	= at8032_cable_test_get_status,
++	.read_status		= at803x_read_status,
++	.soft_reset		= genphy_soft_reset,
++	.config_aneg		= at803x_config_aneg,
++}, {
++	/* Qualcomm Atheros QCA9561 */
++	PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
++	.name			= "Qualcomm Atheros QCA9561 built-in PHY",
++	.probe			= at803x_probe,
++	.suspend		= at803x_suspend,
++	.resume			= at803x_resume,
++	.flags			= PHY_POLL_CABLE_TEST,
++	/* PHY_BASIC_FEATURES */
++	.config_intr		= at803x_config_intr,
++	.handle_interrupt	= at803x_handle_interrupt,
++	.cable_test_start	= at803x_cable_test_start,
++	.cable_test_get_status	= at8032_cable_test_get_status,
++	.read_status		= at803x_read_status,
++	.soft_reset		= genphy_soft_reset,
++	.config_aneg		= at803x_config_aneg,
++}, {
++	/* QCA8337 */
++	.phy_id			= QCA8337_PHY_ID,
++	.phy_id_mask		= QCA8K_PHY_ID_MASK,
++	.name			= "Qualcomm Atheros 8337 internal PHY",
++	/* PHY_GBIT_FEATURES */
++	.probe			= at803x_probe,
++	.flags			= PHY_IS_INTERNAL,
++	.config_init		= qca83xx_config_init,
++	.soft_reset		= genphy_soft_reset,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
++	.suspend		= qca8337_suspend,
++	.resume			= qca83xx_resume,
++}, {
++	/* QCA8327-A from switch QCA8327-AL1A */
++	.phy_id			= QCA8327_A_PHY_ID,
++	.phy_id_mask		= QCA8K_PHY_ID_MASK,
++	.name			= "Qualcomm Atheros 8327-A internal PHY",
++	/* PHY_GBIT_FEATURES */
++	.link_change_notify	= qca83xx_link_change_notify,
++	.probe			= at803x_probe,
++	.flags			= PHY_IS_INTERNAL,
++	.config_init		= qca8327_config_init,
++	.soft_reset		= genphy_soft_reset,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
++	.suspend		= qca8327_suspend,
++	.resume			= qca83xx_resume,
++}, {
++	/* QCA8327-B from switch QCA8327-BL1A */
++	.phy_id			= QCA8327_B_PHY_ID,
++	.phy_id_mask		= QCA8K_PHY_ID_MASK,
++	.name			= "Qualcomm Atheros 8327-B internal PHY",
++	/* PHY_GBIT_FEATURES */
++	.link_change_notify	= qca83xx_link_change_notify,
++	.probe			= at803x_probe,
++	.flags			= PHY_IS_INTERNAL,
++	.config_init		= qca8327_config_init,
++	.soft_reset		= genphy_soft_reset,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
++	.suspend		= qca8327_suspend,
++	.resume			= qca83xx_resume,
++}, {
++	/* Qualcomm QCA8081 */
++	PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
++	.name			= "Qualcomm QCA8081",
++	.flags			= PHY_POLL_CABLE_TEST,
++	.probe			= at803x_probe,
++	.config_intr		= at803x_config_intr,
++	.handle_interrupt	= at803x_handle_interrupt,
++	.get_tunable		= at803x_get_tunable,
++	.set_tunable		= at803x_set_tunable,
++	.set_wol		= at803x_set_wol,
++	.get_wol		= at803x_get_wol,
++	.get_features		= qca808x_get_features,
++	.config_aneg		= qca808x_config_aneg,
++	.suspend		= genphy_suspend,
++	.resume			= genphy_resume,
++	.read_status		= qca808x_read_status,
++	.config_init		= qca808x_config_init,
++	.soft_reset		= qca808x_soft_reset,
++	.cable_test_start	= qca808x_cable_test_start,
++	.cable_test_get_status	= qca808x_cable_test_get_status,
++	.link_change_notify	= qca808x_link_change_notify,
++	.led_brightness_set	= qca808x_led_brightness_set,
++	.led_blink_set		= qca808x_led_blink_set,
++	.led_hw_is_supported	= qca808x_led_hw_is_supported,
++	.led_hw_control_set	= qca808x_led_hw_control_set,
++	.led_hw_control_get	= qca808x_led_hw_control_get,
++	.led_polarity_set	= qca808x_led_polarity_set,
++}, };
++
++module_phy_driver(at803x_driver);
++
++static struct mdio_device_id __maybe_unused atheros_tbl[] = {
++	{ ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
++	{ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
++	{ }
++};
++
++MODULE_DEVICE_TABLE(mdio, atheros_tbl);

+ 243 - 0
target/linux/generic/backport-6.6/713-v6.9-02-net-phy-qcom-create-and-move-functions-to-shared-lib.patch

@@ -0,0 +1,243 @@
+From 6fb760972c49490b03f3db2ad64cf30bdd28c54a Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Mon, 29 Jan 2024 15:15:20 +0100
+Subject: [PATCH 2/5] net: phy: qcom: create and move functions to shared
+ library
+
+Create and move functions to shared library in preparation for qca83xx
+PHY Family to be detached from at803x driver.
+
+Only the shared defines are moved to the shared qcom.h header.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ drivers/net/phy/qcom/Kconfig        |  4 ++
+ drivers/net/phy/qcom/Makefile       |  1 +
+ drivers/net/phy/qcom/at803x.c       | 69 +----------------------------
+ drivers/net/phy/qcom/qcom-phy-lib.c | 53 ++++++++++++++++++++++
+ drivers/net/phy/qcom/qcom.h         | 34 ++++++++++++++
+ 5 files changed, 94 insertions(+), 67 deletions(-)
+ create mode 100644 drivers/net/phy/qcom/qcom-phy-lib.c
+ create mode 100644 drivers/net/phy/qcom/qcom.h
+
+--- a/drivers/net/phy/qcom/Kconfig
++++ b/drivers/net/phy/qcom/Kconfig
+@@ -1,6 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0-only
++config QCOM_NET_PHYLIB
++	tristate
++
+ config AT803X_PHY
+ 	tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
++	select QCOM_NET_PHYLIB
+ 	depends on REGULATOR
+ 	help
+ 	  Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
+--- a/drivers/net/phy/qcom/Makefile
++++ b/drivers/net/phy/qcom/Makefile
+@@ -1,2 +1,3 @@
+ # SPDX-License-Identifier: GPL-2.0
++obj-$(CONFIG_QCOM_NET_PHYLIB)	+= qcom-phy-lib.o
+ obj-$(CONFIG_AT803X_PHY)	+= at803x.o
+--- a/drivers/net/phy/qcom/at803x.c
++++ b/drivers/net/phy/qcom/at803x.c
+@@ -22,6 +22,8 @@
+ #include <linux/sfp.h>
+ #include <dt-bindings/net/qca-ar803x.h>
+ 
++#include "qcom.h"
++
+ #define AT803X_SPECIFIC_FUNCTION_CONTROL	0x10
+ #define AT803X_SFC_ASSERT_CRS			BIT(11)
+ #define AT803X_SFC_FORCE_LINK			BIT(10)
+@@ -84,9 +86,6 @@
+ #define AT803X_REG_CHIP_CONFIG			0x1f
+ #define AT803X_BT_BX_REG_SEL			0x8000
+ 
+-#define AT803X_DEBUG_ADDR			0x1D
+-#define AT803X_DEBUG_DATA			0x1E
+-
+ #define AT803X_MODE_CFG_MASK			0x0F
+ #define AT803X_MODE_CFG_BASET_RGMII		0x00
+ #define AT803X_MODE_CFG_BASET_SGMII		0x01
+@@ -103,19 +102,6 @@
+ #define AT803X_PSSR				0x11	/*PHY-Specific Status Register*/
+ #define AT803X_PSSR_MR_AN_COMPLETE		0x0200
+ 
+-#define AT803X_DEBUG_ANALOG_TEST_CTRL		0x00
+-#define QCA8327_DEBUG_MANU_CTRL_EN		BIT(2)
+-#define QCA8337_DEBUG_MANU_CTRL_EN		GENMASK(3, 2)
+-#define AT803X_DEBUG_RX_CLK_DLY_EN		BIT(15)
+-
+-#define AT803X_DEBUG_SYSTEM_CTRL_MODE		0x05
+-#define AT803X_DEBUG_TX_CLK_DLY_EN		BIT(8)
+-
+-#define AT803X_DEBUG_REG_HIB_CTRL		0x0b
+-#define   AT803X_DEBUG_HIB_CTRL_SEL_RST_80U	BIT(10)
+-#define   AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE	BIT(13)
+-#define   AT803X_DEBUG_HIB_CTRL_PS_HIB_EN	BIT(15)
+-
+ #define AT803X_DEBUG_REG_3C			0x3C
+ 
+ #define AT803X_DEBUG_REG_GREEN			0x3D
+@@ -393,18 +379,6 @@ MODULE_DESCRIPTION("Qualcomm Atheros AR8
+ MODULE_AUTHOR("Matus Ujhelyi");
+ MODULE_LICENSE("GPL");
+ 
+-enum stat_access_type {
+-	PHY,
+-	MMD
+-};
+-
+-struct at803x_hw_stat {
+-	const char *string;
+-	u8 reg;
+-	u32 mask;
+-	enum stat_access_type access_type;
+-};
+-
+ static struct at803x_hw_stat qca83xx_hw_stats[] = {
+ 	{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+ 	{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+@@ -439,45 +413,6 @@ struct at803x_context {
+ 	u16 led_control;
+ };
+ 
+-static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
+-{
+-	int ret;
+-
+-	ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+-	if (ret < 0)
+-		return ret;
+-
+-	return phy_write(phydev, AT803X_DEBUG_DATA, data);
+-}
+-
+-static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
+-{
+-	int ret;
+-
+-	ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+-	if (ret < 0)
+-		return ret;
+-
+-	return phy_read(phydev, AT803X_DEBUG_DATA);
+-}
+-
+-static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+-				 u16 clear, u16 set)
+-{
+-	u16 val;
+-	int ret;
+-
+-	ret = at803x_debug_reg_read(phydev, reg);
+-	if (ret < 0)
+-		return ret;
+-
+-	val = ret & 0xffff;
+-	val &= ~clear;
+-	val |= set;
+-
+-	return phy_write(phydev, AT803X_DEBUG_DATA, val);
+-}
+-
+ static int at803x_write_page(struct phy_device *phydev, int page)
+ {
+ 	int mask;
+--- /dev/null
++++ b/drivers/net/phy/qcom/qcom-phy-lib.c
+@@ -0,0 +1,53 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/phy.h>
++#include <linux/module.h>
++
++#include "qcom.h"
++
++MODULE_DESCRIPTION("Qualcomm PHY driver Common Functions");
++MODULE_AUTHOR("Matus Ujhelyi");
++MODULE_AUTHOR("Christian Marangi <[email protected]>");
++MODULE_LICENSE("GPL");
++
++int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
++{
++	int ret;
++
++	ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
++	if (ret < 0)
++		return ret;
++
++	return phy_read(phydev, AT803X_DEBUG_DATA);
++}
++EXPORT_SYMBOL_GPL(at803x_debug_reg_read);
++
++int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
++			  u16 clear, u16 set)
++{
++	u16 val;
++	int ret;
++
++	ret = at803x_debug_reg_read(phydev, reg);
++	if (ret < 0)
++		return ret;
++
++	val = ret & 0xffff;
++	val &= ~clear;
++	val |= set;
++
++	return phy_write(phydev, AT803X_DEBUG_DATA, val);
++}
++EXPORT_SYMBOL_GPL(at803x_debug_reg_mask);
++
++int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
++{
++	int ret;
++
++	ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
++	if (ret < 0)
++		return ret;
++
++	return phy_write(phydev, AT803X_DEBUG_DATA, data);
++}
++EXPORT_SYMBOL_GPL(at803x_debug_reg_write);
+--- /dev/null
++++ b/drivers/net/phy/qcom/qcom.h
+@@ -0,0 +1,34 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#define AT803X_DEBUG_ADDR			0x1D
++#define AT803X_DEBUG_DATA			0x1E
++
++#define AT803X_DEBUG_ANALOG_TEST_CTRL		0x00
++#define QCA8327_DEBUG_MANU_CTRL_EN		BIT(2)
++#define QCA8337_DEBUG_MANU_CTRL_EN		GENMASK(3, 2)
++#define AT803X_DEBUG_RX_CLK_DLY_EN		BIT(15)
++
++#define AT803X_DEBUG_SYSTEM_CTRL_MODE		0x05
++#define AT803X_DEBUG_TX_CLK_DLY_EN		BIT(8)
++
++#define AT803X_DEBUG_REG_HIB_CTRL		0x0b
++#define   AT803X_DEBUG_HIB_CTRL_SEL_RST_80U	BIT(10)
++#define   AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE	BIT(13)
++#define   AT803X_DEBUG_HIB_CTRL_PS_HIB_EN	BIT(15)
++
++enum stat_access_type {
++	PHY,
++	MMD
++};
++
++struct at803x_hw_stat {
++	const char *string;
++	u8 reg;
++	u32 mask;
++	enum stat_access_type access_type;
++};
++
++int at803x_debug_reg_read(struct phy_device *phydev, u16 reg);
++int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
++			  u16 clear, u16 set);
++int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);

+ 638 - 0
target/linux/generic/backport-6.6/713-v6.9-03-net-phy-qcom-deatch-qca83xx-PHY-driver-from-at803x.patch

@@ -0,0 +1,638 @@
+From 2e45d404d99d43bb7127b74b5dea8818df64996c Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Mon, 29 Jan 2024 15:15:21 +0100
+Subject: [PATCH 3/5] net: phy: qcom: deatch qca83xx PHY driver from at803x
+
+Deatch qca83xx PHY driver from at803x.
+
+The QCA83xx PHYs implement specific function and doesn't use generic
+at803x so it can be detached from the driver and moved to a dedicated
+one.
+
+Probe function and priv struct is reimplemented to allocate and use
+only the qca83xx specific data. Unused data from at803x PHY driver
+are dropped from at803x priv struct.
+
+This is to make slimmer PHY drivers instead of including lots of bloat
+that would never be used in specific SoC.
+
+A new Kconfig flag QCA83XX_PHY is introduced to compile the new
+introduced PHY driver.
+
+As the Kconfig name starts with Qualcomm the same order is kept.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ drivers/net/phy/qcom/Kconfig   |  11 +-
+ drivers/net/phy/qcom/Makefile  |   1 +
+ drivers/net/phy/qcom/at803x.c  | 235 ----------------------------
+ drivers/net/phy/qcom/qca83xx.c | 275 +++++++++++++++++++++++++++++++++
+ 4 files changed, 284 insertions(+), 238 deletions(-)
+ create mode 100644 drivers/net/phy/qcom/qca83xx.c
+
+--- a/drivers/net/phy/qcom/Kconfig
++++ b/drivers/net/phy/qcom/Kconfig
+@@ -3,9 +3,14 @@ config QCOM_NET_PHYLIB
+ 	tristate
+ 
+ config AT803X_PHY
+-	tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
++	tristate "Qualcomm Atheros AR803X PHYs"
+ 	select QCOM_NET_PHYLIB
+ 	depends on REGULATOR
+ 	help
+-	  Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
+-	  QCA8337(Internal qca8k PHY) model
++	  Currently supports the AR8030, AR8031, AR8033, AR8035 model
++
++config QCA83XX_PHY
++	tristate "Qualcomm Atheros QCA833x PHYs"
++	select QCOM_NET_PHYLIB
++	help
++	  Currently supports the internal QCA8337(Internal qca8k PHY) model
+--- a/drivers/net/phy/qcom/Makefile
++++ b/drivers/net/phy/qcom/Makefile
+@@ -1,3 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_QCOM_NET_PHYLIB)	+= qcom-phy-lib.o
+ obj-$(CONFIG_AT803X_PHY)	+= at803x.o
++obj-$(CONFIG_QCA83XX_PHY)	+= qca83xx.o
+--- a/drivers/net/phy/qcom/at803x.c
++++ b/drivers/net/phy/qcom/at803x.c
+@@ -102,17 +102,10 @@
+ #define AT803X_PSSR				0x11	/*PHY-Specific Status Register*/
+ #define AT803X_PSSR_MR_AN_COMPLETE		0x0200
+ 
+-#define AT803X_DEBUG_REG_3C			0x3C
+-
+-#define AT803X_DEBUG_REG_GREEN			0x3D
+-#define   AT803X_DEBUG_GATE_CLK_IN1000		BIT(6)
+-
+ #define AT803X_DEBUG_REG_1F			0x1F
+ #define AT803X_DEBUG_PLL_ON			BIT(2)
+ #define AT803X_DEBUG_RGMII_1V8			BIT(3)
+ 
+-#define MDIO_AZ_DEBUG				0x800D
+-
+ /* AT803x supports either the XTAL input pad, an internal PLL or the
+  * DSP as clock reference for the clock output pad. The XTAL reference
+  * is only used for 25 MHz output, all other frequencies need the PLL.
+@@ -163,13 +156,7 @@
+ 
+ #define QCA8081_PHY_ID				0x004dd101
+ 
+-#define QCA8327_A_PHY_ID			0x004dd033
+-#define QCA8327_B_PHY_ID			0x004dd034
+-#define QCA8337_PHY_ID				0x004dd036
+ #define QCA9561_PHY_ID				0x004dd042
+-#define QCA8K_PHY_ID_MASK			0xffffffff
+-
+-#define QCA8K_DEVFLAGS_REVISION_MASK		GENMASK(2, 0)
+ 
+ #define AT803X_PAGE_FIBER			0
+ #define AT803X_PAGE_COPPER			1
+@@ -379,12 +366,6 @@ MODULE_DESCRIPTION("Qualcomm Atheros AR8
+ MODULE_AUTHOR("Matus Ujhelyi");
+ MODULE_LICENSE("GPL");
+ 
+-static struct at803x_hw_stat qca83xx_hw_stats[] = {
+-	{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+-	{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+-	{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+-};
+-
+ struct at803x_ss_mask {
+ 	u16 speed_mask;
+ 	u8 speed_shift;
+@@ -400,7 +381,6 @@ struct at803x_priv {
+ 	bool is_1000basex;
+ 	struct regulator_dev *vddio_rdev;
+ 	struct regulator_dev *vddh_rdev;
+-	u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+ 	int led_polarity_mode;
+ };
+ 
+@@ -564,53 +544,6 @@ static void at803x_get_wol(struct phy_de
+ 		wol->wolopts |= WAKE_MAGIC;
+ }
+ 
+-static int qca83xx_get_sset_count(struct phy_device *phydev)
+-{
+-	return ARRAY_SIZE(qca83xx_hw_stats);
+-}
+-
+-static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
+-		strscpy(data + i * ETH_GSTRING_LEN,
+-			qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
+-	}
+-}
+-
+-static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
+-{
+-	struct at803x_hw_stat stat = qca83xx_hw_stats[i];
+-	struct at803x_priv *priv = phydev->priv;
+-	int val;
+-	u64 ret;
+-
+-	if (stat.access_type == MMD)
+-		val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
+-	else
+-		val = phy_read(phydev, stat.reg);
+-
+-	if (val < 0) {
+-		ret = U64_MAX;
+-	} else {
+-		val = val & stat.mask;
+-		priv->stats[i] += val;
+-		ret = priv->stats[i];
+-	}
+-
+-	return ret;
+-}
+-
+-static void qca83xx_get_stats(struct phy_device *phydev,
+-			      struct ethtool_stats *stats, u64 *data)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+-		data[i] = qca83xx_get_stat(phydev, i);
+-}
+-
+ static int at803x_suspend(struct phy_device *phydev)
+ {
+ 	int value;
+@@ -1707,124 +1640,6 @@ static int at8035_probe(struct phy_devic
+ 	return at8035_parse_dt(phydev);
+ }
+ 
+-static int qca83xx_config_init(struct phy_device *phydev)
+-{
+-	u8 switch_revision;
+-
+-	switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
+-
+-	switch (switch_revision) {
+-	case 1:
+-		/* For 100M waveform */
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
+-		/* Turn on Gigabit clock */
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
+-		break;
+-
+-	case 2:
+-		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
+-		fallthrough;
+-	case 4:
+-		phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
+-		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
+-		break;
+-	}
+-
+-	/* Following original QCA sourcecode set port to prefer master */
+-	phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+-
+-	return 0;
+-}
+-
+-static int qca8327_config_init(struct phy_device *phydev)
+-{
+-	/* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
+-	 * Disable on init and enable only with 100m speed following
+-	 * qca original source code.
+-	 */
+-	at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+-			      QCA8327_DEBUG_MANU_CTRL_EN, 0);
+-
+-	return qca83xx_config_init(phydev);
+-}
+-
+-static void qca83xx_link_change_notify(struct phy_device *phydev)
+-{
+-	/* Set DAC Amplitude adjustment to +6% for 100m on link running */
+-	if (phydev->state == PHY_RUNNING) {
+-		if (phydev->speed == SPEED_100)
+-			at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+-					      QCA8327_DEBUG_MANU_CTRL_EN,
+-					      QCA8327_DEBUG_MANU_CTRL_EN);
+-	} else {
+-		/* Reset DAC Amplitude adjustment */
+-		at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+-				      QCA8327_DEBUG_MANU_CTRL_EN, 0);
+-	}
+-}
+-
+-static int qca83xx_resume(struct phy_device *phydev)
+-{
+-	int ret, val;
+-
+-	/* Skip reset if not suspended */
+-	if (!phydev->suspended)
+-		return 0;
+-
+-	/* Reinit the port, reset values set by suspend */
+-	qca83xx_config_init(phydev);
+-
+-	/* Reset the port on port resume */
+-	phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+-
+-	/* On resume from suspend the switch execute a reset and
+-	 * restart auto-negotiation. Wait for reset to complete.
+-	 */
+-	ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+-				    50000, 600000, true);
+-	if (ret)
+-		return ret;
+-
+-	usleep_range(1000, 2000);
+-
+-	return 0;
+-}
+-
+-static int qca83xx_suspend(struct phy_device *phydev)
+-{
+-	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+-			      AT803X_DEBUG_GATE_CLK_IN1000, 0);
+-
+-	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+-			      AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+-			      AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+-
+-	return 0;
+-}
+-
+-static int qca8337_suspend(struct phy_device *phydev)
+-{
+-	/* Only QCA8337 support actual suspend. */
+-	genphy_suspend(phydev);
+-
+-	return qca83xx_suspend(phydev);
+-}
+-
+-static int qca8327_suspend(struct phy_device *phydev)
+-{
+-	u16 mask = 0;
+-
+-	/* QCA8327 cause port unreliability when phy suspend
+-	 * is set.
+-	 */
+-	mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+-	phy_modify(phydev, MII_BMCR, mask, 0);
+-
+-	return qca83xx_suspend(phydev);
+-}
+-
+ static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+ {
+ 	int ret;
+@@ -2599,53 +2414,6 @@ static struct phy_driver at803x_driver[]
+ 	.soft_reset		= genphy_soft_reset,
+ 	.config_aneg		= at803x_config_aneg,
+ }, {
+-	/* QCA8337 */
+-	.phy_id			= QCA8337_PHY_ID,
+-	.phy_id_mask		= QCA8K_PHY_ID_MASK,
+-	.name			= "Qualcomm Atheros 8337 internal PHY",
+-	/* PHY_GBIT_FEATURES */
+-	.probe			= at803x_probe,
+-	.flags			= PHY_IS_INTERNAL,
+-	.config_init		= qca83xx_config_init,
+-	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= qca83xx_get_sset_count,
+-	.get_strings		= qca83xx_get_strings,
+-	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca8337_suspend,
+-	.resume			= qca83xx_resume,
+-}, {
+-	/* QCA8327-A from switch QCA8327-AL1A */
+-	.phy_id			= QCA8327_A_PHY_ID,
+-	.phy_id_mask		= QCA8K_PHY_ID_MASK,
+-	.name			= "Qualcomm Atheros 8327-A internal PHY",
+-	/* PHY_GBIT_FEATURES */
+-	.link_change_notify	= qca83xx_link_change_notify,
+-	.probe			= at803x_probe,
+-	.flags			= PHY_IS_INTERNAL,
+-	.config_init		= qca8327_config_init,
+-	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= qca83xx_get_sset_count,
+-	.get_strings		= qca83xx_get_strings,
+-	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca8327_suspend,
+-	.resume			= qca83xx_resume,
+-}, {
+-	/* QCA8327-B from switch QCA8327-BL1A */
+-	.phy_id			= QCA8327_B_PHY_ID,
+-	.phy_id_mask		= QCA8K_PHY_ID_MASK,
+-	.name			= "Qualcomm Atheros 8327-B internal PHY",
+-	/* PHY_GBIT_FEATURES */
+-	.link_change_notify	= qca83xx_link_change_notify,
+-	.probe			= at803x_probe,
+-	.flags			= PHY_IS_INTERNAL,
+-	.config_init		= qca8327_config_init,
+-	.soft_reset		= genphy_soft_reset,
+-	.get_sset_count		= qca83xx_get_sset_count,
+-	.get_strings		= qca83xx_get_strings,
+-	.get_stats		= qca83xx_get_stats,
+-	.suspend		= qca8327_suspend,
+-	.resume			= qca83xx_resume,
+-}, {
+ 	/* Qualcomm QCA8081 */
+ 	PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+ 	.name			= "Qualcomm QCA8081",
+@@ -2683,9 +2451,6 @@ static struct mdio_device_id __maybe_unu
+ 	{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
+ 	{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
+ 	{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ 	{ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
+ 	{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
+ 	{ }
+--- /dev/null
++++ b/drivers/net/phy/qcom/qca83xx.c
+@@ -0,0 +1,275 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++#include <linux/phy.h>
++#include <linux/module.h>
++
++#include "qcom.h"
++
++#define AT803X_DEBUG_REG_3C			0x3C
++
++#define AT803X_DEBUG_REG_GREEN			0x3D
++#define   AT803X_DEBUG_GATE_CLK_IN1000		BIT(6)
++
++#define MDIO_AZ_DEBUG				0x800D
++
++#define QCA8327_A_PHY_ID			0x004dd033
++#define QCA8327_B_PHY_ID			0x004dd034
++#define QCA8337_PHY_ID				0x004dd036
++#define QCA8K_PHY_ID_MASK			0xffffffff
++
++#define QCA8K_DEVFLAGS_REVISION_MASK		GENMASK(2, 0)
++
++static struct at803x_hw_stat qca83xx_hw_stats[] = {
++	{ "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
++	{ "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
++	{ "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
++};
++
++struct qca83xx_priv {
++	u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
++};
++
++MODULE_DESCRIPTION("Qualcomm Atheros QCA83XX PHY driver");
++MODULE_AUTHOR("Matus Ujhelyi");
++MODULE_AUTHOR("Christian Marangi <[email protected]>");
++MODULE_LICENSE("GPL");
++
++static int qca83xx_get_sset_count(struct phy_device *phydev)
++{
++	return ARRAY_SIZE(qca83xx_hw_stats);
++}
++
++static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
++		strscpy(data + i * ETH_GSTRING_LEN,
++			qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
++	}
++}
++
++static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
++{
++	struct at803x_hw_stat stat = qca83xx_hw_stats[i];
++	struct qca83xx_priv *priv = phydev->priv;
++	int val;
++	u64 ret;
++
++	if (stat.access_type == MMD)
++		val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
++	else
++		val = phy_read(phydev, stat.reg);
++
++	if (val < 0) {
++		ret = U64_MAX;
++	} else {
++		val = val & stat.mask;
++		priv->stats[i] += val;
++		ret = priv->stats[i];
++	}
++
++	return ret;
++}
++
++static void qca83xx_get_stats(struct phy_device *phydev,
++			      struct ethtool_stats *stats, u64 *data)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
++		data[i] = qca83xx_get_stat(phydev, i);
++}
++
++static int qca83xx_probe(struct phy_device *phydev)
++{
++	struct device *dev = &phydev->mdio.dev;
++	struct qca83xx_priv *priv;
++
++	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	phydev->priv = priv;
++
++	return 0;
++}
++
++static int qca83xx_config_init(struct phy_device *phydev)
++{
++	u8 switch_revision;
++
++	switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
++
++	switch (switch_revision) {
++	case 1:
++		/* For 100M waveform */
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
++		/* Turn on Gigabit clock */
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
++		break;
++
++	case 2:
++		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
++		fallthrough;
++	case 4:
++		phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
++		at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
++		break;
++	}
++
++	/* Following original QCA sourcecode set port to prefer master */
++	phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
++
++	return 0;
++}
++
++static int qca8327_config_init(struct phy_device *phydev)
++{
++	/* QCA8327 require DAC amplitude adjustment for 100m set to +6%.
++	 * Disable on init and enable only with 100m speed following
++	 * qca original source code.
++	 */
++	at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
++			      QCA8327_DEBUG_MANU_CTRL_EN, 0);
++
++	return qca83xx_config_init(phydev);
++}
++
++static void qca83xx_link_change_notify(struct phy_device *phydev)
++{
++	/* Set DAC Amplitude adjustment to +6% for 100m on link running */
++	if (phydev->state == PHY_RUNNING) {
++		if (phydev->speed == SPEED_100)
++			at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
++					      QCA8327_DEBUG_MANU_CTRL_EN,
++					      QCA8327_DEBUG_MANU_CTRL_EN);
++	} else {
++		/* Reset DAC Amplitude adjustment */
++		at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
++				      QCA8327_DEBUG_MANU_CTRL_EN, 0);
++	}
++}
++
++static int qca83xx_resume(struct phy_device *phydev)
++{
++	int ret, val;
++
++	/* Skip reset if not suspended */
++	if (!phydev->suspended)
++		return 0;
++
++	/* Reinit the port, reset values set by suspend */
++	qca83xx_config_init(phydev);
++
++	/* Reset the port on port resume */
++	phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
++
++	/* On resume from suspend the switch execute a reset and
++	 * restart auto-negotiation. Wait for reset to complete.
++	 */
++	ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
++				    50000, 600000, true);
++	if (ret)
++		return ret;
++
++	usleep_range(1000, 2000);
++
++	return 0;
++}
++
++static int qca83xx_suspend(struct phy_device *phydev)
++{
++	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
++			      AT803X_DEBUG_GATE_CLK_IN1000, 0);
++
++	at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
++			      AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
++			      AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
++
++	return 0;
++}
++
++static int qca8337_suspend(struct phy_device *phydev)
++{
++	/* Only QCA8337 support actual suspend. */
++	genphy_suspend(phydev);
++
++	return qca83xx_suspend(phydev);
++}
++
++static int qca8327_suspend(struct phy_device *phydev)
++{
++	u16 mask = 0;
++
++	/* QCA8327 cause port unreliability when phy suspend
++	 * is set.
++	 */
++	mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
++	phy_modify(phydev, MII_BMCR, mask, 0);
++
++	return qca83xx_suspend(phydev);
++}
++
++static struct phy_driver qca83xx_driver[] = {
++{
++	/* QCA8337 */
++	.phy_id			= QCA8337_PHY_ID,
++	.phy_id_mask		= QCA8K_PHY_ID_MASK,
++	.name			= "Qualcomm Atheros 8337 internal PHY",
++	/* PHY_GBIT_FEATURES */
++	.probe			= qca83xx_probe,
++	.flags			= PHY_IS_INTERNAL,
++	.config_init		= qca83xx_config_init,
++	.soft_reset		= genphy_soft_reset,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
++	.suspend		= qca8337_suspend,
++	.resume			= qca83xx_resume,
++}, {
++	/* QCA8327-A from switch QCA8327-AL1A */
++	.phy_id			= QCA8327_A_PHY_ID,
++	.phy_id_mask		= QCA8K_PHY_ID_MASK,
++	.name			= "Qualcomm Atheros 8327-A internal PHY",
++	/* PHY_GBIT_FEATURES */
++	.link_change_notify	= qca83xx_link_change_notify,
++	.probe			= qca83xx_probe,
++	.flags			= PHY_IS_INTERNAL,
++	.config_init		= qca8327_config_init,
++	.soft_reset		= genphy_soft_reset,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
++	.suspend		= qca8327_suspend,
++	.resume			= qca83xx_resume,
++}, {
++	/* QCA8327-B from switch QCA8327-BL1A */
++	.phy_id			= QCA8327_B_PHY_ID,
++	.phy_id_mask		= QCA8K_PHY_ID_MASK,
++	.name			= "Qualcomm Atheros 8327-B internal PHY",
++	/* PHY_GBIT_FEATURES */
++	.link_change_notify	= qca83xx_link_change_notify,
++	.probe			= qca83xx_probe,
++	.flags			= PHY_IS_INTERNAL,
++	.config_init		= qca8327_config_init,
++	.soft_reset		= genphy_soft_reset,
++	.get_sset_count		= qca83xx_get_sset_count,
++	.get_strings		= qca83xx_get_strings,
++	.get_stats		= qca83xx_get_stats,
++	.suspend		= qca8327_suspend,
++	.resume			= qca83xx_resume,
++}, };
++
++module_phy_driver(qca83xx_driver);
++
++static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
++	{ PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
++	{ PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
++	{ }
++};
++
++MODULE_DEVICE_TABLE(mdio, qca83xx_tbl);

+ 1014 - 0
target/linux/generic/backport-6.6/713-v6.9-04-net-phy-qcom-move-additional-functions-to-shared-lib.patch

@@ -0,0 +1,1014 @@
+From 249d2b80e4db0e38503ed0ec2af6c7401bc099b9 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Mon, 29 Jan 2024 15:15:22 +0100
+Subject: [PATCH 4/5] net: phy: qcom: move additional functions to shared
+ library
+
+Move additional functions to shared library in preparation for qca808x
+PHY Family to be detached from at803x driver.
+
+Only the shared defines are moved to the shared qcom.h header.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ drivers/net/phy/qcom/at803x.c       | 426 +---------------------------
+ drivers/net/phy/qcom/qcom-phy-lib.c | 376 ++++++++++++++++++++++++
+ drivers/net/phy/qcom/qcom.h         |  86 ++++++
+ 3 files changed, 463 insertions(+), 425 deletions(-)
+
+--- a/drivers/net/phy/qcom/at803x.c
++++ b/drivers/net/phy/qcom/at803x.c
+@@ -24,65 +24,11 @@
+ 
+ #include "qcom.h"
+ 
+-#define AT803X_SPECIFIC_FUNCTION_CONTROL	0x10
+-#define AT803X_SFC_ASSERT_CRS			BIT(11)
+-#define AT803X_SFC_FORCE_LINK			BIT(10)
+-#define AT803X_SFC_MDI_CROSSOVER_MODE_M		GENMASK(6, 5)
+-#define AT803X_SFC_AUTOMATIC_CROSSOVER		0x3
+-#define AT803X_SFC_MANUAL_MDIX			0x1
+-#define AT803X_SFC_MANUAL_MDI			0x0
+-#define AT803X_SFC_SQE_TEST			BIT(2)
+-#define AT803X_SFC_POLARITY_REVERSAL		BIT(1)
+-#define AT803X_SFC_DISABLE_JABBER		BIT(0)
+-
+-#define AT803X_SPECIFIC_STATUS			0x11
+-#define AT803X_SS_SPEED_MASK			GENMASK(15, 14)
+-#define AT803X_SS_SPEED_1000			2
+-#define AT803X_SS_SPEED_100			1
+-#define AT803X_SS_SPEED_10			0
+-#define AT803X_SS_DUPLEX			BIT(13)
+-#define AT803X_SS_SPEED_DUPLEX_RESOLVED		BIT(11)
+-#define AT803X_SS_MDIX				BIT(6)
+-
+-#define QCA808X_SS_SPEED_MASK			GENMASK(9, 7)
+-#define QCA808X_SS_SPEED_2500			4
+-
+-#define AT803X_INTR_ENABLE			0x12
+-#define AT803X_INTR_ENABLE_AUTONEG_ERR		BIT(15)
+-#define AT803X_INTR_ENABLE_SPEED_CHANGED	BIT(14)
+-#define AT803X_INTR_ENABLE_DUPLEX_CHANGED	BIT(13)
+-#define AT803X_INTR_ENABLE_PAGE_RECEIVED	BIT(12)
+-#define AT803X_INTR_ENABLE_LINK_FAIL		BIT(11)
+-#define AT803X_INTR_ENABLE_LINK_SUCCESS		BIT(10)
+-#define AT803X_INTR_ENABLE_LINK_FAIL_BX		BIT(8)
+-#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX	BIT(7)
+-#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE	BIT(5)
+-#define AT803X_INTR_ENABLE_POLARITY_CHANGED	BIT(1)
+-#define AT803X_INTR_ENABLE_WOL			BIT(0)
+-
+-#define AT803X_INTR_STATUS			0x13
+-
+-#define AT803X_SMART_SPEED			0x14
+-#define AT803X_SMART_SPEED_ENABLE		BIT(5)
+-#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK	GENMASK(4, 2)
+-#define AT803X_SMART_SPEED_BYPASS_TIMER		BIT(1)
+-#define AT803X_CDT				0x16
+-#define AT803X_CDT_MDI_PAIR_MASK		GENMASK(9, 8)
+-#define AT803X_CDT_ENABLE_TEST			BIT(0)
+-#define AT803X_CDT_STATUS			0x1c
+-#define AT803X_CDT_STATUS_STAT_NORMAL		0
+-#define AT803X_CDT_STATUS_STAT_SHORT		1
+-#define AT803X_CDT_STATUS_STAT_OPEN		2
+-#define AT803X_CDT_STATUS_STAT_FAIL		3
+-#define AT803X_CDT_STATUS_STAT_MASK		GENMASK(9, 8)
+-#define AT803X_CDT_STATUS_DELTA_TIME_MASK	GENMASK(7, 0)
+ #define AT803X_LED_CONTROL			0x18
+ 
+ #define AT803X_PHY_MMD3_WOL_CTRL		0x8012
+ #define AT803X_WOL_EN				BIT(5)
+-#define AT803X_LOC_MAC_ADDR_0_15_OFFSET		0x804C
+-#define AT803X_LOC_MAC_ADDR_16_31_OFFSET	0x804B
+-#define AT803X_LOC_MAC_ADDR_32_47_OFFSET	0x804A
++
+ #define AT803X_REG_CHIP_CONFIG			0x1f
+ #define AT803X_BT_BX_REG_SEL			0x8000
+ 
+@@ -138,10 +84,6 @@
+ #define AT803X_CLK_OUT_STRENGTH_HALF		1
+ #define AT803X_CLK_OUT_STRENGTH_QUARTER		2
+ 
+-#define AT803X_DEFAULT_DOWNSHIFT		5
+-#define AT803X_MIN_DOWNSHIFT			2
+-#define AT803X_MAX_DOWNSHIFT			9
+-
+ #define AT803X_MMD3_SMARTEEE_CTL1		0x805b
+ #define AT803X_MMD3_SMARTEEE_CTL2		0x805c
+ #define AT803X_MMD3_SMARTEEE_CTL3		0x805d
+@@ -366,11 +308,6 @@ MODULE_DESCRIPTION("Qualcomm Atheros AR8
+ MODULE_AUTHOR("Matus Ujhelyi");
+ MODULE_LICENSE("GPL");
+ 
+-struct at803x_ss_mask {
+-	u16 speed_mask;
+-	u8 speed_shift;
+-};
+-
+ struct at803x_priv {
+ 	int flags;
+ 	u16 clk_25m_reg;
+@@ -470,80 +407,6 @@ static void at803x_context_restore(struc
+ 	phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+ }
+ 
+-static int at803x_set_wol(struct phy_device *phydev,
+-			  struct ethtool_wolinfo *wol)
+-{
+-	int ret, irq_enabled;
+-
+-	if (wol->wolopts & WAKE_MAGIC) {
+-		struct net_device *ndev = phydev->attached_dev;
+-		const u8 *mac;
+-		unsigned int i;
+-		static const unsigned int offsets[] = {
+-			AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+-			AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+-			AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+-		};
+-
+-		if (!ndev)
+-			return -ENODEV;
+-
+-		mac = (const u8 *)ndev->dev_addr;
+-
+-		if (!is_valid_ether_addr(mac))
+-			return -EINVAL;
+-
+-		for (i = 0; i < 3; i++)
+-			phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+-				      mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+-
+-		/* Enable WOL interrupt */
+-		ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+-		if (ret)
+-			return ret;
+-	} else {
+-		/* Disable WOL interrupt */
+-		ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	/* Clear WOL status */
+-	ret = phy_read(phydev, AT803X_INTR_STATUS);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Check if there are other interrupts except for WOL triggered when PHY is
+-	 * in interrupt mode, only the interrupts enabled by AT803X_INTR_ENABLE can
+-	 * be passed up to the interrupt PIN.
+-	 */
+-	irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+-	if (irq_enabled < 0)
+-		return irq_enabled;
+-
+-	irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
+-	if (ret & irq_enabled && !phy_polling_mode(phydev))
+-		phy_trigger_machine(phydev);
+-
+-	return 0;
+-}
+-
+-static void at803x_get_wol(struct phy_device *phydev,
+-			   struct ethtool_wolinfo *wol)
+-{
+-	int value;
+-
+-	wol->supported = WAKE_MAGIC;
+-	wol->wolopts = 0;
+-
+-	value = phy_read(phydev, AT803X_INTR_ENABLE);
+-	if (value < 0)
+-		return;
+-
+-	if (value & AT803X_INTR_ENABLE_WOL)
+-		wol->wolopts |= WAKE_MAGIC;
+-}
+-
+ static int at803x_suspend(struct phy_device *phydev)
+ {
+ 	int value;
+@@ -816,73 +679,6 @@ static int at803x_config_init(struct phy
+ 	return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
+ }
+ 
+-static int at803x_ack_interrupt(struct phy_device *phydev)
+-{
+-	int err;
+-
+-	err = phy_read(phydev, AT803X_INTR_STATUS);
+-
+-	return (err < 0) ? err : 0;
+-}
+-
+-static int at803x_config_intr(struct phy_device *phydev)
+-{
+-	int err;
+-	int value;
+-
+-	value = phy_read(phydev, AT803X_INTR_ENABLE);
+-
+-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+-		/* Clear any pending interrupts */
+-		err = at803x_ack_interrupt(phydev);
+-		if (err)
+-			return err;
+-
+-		value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
+-		value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
+-		value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
+-		value |= AT803X_INTR_ENABLE_LINK_FAIL;
+-		value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+-
+-		err = phy_write(phydev, AT803X_INTR_ENABLE, value);
+-	} else {
+-		err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+-		if (err)
+-			return err;
+-
+-		/* Clear any pending interrupts */
+-		err = at803x_ack_interrupt(phydev);
+-	}
+-
+-	return err;
+-}
+-
+-static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+-{
+-	int irq_status, int_enabled;
+-
+-	irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+-	if (irq_status < 0) {
+-		phy_error(phydev);
+-		return IRQ_NONE;
+-	}
+-
+-	/* Read the current enabled interrupts */
+-	int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+-	if (int_enabled < 0) {
+-		phy_error(phydev);
+-		return IRQ_NONE;
+-	}
+-
+-	/* See if this was one of our enabled interrupts */
+-	if (!(irq_status & int_enabled))
+-		return IRQ_NONE;
+-
+-	phy_trigger_machine(phydev);
+-
+-	return IRQ_HANDLED;
+-}
+-
+ static void at803x_link_change_notify(struct phy_device *phydev)
+ {
+ 	/*
+@@ -908,69 +704,6 @@ static void at803x_link_change_notify(st
+ 	}
+ }
+ 
+-static int at803x_read_specific_status(struct phy_device *phydev,
+-				       struct at803x_ss_mask ss_mask)
+-{
+-	int ss;
+-
+-	/* Read the AT8035 PHY-Specific Status register, which indicates the
+-	 * speed and duplex that the PHY is actually using, irrespective of
+-	 * whether we are in autoneg mode or not.
+-	 */
+-	ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+-	if (ss < 0)
+-		return ss;
+-
+-	if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+-		int sfc, speed;
+-
+-		sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
+-		if (sfc < 0)
+-			return sfc;
+-
+-		speed = ss & ss_mask.speed_mask;
+-		speed >>= ss_mask.speed_shift;
+-
+-		switch (speed) {
+-		case AT803X_SS_SPEED_10:
+-			phydev->speed = SPEED_10;
+-			break;
+-		case AT803X_SS_SPEED_100:
+-			phydev->speed = SPEED_100;
+-			break;
+-		case AT803X_SS_SPEED_1000:
+-			phydev->speed = SPEED_1000;
+-			break;
+-		case QCA808X_SS_SPEED_2500:
+-			phydev->speed = SPEED_2500;
+-			break;
+-		}
+-		if (ss & AT803X_SS_DUPLEX)
+-			phydev->duplex = DUPLEX_FULL;
+-		else
+-			phydev->duplex = DUPLEX_HALF;
+-
+-		if (ss & AT803X_SS_MDIX)
+-			phydev->mdix = ETH_TP_MDI_X;
+-		else
+-			phydev->mdix = ETH_TP_MDI;
+-
+-		switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
+-		case AT803X_SFC_MANUAL_MDI:
+-			phydev->mdix_ctrl = ETH_TP_MDI;
+-			break;
+-		case AT803X_SFC_MANUAL_MDIX:
+-			phydev->mdix_ctrl = ETH_TP_MDI_X;
+-			break;
+-		case AT803X_SFC_AUTOMATIC_CROSSOVER:
+-			phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+-			break;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+ static int at803x_read_status(struct phy_device *phydev)
+ {
+ 	struct at803x_ss_mask ss_mask = { 0 };
+@@ -1006,50 +739,6 @@ static int at803x_read_status(struct phy
+ 	return 0;
+ }
+ 
+-static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
+-{
+-	u16 val;
+-
+-	switch (ctrl) {
+-	case ETH_TP_MDI:
+-		val = AT803X_SFC_MANUAL_MDI;
+-		break;
+-	case ETH_TP_MDI_X:
+-		val = AT803X_SFC_MANUAL_MDIX;
+-		break;
+-	case ETH_TP_MDI_AUTO:
+-		val = AT803X_SFC_AUTOMATIC_CROSSOVER;
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
+-			  AT803X_SFC_MDI_CROSSOVER_MODE_M,
+-			  FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
+-}
+-
+-static int at803x_prepare_config_aneg(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Changes of the midx bits are disruptive to the normal operation;
+-	 * therefore any changes to these registers must be followed by a
+-	 * software reset to take effect.
+-	 */
+-	if (ret == 1) {
+-		ret = genphy_soft_reset(phydev);
+-		if (ret < 0)
+-			return ret;
+-	}
+-
+-	return 0;
+-}
+-
+ static int at803x_config_aneg(struct phy_device *phydev)
+ {
+ 	struct at803x_priv *priv = phydev->priv;
+@@ -1065,80 +754,6 @@ static int at803x_config_aneg(struct phy
+ 	return genphy_config_aneg(phydev);
+ }
+ 
+-static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+-{
+-	int val;
+-
+-	val = phy_read(phydev, AT803X_SMART_SPEED);
+-	if (val < 0)
+-		return val;
+-
+-	if (val & AT803X_SMART_SPEED_ENABLE)
+-		*d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
+-	else
+-		*d = DOWNSHIFT_DEV_DISABLE;
+-
+-	return 0;
+-}
+-
+-static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
+-{
+-	u16 mask, set;
+-	int ret;
+-
+-	switch (cnt) {
+-	case DOWNSHIFT_DEV_DEFAULT_COUNT:
+-		cnt = AT803X_DEFAULT_DOWNSHIFT;
+-		fallthrough;
+-	case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
+-		set = AT803X_SMART_SPEED_ENABLE |
+-		      AT803X_SMART_SPEED_BYPASS_TIMER |
+-		      FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
+-		mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
+-		break;
+-	case DOWNSHIFT_DEV_DISABLE:
+-		set = 0;
+-		mask = AT803X_SMART_SPEED_ENABLE |
+-		       AT803X_SMART_SPEED_BYPASS_TIMER;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
+-
+-	/* After changing the smart speed settings, we need to perform a
+-	 * software reset, use phy_init_hw() to make sure we set the
+-	 * reapply any values which might got lost during software reset.
+-	 */
+-	if (ret == 1)
+-		ret = phy_init_hw(phydev);
+-
+-	return ret;
+-}
+-
+-static int at803x_get_tunable(struct phy_device *phydev,
+-			      struct ethtool_tunable *tuna, void *data)
+-{
+-	switch (tuna->id) {
+-	case ETHTOOL_PHY_DOWNSHIFT:
+-		return at803x_get_downshift(phydev, data);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+-static int at803x_set_tunable(struct phy_device *phydev,
+-			      struct ethtool_tunable *tuna, const void *data)
+-{
+-	switch (tuna->id) {
+-	case ETHTOOL_PHY_DOWNSHIFT:
+-		return at803x_set_downshift(phydev, *(const u8 *)data);
+-	default:
+-		return -EOPNOTSUPP;
+-	}
+-}
+-
+ static int at803x_cable_test_result_trans(u16 status)
+ {
+ 	switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+@@ -1170,45 +785,6 @@ static bool at803x_cdt_fault_length_vali
+ 	return false;
+ }
+ 
+-static int at803x_cdt_fault_length(int dt)
+-{
+-	/* According to the datasheet the distance to the fault is
+-	 * DELTA_TIME * 0.824 meters.
+-	 *
+-	 * The author suspect the correct formula is:
+-	 *
+-	 *   fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
+-	 *
+-	 * where c is the speed of light, VF is the velocity factor of
+-	 * the twisted pair cable, 125MHz the counter frequency and
+-	 * we need to divide by 2 because the hardware will measure the
+-	 * round trip time to the fault and back to the PHY.
+-	 *
+-	 * With a VF of 0.69 we get the factor 0.824 mentioned in the
+-	 * datasheet.
+-	 */
+-	return (dt * 824) / 10;
+-}
+-
+-static int at803x_cdt_start(struct phy_device *phydev,
+-			    u32 cdt_start)
+-{
+-	return phy_write(phydev, AT803X_CDT, cdt_start);
+-}
+-
+-static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+-					  u32 cdt_en)
+-{
+-	int val, ret;
+-
+-	/* One test run takes about 25ms */
+-	ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+-				    !(val & cdt_en),
+-				    30000, 100000, true);
+-
+-	return ret < 0 ? ret : 0;
+-}
+-
+ static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
+ {
+ 	static const int ethtool_pair[] = {
+--- a/drivers/net/phy/qcom/qcom-phy-lib.c
++++ b/drivers/net/phy/qcom/qcom-phy-lib.c
+@@ -3,6 +3,9 @@
+ #include <linux/phy.h>
+ #include <linux/module.h>
+ 
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++
+ #include "qcom.h"
+ 
+ MODULE_DESCRIPTION("Qualcomm PHY driver Common Functions");
+@@ -51,3 +54,376 @@ int at803x_debug_reg_write(struct phy_de
+ 	return phy_write(phydev, AT803X_DEBUG_DATA, data);
+ }
+ EXPORT_SYMBOL_GPL(at803x_debug_reg_write);
++
++int at803x_set_wol(struct phy_device *phydev,
++		   struct ethtool_wolinfo *wol)
++{
++	int ret, irq_enabled;
++
++	if (wol->wolopts & WAKE_MAGIC) {
++		struct net_device *ndev = phydev->attached_dev;
++		const u8 *mac;
++		unsigned int i;
++		static const unsigned int offsets[] = {
++			AT803X_LOC_MAC_ADDR_32_47_OFFSET,
++			AT803X_LOC_MAC_ADDR_16_31_OFFSET,
++			AT803X_LOC_MAC_ADDR_0_15_OFFSET,
++		};
++
++		if (!ndev)
++			return -ENODEV;
++
++		mac = (const u8 *)ndev->dev_addr;
++
++		if (!is_valid_ether_addr(mac))
++			return -EINVAL;
++
++		for (i = 0; i < 3; i++)
++			phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
++				      mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
++
++		/* Enable WOL interrupt */
++		ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
++		if (ret)
++			return ret;
++	} else {
++		/* Disable WOL interrupt */
++		ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
++		if (ret)
++			return ret;
++	}
++
++	/* Clear WOL status */
++	ret = phy_read(phydev, AT803X_INTR_STATUS);
++	if (ret < 0)
++		return ret;
++
++	/* Check if there are other interrupts except for WOL triggered when PHY is
++	 * in interrupt mode, only the interrupts enabled by AT803X_INTR_ENABLE can
++	 * be passed up to the interrupt PIN.
++	 */
++	irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
++	if (irq_enabled < 0)
++		return irq_enabled;
++
++	irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
++	if (ret & irq_enabled && !phy_polling_mode(phydev))
++		phy_trigger_machine(phydev);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(at803x_set_wol);
++
++void at803x_get_wol(struct phy_device *phydev,
++		    struct ethtool_wolinfo *wol)
++{
++	int value;
++
++	wol->supported = WAKE_MAGIC;
++	wol->wolopts = 0;
++
++	value = phy_read(phydev, AT803X_INTR_ENABLE);
++	if (value < 0)
++		return;
++
++	if (value & AT803X_INTR_ENABLE_WOL)
++		wol->wolopts |= WAKE_MAGIC;
++}
++EXPORT_SYMBOL_GPL(at803x_get_wol);
++
++int at803x_ack_interrupt(struct phy_device *phydev)
++{
++	int err;
++
++	err = phy_read(phydev, AT803X_INTR_STATUS);
++
++	return (err < 0) ? err : 0;
++}
++EXPORT_SYMBOL_GPL(at803x_ack_interrupt);
++
++int at803x_config_intr(struct phy_device *phydev)
++{
++	int err;
++	int value;
++
++	value = phy_read(phydev, AT803X_INTR_ENABLE);
++
++	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
++		/* Clear any pending interrupts */
++		err = at803x_ack_interrupt(phydev);
++		if (err)
++			return err;
++
++		value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
++		value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
++		value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
++		value |= AT803X_INTR_ENABLE_LINK_FAIL;
++		value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
++
++		err = phy_write(phydev, AT803X_INTR_ENABLE, value);
++	} else {
++		err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
++		if (err)
++			return err;
++
++		/* Clear any pending interrupts */
++		err = at803x_ack_interrupt(phydev);
++	}
++
++	return err;
++}
++EXPORT_SYMBOL_GPL(at803x_config_intr);
++
++irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
++{
++	int irq_status, int_enabled;
++
++	irq_status = phy_read(phydev, AT803X_INTR_STATUS);
++	if (irq_status < 0) {
++		phy_error(phydev);
++		return IRQ_NONE;
++	}
++
++	/* Read the current enabled interrupts */
++	int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
++	if (int_enabled < 0) {
++		phy_error(phydev);
++		return IRQ_NONE;
++	}
++
++	/* See if this was one of our enabled interrupts */
++	if (!(irq_status & int_enabled))
++		return IRQ_NONE;
++
++	phy_trigger_machine(phydev);
++
++	return IRQ_HANDLED;
++}
++EXPORT_SYMBOL_GPL(at803x_handle_interrupt);
++
++int at803x_read_specific_status(struct phy_device *phydev,
++				struct at803x_ss_mask ss_mask)
++{
++	int ss;
++
++	/* Read the AT8035 PHY-Specific Status register, which indicates the
++	 * speed and duplex that the PHY is actually using, irrespective of
++	 * whether we are in autoneg mode or not.
++	 */
++	ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
++	if (ss < 0)
++		return ss;
++
++	if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
++		int sfc, speed;
++
++		sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
++		if (sfc < 0)
++			return sfc;
++
++		speed = ss & ss_mask.speed_mask;
++		speed >>= ss_mask.speed_shift;
++
++		switch (speed) {
++		case AT803X_SS_SPEED_10:
++			phydev->speed = SPEED_10;
++			break;
++		case AT803X_SS_SPEED_100:
++			phydev->speed = SPEED_100;
++			break;
++		case AT803X_SS_SPEED_1000:
++			phydev->speed = SPEED_1000;
++			break;
++		case QCA808X_SS_SPEED_2500:
++			phydev->speed = SPEED_2500;
++			break;
++		}
++		if (ss & AT803X_SS_DUPLEX)
++			phydev->duplex = DUPLEX_FULL;
++		else
++			phydev->duplex = DUPLEX_HALF;
++
++		if (ss & AT803X_SS_MDIX)
++			phydev->mdix = ETH_TP_MDI_X;
++		else
++			phydev->mdix = ETH_TP_MDI;
++
++		switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
++		case AT803X_SFC_MANUAL_MDI:
++			phydev->mdix_ctrl = ETH_TP_MDI;
++			break;
++		case AT803X_SFC_MANUAL_MDIX:
++			phydev->mdix_ctrl = ETH_TP_MDI_X;
++			break;
++		case AT803X_SFC_AUTOMATIC_CROSSOVER:
++			phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
++			break;
++		}
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(at803x_read_specific_status);
++
++int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
++{
++	u16 val;
++
++	switch (ctrl) {
++	case ETH_TP_MDI:
++		val = AT803X_SFC_MANUAL_MDI;
++		break;
++	case ETH_TP_MDI_X:
++		val = AT803X_SFC_MANUAL_MDIX;
++		break;
++	case ETH_TP_MDI_AUTO:
++		val = AT803X_SFC_AUTOMATIC_CROSSOVER;
++		break;
++	default:
++		return 0;
++	}
++
++	return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
++			  AT803X_SFC_MDI_CROSSOVER_MODE_M,
++			  FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
++}
++EXPORT_SYMBOL_GPL(at803x_config_mdix);
++
++int at803x_prepare_config_aneg(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
++	if (ret < 0)
++		return ret;
++
++	/* Changes of the midx bits are disruptive to the normal operation;
++	 * therefore any changes to these registers must be followed by a
++	 * software reset to take effect.
++	 */
++	if (ret == 1) {
++		ret = genphy_soft_reset(phydev);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(at803x_prepare_config_aneg);
++
++static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
++{
++	int val;
++
++	val = phy_read(phydev, AT803X_SMART_SPEED);
++	if (val < 0)
++		return val;
++
++	if (val & AT803X_SMART_SPEED_ENABLE)
++		*d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
++	else
++		*d = DOWNSHIFT_DEV_DISABLE;
++
++	return 0;
++}
++
++static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
++{
++	u16 mask, set;
++	int ret;
++
++	switch (cnt) {
++	case DOWNSHIFT_DEV_DEFAULT_COUNT:
++		cnt = AT803X_DEFAULT_DOWNSHIFT;
++		fallthrough;
++	case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
++		set = AT803X_SMART_SPEED_ENABLE |
++		      AT803X_SMART_SPEED_BYPASS_TIMER |
++		      FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
++		mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
++		break;
++	case DOWNSHIFT_DEV_DISABLE:
++		set = 0;
++		mask = AT803X_SMART_SPEED_ENABLE |
++		       AT803X_SMART_SPEED_BYPASS_TIMER;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
++
++	/* After changing the smart speed settings, we need to perform a
++	 * software reset, use phy_init_hw() to make sure we set the
++	 * reapply any values which might got lost during software reset.
++	 */
++	if (ret == 1)
++		ret = phy_init_hw(phydev);
++
++	return ret;
++}
++
++int at803x_get_tunable(struct phy_device *phydev,
++		       struct ethtool_tunable *tuna, void *data)
++{
++	switch (tuna->id) {
++	case ETHTOOL_PHY_DOWNSHIFT:
++		return at803x_get_downshift(phydev, data);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++EXPORT_SYMBOL_GPL(at803x_get_tunable);
++
++int at803x_set_tunable(struct phy_device *phydev,
++		       struct ethtool_tunable *tuna, const void *data)
++{
++	switch (tuna->id) {
++	case ETHTOOL_PHY_DOWNSHIFT:
++		return at803x_set_downshift(phydev, *(const u8 *)data);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++EXPORT_SYMBOL_GPL(at803x_set_tunable);
++
++int at803x_cdt_fault_length(int dt)
++{
++	/* According to the datasheet the distance to the fault is
++	 * DELTA_TIME * 0.824 meters.
++	 *
++	 * The author suspect the correct formula is:
++	 *
++	 *   fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
++	 *
++	 * where c is the speed of light, VF is the velocity factor of
++	 * the twisted pair cable, 125MHz the counter frequency and
++	 * we need to divide by 2 because the hardware will measure the
++	 * round trip time to the fault and back to the PHY.
++	 *
++	 * With a VF of 0.69 we get the factor 0.824 mentioned in the
++	 * datasheet.
++	 */
++	return (dt * 824) / 10;
++}
++EXPORT_SYMBOL_GPL(at803x_cdt_fault_length);
++
++int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start)
++{
++	return phy_write(phydev, AT803X_CDT, cdt_start);
++}
++EXPORT_SYMBOL_GPL(at803x_cdt_start);
++
++int at803x_cdt_wait_for_completion(struct phy_device *phydev,
++				   u32 cdt_en)
++{
++	int val, ret;
++
++	/* One test run takes about 25ms */
++	ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
++				    !(val & cdt_en),
++				    30000, 100000, true);
++
++	return ret < 0 ? ret : 0;
++}
++EXPORT_SYMBOL_GPL(at803x_cdt_wait_for_completion);
+--- a/drivers/net/phy/qcom/qcom.h
++++ b/drivers/net/phy/qcom/qcom.h
+@@ -1,5 +1,63 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
++#define AT803X_SPECIFIC_FUNCTION_CONTROL	0x10
++#define AT803X_SFC_ASSERT_CRS			BIT(11)
++#define AT803X_SFC_FORCE_LINK			BIT(10)
++#define AT803X_SFC_MDI_CROSSOVER_MODE_M		GENMASK(6, 5)
++#define AT803X_SFC_AUTOMATIC_CROSSOVER		0x3
++#define AT803X_SFC_MANUAL_MDIX			0x1
++#define AT803X_SFC_MANUAL_MDI			0x0
++#define AT803X_SFC_SQE_TEST			BIT(2)
++#define AT803X_SFC_POLARITY_REVERSAL		BIT(1)
++#define AT803X_SFC_DISABLE_JABBER		BIT(0)
++
++#define AT803X_SPECIFIC_STATUS			0x11
++#define AT803X_SS_SPEED_MASK			GENMASK(15, 14)
++#define AT803X_SS_SPEED_1000			2
++#define AT803X_SS_SPEED_100			1
++#define AT803X_SS_SPEED_10			0
++#define AT803X_SS_DUPLEX			BIT(13)
++#define AT803X_SS_SPEED_DUPLEX_RESOLVED		BIT(11)
++#define AT803X_SS_MDIX				BIT(6)
++
++#define QCA808X_SS_SPEED_MASK			GENMASK(9, 7)
++#define QCA808X_SS_SPEED_2500			4
++
++#define AT803X_INTR_ENABLE			0x12
++#define AT803X_INTR_ENABLE_AUTONEG_ERR		BIT(15)
++#define AT803X_INTR_ENABLE_SPEED_CHANGED	BIT(14)
++#define AT803X_INTR_ENABLE_DUPLEX_CHANGED	BIT(13)
++#define AT803X_INTR_ENABLE_PAGE_RECEIVED	BIT(12)
++#define AT803X_INTR_ENABLE_LINK_FAIL		BIT(11)
++#define AT803X_INTR_ENABLE_LINK_SUCCESS		BIT(10)
++#define AT803X_INTR_ENABLE_LINK_FAIL_BX		BIT(8)
++#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX	BIT(7)
++#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE	BIT(5)
++#define AT803X_INTR_ENABLE_POLARITY_CHANGED	BIT(1)
++#define AT803X_INTR_ENABLE_WOL			BIT(0)
++
++#define AT803X_INTR_STATUS			0x13
++
++#define AT803X_SMART_SPEED			0x14
++#define AT803X_SMART_SPEED_ENABLE		BIT(5)
++#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK	GENMASK(4, 2)
++#define AT803X_SMART_SPEED_BYPASS_TIMER		BIT(1)
++
++#define AT803X_CDT				0x16
++#define AT803X_CDT_MDI_PAIR_MASK		GENMASK(9, 8)
++#define AT803X_CDT_ENABLE_TEST			BIT(0)
++#define AT803X_CDT_STATUS			0x1c
++#define AT803X_CDT_STATUS_STAT_NORMAL		0
++#define AT803X_CDT_STATUS_STAT_SHORT		1
++#define AT803X_CDT_STATUS_STAT_OPEN		2
++#define AT803X_CDT_STATUS_STAT_FAIL		3
++#define AT803X_CDT_STATUS_STAT_MASK		GENMASK(9, 8)
++#define AT803X_CDT_STATUS_DELTA_TIME_MASK	GENMASK(7, 0)
++
++#define AT803X_LOC_MAC_ADDR_0_15_OFFSET		0x804C
++#define AT803X_LOC_MAC_ADDR_16_31_OFFSET	0x804B
++#define AT803X_LOC_MAC_ADDR_32_47_OFFSET	0x804A
++
+ #define AT803X_DEBUG_ADDR			0x1D
+ #define AT803X_DEBUG_DATA			0x1E
+ 
+@@ -16,6 +74,10 @@
+ #define   AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE	BIT(13)
+ #define   AT803X_DEBUG_HIB_CTRL_PS_HIB_EN	BIT(15)
+ 
++#define AT803X_DEFAULT_DOWNSHIFT		5
++#define AT803X_MIN_DOWNSHIFT			2
++#define AT803X_MAX_DOWNSHIFT			9
++
+ enum stat_access_type {
+ 	PHY,
+ 	MMD
+@@ -28,7 +90,31 @@ struct at803x_hw_stat {
+ 	enum stat_access_type access_type;
+ };
+ 
++struct at803x_ss_mask {
++	u16 speed_mask;
++	u8 speed_shift;
++};
++
+ int at803x_debug_reg_read(struct phy_device *phydev, u16 reg);
+ int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ 			  u16 clear, u16 set);
+ int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);
++int at803x_set_wol(struct phy_device *phydev,
++		   struct ethtool_wolinfo *wol);
++void at803x_get_wol(struct phy_device *phydev,
++		    struct ethtool_wolinfo *wol);
++int at803x_ack_interrupt(struct phy_device *phydev);
++int at803x_config_intr(struct phy_device *phydev);
++irqreturn_t at803x_handle_interrupt(struct phy_device *phydev);
++int at803x_read_specific_status(struct phy_device *phydev,
++				struct at803x_ss_mask ss_mask);
++int at803x_config_mdix(struct phy_device *phydev, u8 ctrl);
++int at803x_prepare_config_aneg(struct phy_device *phydev);
++int at803x_get_tunable(struct phy_device *phydev,
++		       struct ethtool_tunable *tuna, void *data);
++int at803x_set_tunable(struct phy_device *phydev,
++		       struct ethtool_tunable *tuna, const void *data);
++int at803x_cdt_fault_length(int dt);
++int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start);
++int at803x_cdt_wait_for_completion(struct phy_device *phydev,
++				   u32 cdt_en);

+ 1936 - 0
target/linux/generic/backport-6.6/713-v6.9-05-net-phy-qcom-detach-qca808x-PHY-driver-from-at803x.patch

@@ -0,0 +1,1936 @@
+From c89414adf2ec7cd9e7080c419aa5847f1db1009c Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Mon, 29 Jan 2024 15:15:23 +0100
+Subject: [PATCH 5/5] net: phy: qcom: detach qca808x PHY driver from at803x
+
+Almost all the QCA8081 PHY driver OPs are specific and only some of them
+use the generic at803x.
+
+To make the at803x code slimmer, move all the specific qca808x regs and
+functions to a dedicated PHY driver.
+
+Probe function and priv struct is reworked to allocate and use only the
+qca808x specific data. Unused data from at803x PHY driver are dropped
+from at803x priv struct.
+
+Also a new Kconfig is introduced QCA808X_PHY, to compile the newly
+introduced PHY driver for QCA8081 PHY.
+
+As the Kconfig name starts with Qualcomm the same order is kept.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Link: https://lore.kernel.org/r/[email protected]
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ drivers/net/phy/qcom/Kconfig   |   6 +
+ drivers/net/phy/qcom/Makefile  |   1 +
+ drivers/net/phy/qcom/at803x.c  | 897 +------------------------------
+ drivers/net/phy/qcom/qca808x.c | 934 +++++++++++++++++++++++++++++++++
+ 4 files changed, 942 insertions(+), 896 deletions(-)
+ create mode 100644 drivers/net/phy/qcom/qca808x.c
+
+--- a/drivers/net/phy/qcom/Kconfig
++++ b/drivers/net/phy/qcom/Kconfig
+@@ -14,3 +14,9 @@ config QCA83XX_PHY
+ 	select QCOM_NET_PHYLIB
+ 	help
+ 	  Currently supports the internal QCA8337(Internal qca8k PHY) model
++
++config QCA808X_PHY
++	tristate "Qualcomm QCA808x PHYs"
++	select QCOM_NET_PHYLIB
++	help
++	  Currently supports the QCA8081 model
+--- a/drivers/net/phy/qcom/Makefile
++++ b/drivers/net/phy/qcom/Makefile
+@@ -2,3 +2,4 @@
+ obj-$(CONFIG_QCOM_NET_PHYLIB)	+= qcom-phy-lib.o
+ obj-$(CONFIG_AT803X_PHY)	+= at803x.o
+ obj-$(CONFIG_QCA83XX_PHY)	+= qca83xx.o
++obj-$(CONFIG_QCA808X_PHY)	+= qca808x.o
+--- a/drivers/net/phy/qcom/at803x.c
++++ b/drivers/net/phy/qcom/at803x.c
+@@ -96,8 +96,6 @@
+ #define ATH8035_PHY_ID				0x004dd072
+ #define AT8030_PHY_ID_MASK			0xffffffef
+ 
+-#define QCA8081_PHY_ID				0x004dd101
+-
+ #define QCA9561_PHY_ID				0x004dd042
+ 
+ #define AT803X_PAGE_FIBER			0
+@@ -110,201 +108,7 @@
+ /* disable hibernation mode */
+ #define AT803X_DISABLE_HIBERNATION_MODE		BIT(2)
+ 
+-/* ADC threshold */
+-#define QCA808X_PHY_DEBUG_ADC_THRESHOLD		0x2c80
+-#define QCA808X_ADC_THRESHOLD_MASK		GENMASK(7, 0)
+-#define QCA808X_ADC_THRESHOLD_80MV		0
+-#define QCA808X_ADC_THRESHOLD_100MV		0xf0
+-#define QCA808X_ADC_THRESHOLD_200MV		0x0f
+-#define QCA808X_ADC_THRESHOLD_300MV		0xff
+-
+-/* CLD control */
+-#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7		0x8007
+-#define QCA808X_8023AZ_AFE_CTRL_MASK		GENMASK(8, 4)
+-#define QCA808X_8023AZ_AFE_EN			0x90
+-
+-/* AZ control */
+-#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL	0x8008
+-#define QCA808X_MMD3_AZ_TRAINING_VAL		0x1c32
+-
+-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB	0x8014
+-#define QCA808X_MSE_THRESHOLD_20DB_VALUE	0x529
+-
+-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB	0x800E
+-#define QCA808X_MSE_THRESHOLD_17DB_VALUE	0x341
+-
+-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB	0x801E
+-#define QCA808X_MSE_THRESHOLD_27DB_VALUE	0x419
+-
+-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB	0x8020
+-#define QCA808X_MSE_THRESHOLD_28DB_VALUE	0x341
+-
+-#define QCA808X_PHY_MMD7_TOP_OPTION1		0x901c
+-#define QCA808X_TOP_OPTION1_DATA		0x0
+-
+-#define QCA808X_PHY_MMD3_DEBUG_1		0xa100
+-#define QCA808X_MMD3_DEBUG_1_VALUE		0x9203
+-#define QCA808X_PHY_MMD3_DEBUG_2		0xa101
+-#define QCA808X_MMD3_DEBUG_2_VALUE		0x48ad
+-#define QCA808X_PHY_MMD3_DEBUG_3		0xa103
+-#define QCA808X_MMD3_DEBUG_3_VALUE		0x1698
+-#define QCA808X_PHY_MMD3_DEBUG_4		0xa105
+-#define QCA808X_MMD3_DEBUG_4_VALUE		0x8001
+-#define QCA808X_PHY_MMD3_DEBUG_5		0xa106
+-#define QCA808X_MMD3_DEBUG_5_VALUE		0x1111
+-#define QCA808X_PHY_MMD3_DEBUG_6		0xa011
+-#define QCA808X_MMD3_DEBUG_6_VALUE		0x5f85
+-
+-/* master/slave seed config */
+-#define QCA808X_PHY_DEBUG_LOCAL_SEED		9
+-#define QCA808X_MASTER_SLAVE_SEED_ENABLE	BIT(1)
+-#define QCA808X_MASTER_SLAVE_SEED_CFG		GENMASK(12, 2)
+-#define QCA808X_MASTER_SLAVE_SEED_RANGE		0x32
+-
+-/* Hibernation yields lower power consumpiton in contrast with normal operation mode.
+- * when the copper cable is unplugged, the PHY enters into hibernation mode in about 10s.
+- */
+-#define QCA808X_DBG_AN_TEST			0xb
+-#define QCA808X_HIBERNATION_EN			BIT(15)
+-
+-#define QCA808X_CDT_ENABLE_TEST			BIT(15)
+-#define QCA808X_CDT_INTER_CHECK_DIS		BIT(13)
+-#define QCA808X_CDT_STATUS			BIT(11)
+-#define QCA808X_CDT_LENGTH_UNIT			BIT(10)
+-
+-#define QCA808X_MMD3_CDT_STATUS			0x8064
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_A		0x8065
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_B		0x8066
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_C		0x8067
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_D		0x8068
+-#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT	GENMASK(15, 8)
+-#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT	GENMASK(7, 0)
+-
+-#define QCA808X_CDT_CODE_PAIR_A			GENMASK(15, 12)
+-#define QCA808X_CDT_CODE_PAIR_B			GENMASK(11, 8)
+-#define QCA808X_CDT_CODE_PAIR_C			GENMASK(7, 4)
+-#define QCA808X_CDT_CODE_PAIR_D			GENMASK(3, 0)
+-
+-#define QCA808X_CDT_STATUS_STAT_TYPE		GENMASK(1, 0)
+-#define QCA808X_CDT_STATUS_STAT_FAIL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+-#define QCA808X_CDT_STATUS_STAT_NORMAL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+-#define QCA808X_CDT_STATUS_STAT_SAME_OPEN	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+-#define QCA808X_CDT_STATUS_STAT_SAME_SHORT	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+-
+-#define QCA808X_CDT_STATUS_STAT_MDI		GENMASK(3, 2)
+-#define QCA808X_CDT_STATUS_STAT_MDI1		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+-#define QCA808X_CDT_STATUS_STAT_MDI2		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+-#define QCA808X_CDT_STATUS_STAT_MDI3		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+-
+-/* NORMAL are MDI with type set to 0 */
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI1
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI1)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI1)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI2
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI2)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI2)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI3
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI3)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI3)
+-
+-/* Added for reference of existence but should be handled by wait_for_completion already */
+-#define QCA808X_CDT_STATUS_STAT_BUSY		(BIT(1) | BIT(3))
+-
+-#define QCA808X_MMD7_LED_GLOBAL			0x8073
+-#define QCA808X_LED_BLINK_1			GENMASK(11, 6)
+-#define QCA808X_LED_BLINK_2			GENMASK(5, 0)
+-/* Values are the same for both BLINK_1 and BLINK_2 */
+-#define QCA808X_LED_BLINK_FREQ_MASK		GENMASK(5, 3)
+-#define QCA808X_LED_BLINK_FREQ_2HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
+-#define QCA808X_LED_BLINK_FREQ_4HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
+-#define QCA808X_LED_BLINK_FREQ_8HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
+-#define QCA808X_LED_BLINK_FREQ_16HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
+-#define QCA808X_LED_BLINK_FREQ_32HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
+-#define QCA808X_LED_BLINK_FREQ_64HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
+-#define QCA808X_LED_BLINK_FREQ_128HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
+-#define QCA808X_LED_BLINK_FREQ_256HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
+-#define QCA808X_LED_BLINK_DUTY_MASK		GENMASK(2, 0)
+-#define QCA808X_LED_BLINK_DUTY_50_50		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
+-#define QCA808X_LED_BLINK_DUTY_75_25		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
+-#define QCA808X_LED_BLINK_DUTY_25_75		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
+-#define QCA808X_LED_BLINK_DUTY_33_67		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
+-#define QCA808X_LED_BLINK_DUTY_67_33		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
+-#define QCA808X_LED_BLINK_DUTY_17_83		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
+-#define QCA808X_LED_BLINK_DUTY_83_17		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
+-#define QCA808X_LED_BLINK_DUTY_8_92		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
+-
+-#define QCA808X_MMD7_LED2_CTRL			0x8074
+-#define QCA808X_MMD7_LED2_FORCE_CTRL		0x8075
+-#define QCA808X_MMD7_LED1_CTRL			0x8076
+-#define QCA808X_MMD7_LED1_FORCE_CTRL		0x8077
+-#define QCA808X_MMD7_LED0_CTRL			0x8078
+-#define QCA808X_MMD7_LED_CTRL(x)		(0x8078 - ((x) * 2))
+-
+-/* LED hw control pattern is the same for every LED */
+-#define QCA808X_LED_PATTERN_MASK		GENMASK(15, 0)
+-#define QCA808X_LED_SPEED2500_ON		BIT(15)
+-#define QCA808X_LED_SPEED2500_BLINK		BIT(14)
+-/* Follow blink trigger even if duplex or speed condition doesn't match */
+-#define QCA808X_LED_BLINK_CHECK_BYPASS		BIT(13)
+-#define QCA808X_LED_FULL_DUPLEX_ON		BIT(12)
+-#define QCA808X_LED_HALF_DUPLEX_ON		BIT(11)
+-#define QCA808X_LED_TX_BLINK			BIT(10)
+-#define QCA808X_LED_RX_BLINK			BIT(9)
+-#define QCA808X_LED_TX_ON_10MS			BIT(8)
+-#define QCA808X_LED_RX_ON_10MS			BIT(7)
+-#define QCA808X_LED_SPEED1000_ON		BIT(6)
+-#define QCA808X_LED_SPEED100_ON			BIT(5)
+-#define QCA808X_LED_SPEED10_ON			BIT(4)
+-#define QCA808X_LED_COLLISION_BLINK		BIT(3)
+-#define QCA808X_LED_SPEED1000_BLINK		BIT(2)
+-#define QCA808X_LED_SPEED100_BLINK		BIT(1)
+-#define QCA808X_LED_SPEED10_BLINK		BIT(0)
+-
+-#define QCA808X_MMD7_LED0_FORCE_CTRL		0x8079
+-#define QCA808X_MMD7_LED_FORCE_CTRL(x)		(0x8079 - ((x) * 2))
+-
+-/* LED force ctrl is the same for every LED
+- * No documentation exist for this, not even internal one
+- * with NDA as QCOM gives only info about configuring
+- * hw control pattern rules and doesn't indicate any way
+- * to force the LED to specific mode.
+- * These define comes from reverse and testing and maybe
+- * lack of some info or some info are not entirely correct.
+- * For the basic LED control and hw control these finding
+- * are enough to support LED control in all the required APIs.
+- *
+- * On doing some comparison with implementation with qca807x,
+- * it was found that it's 1:1 equal to it and confirms all the
+- * reverse done. It was also found further specification with the
+- * force mode and the blink modes.
+- */
+-#define QCA808X_LED_FORCE_EN			BIT(15)
+-#define QCA808X_LED_FORCE_MODE_MASK		GENMASK(14, 13)
+-#define QCA808X_LED_FORCE_BLINK_1		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
+-#define QCA808X_LED_FORCE_BLINK_2		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
+-#define QCA808X_LED_FORCE_ON			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
+-#define QCA808X_LED_FORCE_OFF			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
+-
+-#define QCA808X_MMD7_LED_POLARITY_CTRL		0x901a
+-/* QSDK sets by default 0x46 to this reg that sets BIT 6 for
+- * LED to active high. It's not clear what BIT 3 and BIT 4 does.
+- */
+-#define QCA808X_LED_ACTIVE_HIGH			BIT(6)
+-
+-/* QCA808X 1G chip type */
+-#define QCA808X_PHY_MMD7_CHIP_TYPE		0x901d
+-#define QCA808X_PHY_CHIP_TYPE_1G		BIT(0)
+-
+-#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL	0x9072
+-#define QCA8081_PHY_FIFO_RSTN			BIT(11)
+-
+-MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
++MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
+ MODULE_AUTHOR("Matus Ujhelyi");
+ MODULE_LICENSE("GPL");
+ 
+@@ -318,7 +122,6 @@ struct at803x_priv {
+ 	bool is_1000basex;
+ 	struct regulator_dev *vddio_rdev;
+ 	struct regulator_dev *vddh_rdev;
+-	int led_polarity_mode;
+ };
+ 
+ struct at803x_context {
+@@ -519,9 +322,6 @@ static int at803x_probe(struct phy_devic
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+-	/* Init LED polarity mode to -1 */
+-	priv->led_polarity_mode = -1;
+-
+ 	phydev->priv = priv;
+ 
+ 	ret = at803x_parse_dt(phydev);
+@@ -1216,672 +1016,6 @@ static int at8035_probe(struct phy_devic
+ 	return at8035_parse_dt(phydev);
+ }
+ 
+-static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* Enable fast retrain */
+-	ret = genphy_c45_fast_retrain(phydev, true);
+-	if (ret)
+-		return ret;
+-
+-	phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
+-		      QCA808X_TOP_OPTION1_DATA);
+-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
+-		      QCA808X_MSE_THRESHOLD_20DB_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
+-		      QCA808X_MSE_THRESHOLD_17DB_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
+-		      QCA808X_MSE_THRESHOLD_27DB_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
+-		      QCA808X_MSE_THRESHOLD_28DB_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
+-		      QCA808X_MMD3_DEBUG_1_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
+-		      QCA808X_MMD3_DEBUG_4_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
+-		      QCA808X_MMD3_DEBUG_5_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
+-		      QCA808X_MMD3_DEBUG_3_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
+-		      QCA808X_MMD3_DEBUG_6_VALUE);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
+-		      QCA808X_MMD3_DEBUG_2_VALUE);
+-
+-	return 0;
+-}
+-
+-static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
+-{
+-	u16 seed_value;
+-
+-	if (!enable)
+-		return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+-				QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
+-
+-	seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
+-	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+-			QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
+-			FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
+-			QCA808X_MASTER_SLAVE_SEED_ENABLE);
+-}
+-
+-static bool qca808x_is_prefer_master(struct phy_device *phydev)
+-{
+-	return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
+-		(phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
+-}
+-
+-static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
+-{
+-	return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+-}
+-
+-static int qca808x_config_init(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* Active adc&vga on 802.3az for the link 1000M and 100M */
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+-			     QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+-	if (ret)
+-		return ret;
+-
+-	/* Adjust the threshold on 802.3az for the link 1000M */
+-	ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+-			    QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
+-			    QCA808X_MMD3_AZ_TRAINING_VAL);
+-	if (ret)
+-		return ret;
+-
+-	if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+-		/* Config the fast retrain for the link 2500M */
+-		ret = qca808x_phy_fast_retrain_config(phydev);
+-		if (ret)
+-			return ret;
+-
+-		ret = genphy_read_master_slave(phydev);
+-		if (ret < 0)
+-			return ret;
+-
+-		if (!qca808x_is_prefer_master(phydev)) {
+-			/* Enable seed and configure lower ramdom seed to make phy
+-			 * linked as slave mode.
+-			 */
+-			ret = qca808x_phy_ms_seed_enable(phydev, true);
+-			if (ret)
+-				return ret;
+-		}
+-	}
+-
+-	/* Configure adc threshold as 100mv for the link 10M */
+-	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
+-				     QCA808X_ADC_THRESHOLD_MASK,
+-				     QCA808X_ADC_THRESHOLD_100MV);
+-}
+-
+-static int qca808x_read_status(struct phy_device *phydev)
+-{
+-	struct at803x_ss_mask ss_mask = { 0 };
+-	int ret;
+-
+-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+-	if (ret < 0)
+-		return ret;
+-
+-	linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
+-			 ret & MDIO_AN_10GBT_STAT_LP2_5G);
+-
+-	ret = genphy_read_status(phydev);
+-	if (ret)
+-		return ret;
+-
+-	/* qca8081 takes the different bits for speed value from at803x */
+-	ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
+-	ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
+-	ret = at803x_read_specific_status(phydev, ss_mask);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (phydev->link) {
+-		if (phydev->speed == SPEED_2500)
+-			phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+-		else
+-			phydev->interface = PHY_INTERFACE_MODE_SGMII;
+-	} else {
+-		/* generate seed as a lower random value to make PHY linked as SLAVE easily,
+-		 * except for master/slave configuration fault detected or the master mode
+-		 * preferred.
+-		 *
+-		 * the reason for not putting this code into the function link_change_notify is
+-		 * the corner case where the link partner is also the qca8081 PHY and the seed
+-		 * value is configured as the same value, the link can't be up and no link change
+-		 * occurs.
+-		 */
+-		if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+-			if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
+-			    qca808x_is_prefer_master(phydev)) {
+-				qca808x_phy_ms_seed_enable(phydev, false);
+-			} else {
+-				qca808x_phy_ms_seed_enable(phydev, true);
+-			}
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int qca808x_soft_reset(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	ret = genphy_soft_reset(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (qca808x_has_fast_retrain_or_slave_seed(phydev))
+-		ret = qca808x_phy_ms_seed_enable(phydev, true);
+-
+-	return ret;
+-}
+-
+-static bool qca808x_cdt_fault_length_valid(int cdt_code)
+-{
+-	switch (cdt_code) {
+-	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+-		return true;
+-	default:
+-		return false;
+-	}
+-}
+-
+-static int qca808x_cable_test_result_trans(int cdt_code)
+-{
+-	switch (cdt_code) {
+-	case QCA808X_CDT_STATUS_STAT_NORMAL:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+-	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+-	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+-	case QCA808X_CDT_STATUS_STAT_FAIL:
+-	default:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+-	}
+-}
+-
+-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+-				    int result)
+-{
+-	int val;
+-	u32 cdt_length_reg = 0;
+-
+-	switch (pair) {
+-	case ETHTOOL_A_CABLE_PAIR_A:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_B:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_C:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_D:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+-	if (val < 0)
+-		return val;
+-
+-	if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+-		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+-	else
+-		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+-
+-	return at803x_cdt_fault_length(val);
+-}
+-
+-static int qca808x_cable_test_start(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	/* perform CDT with the following configs:
+-	 * 1. disable hibernation.
+-	 * 2. force PHY working in MDI mode.
+-	 * 3. for PHY working in 1000BaseT.
+-	 * 4. configure the threshold.
+-	 */
+-
+-	ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = at803x_config_mdix(phydev, ETH_TP_MDI);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* Force 1000base-T needs to configure PMA/PMD and MII_BMCR */
+-	phydev->duplex = DUPLEX_FULL;
+-	phydev->speed = SPEED_1000;
+-	ret = genphy_c45_pma_setup_forced(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	ret = genphy_setup_forced(phydev);
+-	if (ret < 0)
+-		return ret;
+-
+-	/* configure the thresholds for open, short, pair ok test */
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
+-	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
+-
+-	return 0;
+-}
+-
+-static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+-					      u16 status)
+-{
+-	int length, result;
+-	u16 pair_code;
+-
+-	switch (pair) {
+-	case ETHTOOL_A_CABLE_PAIR_A:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_B:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_C:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_D:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	result = qca808x_cable_test_result_trans(pair_code);
+-	ethnl_cable_test_result(phydev, pair, result);
+-
+-	if (qca808x_cdt_fault_length_valid(pair_code)) {
+-		length = qca808x_cdt_fault_length(phydev, pair, result);
+-		ethnl_cable_test_fault_length(phydev, pair, length);
+-	}
+-
+-	return 0;
+-}
+-
+-static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+-{
+-	int ret, val;
+-
+-	*finished = false;
+-
+-	val = QCA808X_CDT_ENABLE_TEST |
+-	      QCA808X_CDT_LENGTH_UNIT;
+-	ret = at803x_cdt_start(phydev, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+-	if (ret)
+-		return ret;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+-	if (val < 0)
+-		return val;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+-	if (ret)
+-		return ret;
+-
+-	*finished = true;
+-
+-	return 0;
+-}
+-
+-static int qca808x_get_features(struct phy_device *phydev)
+-{
+-	int ret;
+-
+-	ret = genphy_c45_pma_read_abilities(phydev);
+-	if (ret)
+-		return ret;
+-
+-	/* The autoneg ability is not existed in bit3 of MMD7.1,
+-	 * but it is supported by qca808x PHY, so we add it here
+-	 * manually.
+-	 */
+-	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+-
+-	/* As for the qca8081 1G version chip, the 2500baseT ability is also
+-	 * existed in the bit0 of MMD1.21, we need to remove it manually if
+-	 * it is the qca8081 1G chip according to the bit0 of MMD7.0x901d.
+-	 */
+-	ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
+-	if (ret < 0)
+-		return ret;
+-
+-	if (QCA808X_PHY_CHIP_TYPE_1G & ret)
+-		linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+-
+-	return 0;
+-}
+-
+-static int qca808x_config_aneg(struct phy_device *phydev)
+-{
+-	int phy_ctrl = 0;
+-	int ret;
+-
+-	ret = at803x_prepare_config_aneg(phydev);
+-	if (ret)
+-		return ret;
+-
+-	/* The reg MII_BMCR also needs to be configured for force mode, the
+-	 * genphy_config_aneg is also needed.
+-	 */
+-	if (phydev->autoneg == AUTONEG_DISABLE)
+-		genphy_c45_pma_setup_forced(phydev);
+-
+-	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+-		phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+-
+-	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+-				     MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+-	if (ret < 0)
+-		return ret;
+-
+-	return __genphy_config_aneg(phydev, ret);
+-}
+-
+-static void qca808x_link_change_notify(struct phy_device *phydev)
+-{
+-	/* Assert interface sgmii fifo on link down, deassert it on link up,
+-	 * the interface device address is always phy address added by 1.
+-	 */
+-	mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
+-				   MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+-				   QCA8081_PHY_FIFO_RSTN,
+-				   phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+-}
+-
+-static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+-				    u16 *offload_trigger)
+-{
+-	/* Parsing specific to netdev trigger */
+-	if (test_bit(TRIGGER_NETDEV_TX, &rules))
+-		*offload_trigger |= QCA808X_LED_TX_BLINK;
+-	if (test_bit(TRIGGER_NETDEV_RX, &rules))
+-		*offload_trigger |= QCA808X_LED_RX_BLINK;
+-	if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+-		*offload_trigger |= QCA808X_LED_SPEED10_ON;
+-	if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+-		*offload_trigger |= QCA808X_LED_SPEED100_ON;
+-	if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+-		*offload_trigger |= QCA808X_LED_SPEED1000_ON;
+-	if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
+-		*offload_trigger |= QCA808X_LED_SPEED2500_ON;
+-	if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+-		*offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+-	if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+-		*offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+-
+-	if (rules && !*offload_trigger)
+-		return -EOPNOTSUPP;
+-
+-	/* Enable BLINK_CHECK_BYPASS by default to make the LED
+-	 * blink even with duplex or speed mode not enabled.
+-	 */
+-	*offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
+-
+-	return 0;
+-}
+-
+-static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+-{
+-	u16 reg;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+-
+-	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+-				  QCA808X_LED_FORCE_EN);
+-}
+-
+-static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+-				       unsigned long rules)
+-{
+-	u16 offload_trigger = 0;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+-}
+-
+-static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
+-				      unsigned long rules)
+-{
+-	u16 reg, offload_trigger = 0;
+-	int ret;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_CTRL(index);
+-
+-	ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_led_hw_control_enable(phydev, index);
+-	if (ret)
+-		return ret;
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+-			      QCA808X_LED_PATTERN_MASK,
+-			      offload_trigger);
+-}
+-
+-static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
+-{
+-	u16 reg;
+-	int val;
+-
+-	if (index > 2)
+-		return false;
+-
+-	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+-
+-	return !(val & QCA808X_LED_FORCE_EN);
+-}
+-
+-static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
+-				      unsigned long *rules)
+-{
+-	u16 reg;
+-	int val;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	/* Check if we have hw control enabled */
+-	if (qca808x_led_hw_control_status(phydev, index))
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_CTRL(index);
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+-	if (val & QCA808X_LED_TX_BLINK)
+-		set_bit(TRIGGER_NETDEV_TX, rules);
+-	if (val & QCA808X_LED_RX_BLINK)
+-		set_bit(TRIGGER_NETDEV_RX, rules);
+-	if (val & QCA808X_LED_SPEED10_ON)
+-		set_bit(TRIGGER_NETDEV_LINK_10, rules);
+-	if (val & QCA808X_LED_SPEED100_ON)
+-		set_bit(TRIGGER_NETDEV_LINK_100, rules);
+-	if (val & QCA808X_LED_SPEED1000_ON)
+-		set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+-	if (val & QCA808X_LED_SPEED2500_ON)
+-		set_bit(TRIGGER_NETDEV_LINK_2500, rules);
+-	if (val & QCA808X_LED_HALF_DUPLEX_ON)
+-		set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+-	if (val & QCA808X_LED_FULL_DUPLEX_ON)
+-		set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+-
+-	return 0;
+-}
+-
+-static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+-{
+-	u16 reg;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_CTRL(index);
+-
+-	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+-				  QCA808X_LED_PATTERN_MASK);
+-}
+-
+-static int qca808x_led_brightness_set(struct phy_device *phydev,
+-				      u8 index, enum led_brightness value)
+-{
+-	u16 reg;
+-	int ret;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	if (!value) {
+-		ret = qca808x_led_hw_control_reset(phydev, index);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+-			      QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+-			      QCA808X_LED_FORCE_EN | value ? QCA808X_LED_FORCE_ON :
+-							     QCA808X_LED_FORCE_OFF);
+-}
+-
+-static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
+-				 unsigned long *delay_on,
+-				 unsigned long *delay_off)
+-{
+-	int ret;
+-	u16 reg;
+-
+-	if (index > 2)
+-		return -EINVAL;
+-
+-	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+-
+-	/* Set blink to 50% off, 50% on at 4Hz by default */
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
+-			     QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
+-			     QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
+-	if (ret)
+-		return ret;
+-
+-	/* We use BLINK_1 for normal blinking */
+-	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+-			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+-			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
+-	if (ret)
+-		return ret;
+-
+-	/* We set blink to 4Hz, aka 250ms */
+-	*delay_on = 250 / 2;
+-	*delay_off = 250 / 2;
+-
+-	return 0;
+-}
+-
+-static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+-				    unsigned long modes)
+-{
+-	struct at803x_priv *priv = phydev->priv;
+-	bool active_low = false;
+-	u32 mode;
+-
+-	for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+-		switch (mode) {
+-		case PHY_LED_ACTIVE_LOW:
+-			active_low = true;
+-			break;
+-		default:
+-			return -EINVAL;
+-		}
+-	}
+-
+-	/* PHY polarity is global and can't be set per LED.
+-	 * To detect this, check if last requested polarity mode
+-	 * match the new one.
+-	 */
+-	if (priv->led_polarity_mode >= 0 &&
+-	    priv->led_polarity_mode != active_low) {
+-		phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
+-		return -EINVAL;
+-	}
+-
+-	/* Save the last PHY polarity mode */
+-	priv->led_polarity_mode = active_low;
+-
+-	return phy_modify_mmd(phydev, MDIO_MMD_AN,
+-			      QCA808X_MMD7_LED_POLARITY_CTRL,
+-			      QCA808X_LED_ACTIVE_HIGH,
+-			      active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+-}
+-
+ static struct phy_driver at803x_driver[] = {
+ {
+ 	/* Qualcomm Atheros AR8035 */
+@@ -1989,34 +1123,6 @@ static struct phy_driver at803x_driver[]
+ 	.read_status		= at803x_read_status,
+ 	.soft_reset		= genphy_soft_reset,
+ 	.config_aneg		= at803x_config_aneg,
+-}, {
+-	/* Qualcomm QCA8081 */
+-	PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+-	.name			= "Qualcomm QCA8081",
+-	.flags			= PHY_POLL_CABLE_TEST,
+-	.probe			= at803x_probe,
+-	.config_intr		= at803x_config_intr,
+-	.handle_interrupt	= at803x_handle_interrupt,
+-	.get_tunable		= at803x_get_tunable,
+-	.set_tunable		= at803x_set_tunable,
+-	.set_wol		= at803x_set_wol,
+-	.get_wol		= at803x_get_wol,
+-	.get_features		= qca808x_get_features,
+-	.config_aneg		= qca808x_config_aneg,
+-	.suspend		= genphy_suspend,
+-	.resume			= genphy_resume,
+-	.read_status		= qca808x_read_status,
+-	.config_init		= qca808x_config_init,
+-	.soft_reset		= qca808x_soft_reset,
+-	.cable_test_start	= qca808x_cable_test_start,
+-	.cable_test_get_status	= qca808x_cable_test_get_status,
+-	.link_change_notify	= qca808x_link_change_notify,
+-	.led_brightness_set	= qca808x_led_brightness_set,
+-	.led_blink_set		= qca808x_led_blink_set,
+-	.led_hw_is_supported	= qca808x_led_hw_is_supported,
+-	.led_hw_control_set	= qca808x_led_hw_control_set,
+-	.led_hw_control_get	= qca808x_led_hw_control_get,
+-	.led_polarity_set	= qca808x_led_polarity_set,
+ }, };
+ 
+ module_phy_driver(at803x_driver);
+@@ -2028,7 +1134,6 @@ static struct mdio_device_id __maybe_unu
+ 	{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
+ 	{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ 	{ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
+-	{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
+ 	{ }
+ };
+ 
+--- /dev/null
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -0,0 +1,934 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++#include <linux/phy.h>
++#include <linux/module.h>
++#include <linux/ethtool_netlink.h>
++
++#include "qcom.h"
++
++/* ADC threshold */
++#define QCA808X_PHY_DEBUG_ADC_THRESHOLD		0x2c80
++#define QCA808X_ADC_THRESHOLD_MASK		GENMASK(7, 0)
++#define QCA808X_ADC_THRESHOLD_80MV		0
++#define QCA808X_ADC_THRESHOLD_100MV		0xf0
++#define QCA808X_ADC_THRESHOLD_200MV		0x0f
++#define QCA808X_ADC_THRESHOLD_300MV		0xff
++
++/* CLD control */
++#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7		0x8007
++#define QCA808X_8023AZ_AFE_CTRL_MASK		GENMASK(8, 4)
++#define QCA808X_8023AZ_AFE_EN			0x90
++
++/* AZ control */
++#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL	0x8008
++#define QCA808X_MMD3_AZ_TRAINING_VAL		0x1c32
++
++#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB	0x8014
++#define QCA808X_MSE_THRESHOLD_20DB_VALUE	0x529
++
++#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB	0x800E
++#define QCA808X_MSE_THRESHOLD_17DB_VALUE	0x341
++
++#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB	0x801E
++#define QCA808X_MSE_THRESHOLD_27DB_VALUE	0x419
++
++#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB	0x8020
++#define QCA808X_MSE_THRESHOLD_28DB_VALUE	0x341
++
++#define QCA808X_PHY_MMD7_TOP_OPTION1		0x901c
++#define QCA808X_TOP_OPTION1_DATA		0x0
++
++#define QCA808X_PHY_MMD3_DEBUG_1		0xa100
++#define QCA808X_MMD3_DEBUG_1_VALUE		0x9203
++#define QCA808X_PHY_MMD3_DEBUG_2		0xa101
++#define QCA808X_MMD3_DEBUG_2_VALUE		0x48ad
++#define QCA808X_PHY_MMD3_DEBUG_3		0xa103
++#define QCA808X_MMD3_DEBUG_3_VALUE		0x1698
++#define QCA808X_PHY_MMD3_DEBUG_4		0xa105
++#define QCA808X_MMD3_DEBUG_4_VALUE		0x8001
++#define QCA808X_PHY_MMD3_DEBUG_5		0xa106
++#define QCA808X_MMD3_DEBUG_5_VALUE		0x1111
++#define QCA808X_PHY_MMD3_DEBUG_6		0xa011
++#define QCA808X_MMD3_DEBUG_6_VALUE		0x5f85
++
++/* master/slave seed config */
++#define QCA808X_PHY_DEBUG_LOCAL_SEED		9
++#define QCA808X_MASTER_SLAVE_SEED_ENABLE	BIT(1)
++#define QCA808X_MASTER_SLAVE_SEED_CFG		GENMASK(12, 2)
++#define QCA808X_MASTER_SLAVE_SEED_RANGE		0x32
++
++/* Hibernation yields lower power consumpiton in contrast with normal operation mode.
++ * when the copper cable is unplugged, the PHY enters into hibernation mode in about 10s.
++ */
++#define QCA808X_DBG_AN_TEST			0xb
++#define QCA808X_HIBERNATION_EN			BIT(15)
++
++#define QCA808X_CDT_ENABLE_TEST			BIT(15)
++#define QCA808X_CDT_INTER_CHECK_DIS		BIT(13)
++#define QCA808X_CDT_STATUS			BIT(11)
++#define QCA808X_CDT_LENGTH_UNIT			BIT(10)
++
++#define QCA808X_MMD3_CDT_STATUS			0x8064
++#define QCA808X_MMD3_CDT_DIAG_PAIR_A		0x8065
++#define QCA808X_MMD3_CDT_DIAG_PAIR_B		0x8066
++#define QCA808X_MMD3_CDT_DIAG_PAIR_C		0x8067
++#define QCA808X_MMD3_CDT_DIAG_PAIR_D		0x8068
++#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT	GENMASK(15, 8)
++#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT	GENMASK(7, 0)
++
++#define QCA808X_CDT_CODE_PAIR_A			GENMASK(15, 12)
++#define QCA808X_CDT_CODE_PAIR_B			GENMASK(11, 8)
++#define QCA808X_CDT_CODE_PAIR_C			GENMASK(7, 4)
++#define QCA808X_CDT_CODE_PAIR_D			GENMASK(3, 0)
++
++#define QCA808X_CDT_STATUS_STAT_TYPE		GENMASK(1, 0)
++#define QCA808X_CDT_STATUS_STAT_FAIL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
++#define QCA808X_CDT_STATUS_STAT_NORMAL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
++#define QCA808X_CDT_STATUS_STAT_SAME_OPEN	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
++#define QCA808X_CDT_STATUS_STAT_SAME_SHORT	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
++
++#define QCA808X_CDT_STATUS_STAT_MDI		GENMASK(3, 2)
++#define QCA808X_CDT_STATUS_STAT_MDI1		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
++#define QCA808X_CDT_STATUS_STAT_MDI2		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
++#define QCA808X_CDT_STATUS_STAT_MDI3		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
++
++/* NORMAL are MDI with type set to 0 */
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI1
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI1)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI1)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI2
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI2)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI2)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI3
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI3)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI3)
++
++/* Added for reference of existence but should be handled by wait_for_completion already */
++#define QCA808X_CDT_STATUS_STAT_BUSY		(BIT(1) | BIT(3))
++
++#define QCA808X_MMD7_LED_GLOBAL			0x8073
++#define QCA808X_LED_BLINK_1			GENMASK(11, 6)
++#define QCA808X_LED_BLINK_2			GENMASK(5, 0)
++/* Values are the same for both BLINK_1 and BLINK_2 */
++#define QCA808X_LED_BLINK_FREQ_MASK		GENMASK(5, 3)
++#define QCA808X_LED_BLINK_FREQ_2HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
++#define QCA808X_LED_BLINK_FREQ_4HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
++#define QCA808X_LED_BLINK_FREQ_8HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
++#define QCA808X_LED_BLINK_FREQ_16HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
++#define QCA808X_LED_BLINK_FREQ_32HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
++#define QCA808X_LED_BLINK_FREQ_64HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
++#define QCA808X_LED_BLINK_FREQ_128HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
++#define QCA808X_LED_BLINK_FREQ_256HZ		FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
++#define QCA808X_LED_BLINK_DUTY_MASK		GENMASK(2, 0)
++#define QCA808X_LED_BLINK_DUTY_50_50		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
++#define QCA808X_LED_BLINK_DUTY_75_25		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
++#define QCA808X_LED_BLINK_DUTY_25_75		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
++#define QCA808X_LED_BLINK_DUTY_33_67		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
++#define QCA808X_LED_BLINK_DUTY_67_33		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
++#define QCA808X_LED_BLINK_DUTY_17_83		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
++#define QCA808X_LED_BLINK_DUTY_83_17		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
++#define QCA808X_LED_BLINK_DUTY_8_92		FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
++
++#define QCA808X_MMD7_LED2_CTRL			0x8074
++#define QCA808X_MMD7_LED2_FORCE_CTRL		0x8075
++#define QCA808X_MMD7_LED1_CTRL			0x8076
++#define QCA808X_MMD7_LED1_FORCE_CTRL		0x8077
++#define QCA808X_MMD7_LED0_CTRL			0x8078
++#define QCA808X_MMD7_LED_CTRL(x)		(0x8078 - ((x) * 2))
++
++/* LED hw control pattern is the same for every LED */
++#define QCA808X_LED_PATTERN_MASK		GENMASK(15, 0)
++#define QCA808X_LED_SPEED2500_ON		BIT(15)
++#define QCA808X_LED_SPEED2500_BLINK		BIT(14)
++/* Follow blink trigger even if duplex or speed condition doesn't match */
++#define QCA808X_LED_BLINK_CHECK_BYPASS		BIT(13)
++#define QCA808X_LED_FULL_DUPLEX_ON		BIT(12)
++#define QCA808X_LED_HALF_DUPLEX_ON		BIT(11)
++#define QCA808X_LED_TX_BLINK			BIT(10)
++#define QCA808X_LED_RX_BLINK			BIT(9)
++#define QCA808X_LED_TX_ON_10MS			BIT(8)
++#define QCA808X_LED_RX_ON_10MS			BIT(7)
++#define QCA808X_LED_SPEED1000_ON		BIT(6)
++#define QCA808X_LED_SPEED100_ON			BIT(5)
++#define QCA808X_LED_SPEED10_ON			BIT(4)
++#define QCA808X_LED_COLLISION_BLINK		BIT(3)
++#define QCA808X_LED_SPEED1000_BLINK		BIT(2)
++#define QCA808X_LED_SPEED100_BLINK		BIT(1)
++#define QCA808X_LED_SPEED10_BLINK		BIT(0)
++
++#define QCA808X_MMD7_LED0_FORCE_CTRL		0x8079
++#define QCA808X_MMD7_LED_FORCE_CTRL(x)		(0x8079 - ((x) * 2))
++
++/* LED force ctrl is the same for every LED
++ * No documentation exist for this, not even internal one
++ * with NDA as QCOM gives only info about configuring
++ * hw control pattern rules and doesn't indicate any way
++ * to force the LED to specific mode.
++ * These define comes from reverse and testing and maybe
++ * lack of some info or some info are not entirely correct.
++ * For the basic LED control and hw control these finding
++ * are enough to support LED control in all the required APIs.
++ *
++ * On doing some comparison with implementation with qca807x,
++ * it was found that it's 1:1 equal to it and confirms all the
++ * reverse done. It was also found further specification with the
++ * force mode and the blink modes.
++ */
++#define QCA808X_LED_FORCE_EN			BIT(15)
++#define QCA808X_LED_FORCE_MODE_MASK		GENMASK(14, 13)
++#define QCA808X_LED_FORCE_BLINK_1		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
++#define QCA808X_LED_FORCE_BLINK_2		FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
++#define QCA808X_LED_FORCE_ON			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
++#define QCA808X_LED_FORCE_OFF			FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
++
++#define QCA808X_MMD7_LED_POLARITY_CTRL		0x901a
++/* QSDK sets by default 0x46 to this reg that sets BIT 6 for
++ * LED to active high. It's not clear what BIT 3 and BIT 4 does.
++ */
++#define QCA808X_LED_ACTIVE_HIGH			BIT(6)
++
++/* QCA808X 1G chip type */
++#define QCA808X_PHY_MMD7_CHIP_TYPE		0x901d
++#define QCA808X_PHY_CHIP_TYPE_1G		BIT(0)
++
++#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL	0x9072
++#define QCA8081_PHY_FIFO_RSTN			BIT(11)
++
++#define QCA8081_PHY_ID				0x004dd101
++
++MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
++MODULE_AUTHOR("Matus Ujhelyi");
++MODULE_LICENSE("GPL");
++
++struct qca808x_priv {
++	int led_polarity_mode;
++};
++
++static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Enable fast retrain */
++	ret = genphy_c45_fast_retrain(phydev, true);
++	if (ret)
++		return ret;
++
++	phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
++		      QCA808X_TOP_OPTION1_DATA);
++	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
++		      QCA808X_MSE_THRESHOLD_20DB_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
++		      QCA808X_MSE_THRESHOLD_17DB_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
++		      QCA808X_MSE_THRESHOLD_27DB_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
++		      QCA808X_MSE_THRESHOLD_28DB_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
++		      QCA808X_MMD3_DEBUG_1_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
++		      QCA808X_MMD3_DEBUG_4_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
++		      QCA808X_MMD3_DEBUG_5_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
++		      QCA808X_MMD3_DEBUG_3_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
++		      QCA808X_MMD3_DEBUG_6_VALUE);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
++		      QCA808X_MMD3_DEBUG_2_VALUE);
++
++	return 0;
++}
++
++static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
++{
++	u16 seed_value;
++
++	if (!enable)
++		return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
++				QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
++
++	seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
++	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
++			QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
++			FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
++			QCA808X_MASTER_SLAVE_SEED_ENABLE);
++}
++
++static bool qca808x_is_prefer_master(struct phy_device *phydev)
++{
++	return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
++		(phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
++}
++
++static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
++{
++	return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
++}
++
++static int qca808x_probe(struct phy_device *phydev)
++{
++	struct device *dev = &phydev->mdio.dev;
++	struct qca808x_priv *priv;
++
++	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	/* Init LED polarity mode to -1 */
++	priv->led_polarity_mode = -1;
++
++	phydev->priv = priv;
++
++	return 0;
++}
++
++static int qca808x_config_init(struct phy_device *phydev)
++{
++	int ret;
++
++	/* Active adc&vga on 802.3az for the link 1000M and 100M */
++	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
++			     QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
++	if (ret)
++		return ret;
++
++	/* Adjust the threshold on 802.3az for the link 1000M */
++	ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
++			    QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
++			    QCA808X_MMD3_AZ_TRAINING_VAL);
++	if (ret)
++		return ret;
++
++	if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
++		/* Config the fast retrain for the link 2500M */
++		ret = qca808x_phy_fast_retrain_config(phydev);
++		if (ret)
++			return ret;
++
++		ret = genphy_read_master_slave(phydev);
++		if (ret < 0)
++			return ret;
++
++		if (!qca808x_is_prefer_master(phydev)) {
++			/* Enable seed and configure lower ramdom seed to make phy
++			 * linked as slave mode.
++			 */
++			ret = qca808x_phy_ms_seed_enable(phydev, true);
++			if (ret)
++				return ret;
++		}
++	}
++
++	/* Configure adc threshold as 100mv for the link 10M */
++	return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
++				     QCA808X_ADC_THRESHOLD_MASK,
++				     QCA808X_ADC_THRESHOLD_100MV);
++}
++
++static int qca808x_read_status(struct phy_device *phydev)
++{
++	struct at803x_ss_mask ss_mask = { 0 };
++	int ret;
++
++	ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
++	if (ret < 0)
++		return ret;
++
++	linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
++			 ret & MDIO_AN_10GBT_STAT_LP2_5G);
++
++	ret = genphy_read_status(phydev);
++	if (ret)
++		return ret;
++
++	/* qca8081 takes the different bits for speed value from at803x */
++	ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
++	ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
++	ret = at803x_read_specific_status(phydev, ss_mask);
++	if (ret < 0)
++		return ret;
++
++	if (phydev->link) {
++		if (phydev->speed == SPEED_2500)
++			phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
++		else
++			phydev->interface = PHY_INTERFACE_MODE_SGMII;
++	} else {
++		/* generate seed as a lower random value to make PHY linked as SLAVE easily,
++		 * except for master/slave configuration fault detected or the master mode
++		 * preferred.
++		 *
++		 * the reason for not putting this code into the function link_change_notify is
++		 * the corner case where the link partner is also the qca8081 PHY and the seed
++		 * value is configured as the same value, the link can't be up and no link change
++		 * occurs.
++		 */
++		if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
++			if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
++			    qca808x_is_prefer_master(phydev)) {
++				qca808x_phy_ms_seed_enable(phydev, false);
++			} else {
++				qca808x_phy_ms_seed_enable(phydev, true);
++			}
++		}
++	}
++
++	return 0;
++}
++
++static int qca808x_soft_reset(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = genphy_soft_reset(phydev);
++	if (ret < 0)
++		return ret;
++
++	if (qca808x_has_fast_retrain_or_slave_seed(phydev))
++		ret = qca808x_phy_ms_seed_enable(phydev, true);
++
++	return ret;
++}
++
++static bool qca808x_cdt_fault_length_valid(int cdt_code)
++{
++	switch (cdt_code) {
++	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
++		return true;
++	default:
++		return false;
++	}
++}
++
++static int qca808x_cable_test_result_trans(int cdt_code)
++{
++	switch (cdt_code) {
++	case QCA808X_CDT_STATUS_STAT_NORMAL:
++		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
++	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
++		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
++	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
++		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
++		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
++	case QCA808X_CDT_STATUS_STAT_FAIL:
++	default:
++		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
++	}
++}
++
++static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
++				    int result)
++{
++	int val;
++	u32 cdt_length_reg = 0;
++
++	switch (pair) {
++	case ETHTOOL_A_CABLE_PAIR_A:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_B:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_C:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_D:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
++	if (val < 0)
++		return val;
++
++	if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
++		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
++	else
++		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
++
++	return at803x_cdt_fault_length(val);
++}
++
++static int qca808x_cable_test_start(struct phy_device *phydev)
++{
++	int ret;
++
++	/* perform CDT with the following configs:
++	 * 1. disable hibernation.
++	 * 2. force PHY working in MDI mode.
++	 * 3. for PHY working in 1000BaseT.
++	 * 4. configure the threshold.
++	 */
++
++	ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
++	if (ret < 0)
++		return ret;
++
++	ret = at803x_config_mdix(phydev, ETH_TP_MDI);
++	if (ret < 0)
++		return ret;
++
++	/* Force 1000base-T needs to configure PMA/PMD and MII_BMCR */
++	phydev->duplex = DUPLEX_FULL;
++	phydev->speed = SPEED_1000;
++	ret = genphy_c45_pma_setup_forced(phydev);
++	if (ret < 0)
++		return ret;
++
++	ret = genphy_setup_forced(phydev);
++	if (ret < 0)
++		return ret;
++
++	/* configure the thresholds for open, short, pair ok test */
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
++	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
++
++	return 0;
++}
++
++static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
++					      u16 status)
++{
++	int length, result;
++	u16 pair_code;
++
++	switch (pair) {
++	case ETHTOOL_A_CABLE_PAIR_A:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_B:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_C:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_D:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	result = qca808x_cable_test_result_trans(pair_code);
++	ethnl_cable_test_result(phydev, pair, result);
++
++	if (qca808x_cdt_fault_length_valid(pair_code)) {
++		length = qca808x_cdt_fault_length(phydev, pair, result);
++		ethnl_cable_test_fault_length(phydev, pair, length);
++	}
++
++	return 0;
++}
++
++static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
++{
++	int ret, val;
++
++	*finished = false;
++
++	val = QCA808X_CDT_ENABLE_TEST |
++	      QCA808X_CDT_LENGTH_UNIT;
++	ret = at803x_cdt_start(phydev, val);
++	if (ret)
++		return ret;
++
++	ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
++	if (ret)
++		return ret;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
++	if (val < 0)
++		return val;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
++	if (ret)
++		return ret;
++
++	*finished = true;
++
++	return 0;
++}
++
++static int qca808x_get_features(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = genphy_c45_pma_read_abilities(phydev);
++	if (ret)
++		return ret;
++
++	/* The autoneg ability is not existed in bit3 of MMD7.1,
++	 * but it is supported by qca808x PHY, so we add it here
++	 * manually.
++	 */
++	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
++
++	/* As for the qca8081 1G version chip, the 2500baseT ability is also
++	 * existed in the bit0 of MMD1.21, we need to remove it manually if
++	 * it is the qca8081 1G chip according to the bit0 of MMD7.0x901d.
++	 */
++	ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
++	if (ret < 0)
++		return ret;
++
++	if (QCA808X_PHY_CHIP_TYPE_1G & ret)
++		linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
++
++	return 0;
++}
++
++static int qca808x_config_aneg(struct phy_device *phydev)
++{
++	int phy_ctrl = 0;
++	int ret;
++
++	ret = at803x_prepare_config_aneg(phydev);
++	if (ret)
++		return ret;
++
++	/* The reg MII_BMCR also needs to be configured for force mode, the
++	 * genphy_config_aneg is also needed.
++	 */
++	if (phydev->autoneg == AUTONEG_DISABLE)
++		genphy_c45_pma_setup_forced(phydev);
++
++	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
++		phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
++
++	ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
++				     MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
++	if (ret < 0)
++		return ret;
++
++	return __genphy_config_aneg(phydev, ret);
++}
++
++static void qca808x_link_change_notify(struct phy_device *phydev)
++{
++	/* Assert interface sgmii fifo on link down, deassert it on link up,
++	 * the interface device address is always phy address added by 1.
++	 */
++	mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
++				   MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
++				   QCA8081_PHY_FIFO_RSTN,
++				   phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
++}
++
++static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
++				    u16 *offload_trigger)
++{
++	/* Parsing specific to netdev trigger */
++	if (test_bit(TRIGGER_NETDEV_TX, &rules))
++		*offload_trigger |= QCA808X_LED_TX_BLINK;
++	if (test_bit(TRIGGER_NETDEV_RX, &rules))
++		*offload_trigger |= QCA808X_LED_RX_BLINK;
++	if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED10_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED100_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED1000_ON;
++	if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
++		*offload_trigger |= QCA808X_LED_SPEED2500_ON;
++	if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
++		*offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
++	if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
++		*offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
++
++	if (rules && !*offload_trigger)
++		return -EOPNOTSUPP;
++
++	/* Enable BLINK_CHECK_BYPASS by default to make the LED
++	 * blink even with duplex or speed mode not enabled.
++	 */
++	*offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
++
++	return 0;
++}
++
++static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
++				  QCA808X_LED_FORCE_EN);
++}
++
++static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
++				       unsigned long rules)
++{
++	u16 offload_trigger = 0;
++
++	if (index > 2)
++		return -EINVAL;
++
++	return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
++}
++
++static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
++				      unsigned long rules)
++{
++	u16 reg, offload_trigger = 0;
++	int ret;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
++	if (ret)
++		return ret;
++
++	ret = qca808x_led_hw_control_enable(phydev, index);
++	if (ret)
++		return ret;
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			      QCA808X_LED_PATTERN_MASK,
++			      offload_trigger);
++}
++
++static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++	int val;
++
++	if (index > 2)
++		return false;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
++
++	return !(val & QCA808X_LED_FORCE_EN);
++}
++
++static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
++				      unsigned long *rules)
++{
++	u16 reg;
++	int val;
++
++	if (index > 2)
++		return -EINVAL;
++
++	/* Check if we have hw control enabled */
++	if (qca808x_led_hw_control_status(phydev, index))
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
++	if (val & QCA808X_LED_TX_BLINK)
++		set_bit(TRIGGER_NETDEV_TX, rules);
++	if (val & QCA808X_LED_RX_BLINK)
++		set_bit(TRIGGER_NETDEV_RX, rules);
++	if (val & QCA808X_LED_SPEED10_ON)
++		set_bit(TRIGGER_NETDEV_LINK_10, rules);
++	if (val & QCA808X_LED_SPEED100_ON)
++		set_bit(TRIGGER_NETDEV_LINK_100, rules);
++	if (val & QCA808X_LED_SPEED1000_ON)
++		set_bit(TRIGGER_NETDEV_LINK_1000, rules);
++	if (val & QCA808X_LED_SPEED2500_ON)
++		set_bit(TRIGGER_NETDEV_LINK_2500, rules);
++	if (val & QCA808X_LED_HALF_DUPLEX_ON)
++		set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
++	if (val & QCA808X_LED_FULL_DUPLEX_ON)
++		set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
++
++	return 0;
++}
++
++static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
++{
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_CTRL(index);
++
++	return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
++				  QCA808X_LED_PATTERN_MASK);
++}
++
++static int qca808x_led_brightness_set(struct phy_device *phydev,
++				      u8 index, enum led_brightness value)
++{
++	u16 reg;
++	int ret;
++
++	if (index > 2)
++		return -EINVAL;
++
++	if (!value) {
++		ret = qca808x_led_hw_control_reset(phydev, index);
++		if (ret)
++			return ret;
++	}
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			      QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
++			      QCA808X_LED_FORCE_EN | value ? QCA808X_LED_FORCE_ON :
++							     QCA808X_LED_FORCE_OFF);
++}
++
++static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
++				 unsigned long *delay_on,
++				 unsigned long *delay_off)
++{
++	int ret;
++	u16 reg;
++
++	if (index > 2)
++		return -EINVAL;
++
++	reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
++
++	/* Set blink to 50% off, 50% on at 4Hz by default */
++	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
++			     QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
++			     QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
++	if (ret)
++		return ret;
++
++	/* We use BLINK_1 for normal blinking */
++	ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
++			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
++			     QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
++	if (ret)
++		return ret;
++
++	/* We set blink to 4Hz, aka 250ms */
++	*delay_on = 250 / 2;
++	*delay_off = 250 / 2;
++
++	return 0;
++}
++
++static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
++				    unsigned long modes)
++{
++	struct qca808x_priv *priv = phydev->priv;
++	bool active_low = false;
++	u32 mode;
++
++	for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
++		switch (mode) {
++		case PHY_LED_ACTIVE_LOW:
++			active_low = true;
++			break;
++		default:
++			return -EINVAL;
++		}
++	}
++
++	/* PHY polarity is global and can't be set per LED.
++	 * To detect this, check if last requested polarity mode
++	 * match the new one.
++	 */
++	if (priv->led_polarity_mode >= 0 &&
++	    priv->led_polarity_mode != active_low) {
++		phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
++		return -EINVAL;
++	}
++
++	/* Save the last PHY polarity mode */
++	priv->led_polarity_mode = active_low;
++
++	return phy_modify_mmd(phydev, MDIO_MMD_AN,
++			      QCA808X_MMD7_LED_POLARITY_CTRL,
++			      QCA808X_LED_ACTIVE_HIGH,
++			      active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
++}
++
++static struct phy_driver qca808x_driver[] = {
++{
++	/* Qualcomm QCA8081 */
++	PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
++	.name			= "Qualcomm QCA8081",
++	.flags			= PHY_POLL_CABLE_TEST,
++	.probe			= qca808x_probe,
++	.config_intr		= at803x_config_intr,
++	.handle_interrupt	= at803x_handle_interrupt,
++	.get_tunable		= at803x_get_tunable,
++	.set_tunable		= at803x_set_tunable,
++	.set_wol		= at803x_set_wol,
++	.get_wol		= at803x_get_wol,
++	.get_features		= qca808x_get_features,
++	.config_aneg		= qca808x_config_aneg,
++	.suspend		= genphy_suspend,
++	.resume			= genphy_resume,
++	.read_status		= qca808x_read_status,
++	.config_init		= qca808x_config_init,
++	.soft_reset		= qca808x_soft_reset,
++	.cable_test_start	= qca808x_cable_test_start,
++	.cable_test_get_status	= qca808x_cable_test_get_status,
++	.link_change_notify	= qca808x_link_change_notify,
++	.led_brightness_set	= qca808x_led_brightness_set,
++	.led_blink_set		= qca808x_led_blink_set,
++	.led_hw_is_supported	= qca808x_led_hw_is_supported,
++	.led_hw_control_set	= qca808x_led_hw_control_set,
++	.led_hw_control_get	= qca808x_led_hw_control_get,
++	.led_polarity_set	= qca808x_led_polarity_set,
++}, };
++
++module_phy_driver(qca808x_driver);
++
++static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
++	{ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
++	{ }
++};
++
++MODULE_DEVICE_TABLE(mdio, qca808x_tbl);

+ 28 - 0
target/linux/generic/backport-6.6/714-v6.8-01-net-phy-make-addr-type-u8-in-phy_package_shared-stru.patch

@@ -0,0 +1,28 @@
+From ebb30ccbbdbd6fae5177b676da4f4ac92bb4f635 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 15 Dec 2023 14:15:31 +0100
+Subject: [PATCH 1/4] net: phy: make addr type u8 in phy_package_shared struct
+
+Switch addr type in phy_package_shared struct to u8.
+
+The value is already checked to be non negative and to be less than
+PHY_MAX_ADDR, hence u8 is better suited than using int.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Russell King (Oracle) <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ include/linux/phy.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -338,7 +338,7 @@ struct mdio_bus_stats {
+  * phy_package_leave().
+  */
+ struct phy_package_shared {
+-	int addr;
++	u8 addr;
+ 	refcount_t refcnt;
+ 	unsigned long flags;
+ 	size_t priv_size;

+ 341 - 0
target/linux/generic/backport-6.6/714-v6.8-02-net-phy-extend-PHY-package-API-to-support-multiple-g.patch

@@ -0,0 +1,341 @@
+From 9eea577eb1155fe4a183bc5e7bf269b0b2e7a6ba Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 15 Dec 2023 14:15:32 +0100
+Subject: [PATCH 2/4] net: phy: extend PHY package API to support multiple
+ global address
+
+Current API for PHY package are limited to single address to configure
+global settings for the PHY package.
+
+It was found that some PHY package (for example the qca807x, a PHY
+package that is shipped with a bundle of 5 PHY) requires multiple PHY
+address to configure global settings. An example scenario is a PHY that
+have a dedicated PHY for PSGMII/serdes calibrarion and have a specific
+PHY in the package where the global PHY mode is set and affects every
+other PHY in the package.
+
+Change the API in the following way:
+- Change phy_package_join() to take the base addr of the PHY package
+  instead of the global PHY addr.
+- Make __/phy_package_write/read() require an additional arg that
+  select what global PHY address to use by passing the offset from the
+  base addr passed on phy_package_join().
+
+Each user of this API is updated to follow this new implementation
+following a pattern where an enum is defined to declare the offset of the
+addr.
+
+We also drop the check if shared is defined as any user of the
+phy_package_read/write is expected to use phy_package_join first. Misuse
+of this will correctly trigger a kernel panic for NULL pointer
+exception.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/bcm54140.c       | 16 ++++++--
+ drivers/net/phy/mscc/mscc.h      |  5 +++
+ drivers/net/phy/mscc/mscc_main.c |  4 +-
+ drivers/net/phy/phy_device.c     | 35 +++++++++--------
+ include/linux/phy.h              | 64 +++++++++++++++++++++-----------
+ 5 files changed, 80 insertions(+), 44 deletions(-)
+
+--- a/drivers/net/phy/bcm54140.c
++++ b/drivers/net/phy/bcm54140.c
+@@ -128,6 +128,10 @@
+ #define BCM54140_DEFAULT_DOWNSHIFT 5
+ #define BCM54140_MAX_DOWNSHIFT 9
+ 
++enum bcm54140_global_phy {
++	BCM54140_BASE_ADDR = 0,
++};
++
+ struct bcm54140_priv {
+ 	int port;
+ 	int base_addr;
+@@ -429,11 +433,13 @@ static int bcm54140_base_read_rdb(struct
+ 	int ret;
+ 
+ 	phy_lock_mdio_bus(phydev);
+-	ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
++	ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
++				  MII_BCM54XX_RDB_ADDR, rdb);
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = __phy_package_read(phydev, MII_BCM54XX_RDB_DATA);
++	ret = __phy_package_read(phydev, BCM54140_BASE_ADDR,
++				 MII_BCM54XX_RDB_DATA);
+ 
+ out:
+ 	phy_unlock_mdio_bus(phydev);
+@@ -446,11 +452,13 @@ static int bcm54140_base_write_rdb(struc
+ 	int ret;
+ 
+ 	phy_lock_mdio_bus(phydev);
+-	ret = __phy_package_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
++	ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
++				  MII_BCM54XX_RDB_ADDR, rdb);
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	ret = __phy_package_write(phydev, MII_BCM54XX_RDB_DATA, val);
++	ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
++				  MII_BCM54XX_RDB_DATA, val);
+ 
+ out:
+ 	phy_unlock_mdio_bus(phydev);
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -416,6 +416,11 @@ struct vsc8531_private {
+  * gpio_lock: used for PHC operations. Common for all PHYs as the load/save GPIO
+  * is shared.
+  */
++
++enum vsc85xx_global_phy {
++	VSC88XX_BASE_ADDR = 0,
++};
++
+ struct vsc85xx_shared_private {
+ 	struct mutex gpio_lock;
+ };
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -711,7 +711,7 @@ int phy_base_write(struct phy_device *ph
+ 		dump_stack();
+ 	}
+ 
+-	return __phy_package_write(phydev, regnum, val);
++	return __phy_package_write(phydev, VSC88XX_BASE_ADDR, regnum, val);
+ }
+ 
+ /* phydev->bus->mdio_lock should be locked when using this function */
+@@ -722,7 +722,7 @@ int phy_base_read(struct phy_device *phy
+ 		dump_stack();
+ 	}
+ 
+-	return __phy_package_read(phydev, regnum);
++	return __phy_package_read(phydev, VSC88XX_BASE_ADDR, regnum);
+ }
+ 
+ u32 vsc85xx_csr_read(struct phy_device *phydev,
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1658,20 +1658,22 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_1
+ /**
+  * phy_package_join - join a common PHY group
+  * @phydev: target phy_device struct
+- * @addr: cookie and PHY address for global register access
++ * @base_addr: cookie and base PHY address of PHY package for offset
++ *   calculation of global register access
+  * @priv_size: if non-zero allocate this amount of bytes for private data
+  *
+  * This joins a PHY group and provides a shared storage for all phydevs in
+  * this group. This is intended to be used for packages which contain
+  * more than one PHY, for example a quad PHY transceiver.
+  *
+- * The addr parameter serves as a cookie which has to have the same value
+- * for all members of one group and as a PHY address to access generic
+- * registers of a PHY package. Usually, one of the PHY addresses of the
+- * different PHYs in the package provides access to these global registers.
++ * The base_addr parameter serves as cookie which has to have the same values
++ * for all members of one group and as the base PHY address of the PHY package
++ * for offset calculation to access generic registers of a PHY package.
++ * Usually, one of the PHY addresses of the different PHYs in the package
++ * provides access to these global registers.
+  * The address which is given here, will be used in the phy_package_read()
+- * and phy_package_write() convenience functions. If your PHY doesn't have
+- * global registers you can just pick any of the PHY addresses.
++ * and phy_package_write() convenience functions as base and added to the
++ * passed offset in those functions.
+  *
+  * This will set the shared pointer of the phydev to the shared storage.
+  * If this is the first call for a this cookie the shared storage will be
+@@ -1681,17 +1683,17 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_1
+  * Returns < 1 on error, 0 on success. Esp. calling phy_package_join()
+  * with the same cookie but a different priv_size is an error.
+  */
+-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size)
++int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
+ {
+ 	struct mii_bus *bus = phydev->mdio.bus;
+ 	struct phy_package_shared *shared;
+ 	int ret;
+ 
+-	if (addr < 0 || addr >= PHY_MAX_ADDR)
++	if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&bus->shared_lock);
+-	shared = bus->shared[addr];
++	shared = bus->shared[base_addr];
+ 	if (!shared) {
+ 		ret = -ENOMEM;
+ 		shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+@@ -1703,9 +1705,9 @@ int phy_package_join(struct phy_device *
+ 				goto err_free;
+ 			shared->priv_size = priv_size;
+ 		}
+-		shared->addr = addr;
++		shared->base_addr = base_addr;
+ 		refcount_set(&shared->refcnt, 1);
+-		bus->shared[addr] = shared;
++		bus->shared[base_addr] = shared;
+ 	} else {
+ 		ret = -EINVAL;
+ 		if (priv_size && priv_size != shared->priv_size)
+@@ -1743,7 +1745,7 @@ void phy_package_leave(struct phy_device
+ 		return;
+ 
+ 	if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
+-		bus->shared[shared->addr] = NULL;
++		bus->shared[shared->base_addr] = NULL;
+ 		mutex_unlock(&bus->shared_lock);
+ 		kfree(shared->priv);
+ 		kfree(shared);
+@@ -1762,7 +1764,8 @@ static void devm_phy_package_leave(struc
+  * devm_phy_package_join - resource managed phy_package_join()
+  * @dev: device that is registering this PHY package
+  * @phydev: target phy_device struct
+- * @addr: cookie and PHY address for global register access
++ * @base_addr: cookie and base PHY address of PHY package for offset
++ *   calculation of global register access
+  * @priv_size: if non-zero allocate this amount of bytes for private data
+  *
+  * Managed phy_package_join(). Shared storage fetched by this function,
+@@ -1770,7 +1773,7 @@ static void devm_phy_package_leave(struc
+  * phy_package_join() for more information.
+  */
+ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+-			  int addr, size_t priv_size)
++			  int base_addr, size_t priv_size)
+ {
+ 	struct phy_device **ptr;
+ 	int ret;
+@@ -1780,7 +1783,7 @@ int devm_phy_package_join(struct device
+ 	if (!ptr)
+ 		return -ENOMEM;
+ 
+-	ret = phy_package_join(phydev, addr, priv_size);
++	ret = phy_package_join(phydev, base_addr, priv_size);
+ 
+ 	if (!ret) {
+ 		*ptr = phydev;
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -327,7 +327,8 @@ struct mdio_bus_stats {
+ 
+ /**
+  * struct phy_package_shared - Shared information in PHY packages
+- * @addr: Common PHY address used to combine PHYs in one package
++ * @base_addr: Base PHY address of PHY package used to combine PHYs
++ *   in one package and for offset calculation of phy_package_read/write
+  * @refcnt: Number of PHYs connected to this shared data
+  * @flags: Initialization of PHY package
+  * @priv_size: Size of the shared private data @priv
+@@ -338,7 +339,7 @@ struct mdio_bus_stats {
+  * phy_package_leave().
+  */
+ struct phy_package_shared {
+-	u8 addr;
++	u8 base_addr;
+ 	refcount_t refcnt;
+ 	unsigned long flags;
+ 	size_t priv_size;
+@@ -1969,10 +1970,10 @@ int phy_ethtool_get_link_ksettings(struc
+ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+ 				   const struct ethtool_link_ksettings *cmd);
+ int phy_ethtool_nway_reset(struct net_device *ndev);
+-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size);
++int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
+ void phy_package_leave(struct phy_device *phydev);
+ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+-			  int addr, size_t priv_size);
++			  int base_addr, size_t priv_size);
+ 
+ int __init mdio_bus_init(void);
+ void mdio_bus_exit(void);
+@@ -1995,46 +1996,65 @@ int __phy_hwtstamp_set(struct phy_device
+ 		       struct kernel_hwtstamp_config *config,
+ 		       struct netlink_ext_ack *extack);
+ 
+-static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
++static inline int phy_package_address(struct phy_device *phydev,
++				      unsigned int addr_offset)
+ {
+ 	struct phy_package_shared *shared = phydev->shared;
++	u8 base_addr = shared->base_addr;
+ 
+-	if (!shared)
++	if (addr_offset >= PHY_MAX_ADDR - base_addr)
+ 		return -EIO;
+ 
+-	return mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
++	/* we know that addr will be in the range 0..31 and thus the
++	 * implicit cast to a signed int is not a problem.
++	 */
++	return base_addr + addr_offset;
+ }
+ 
+-static inline int __phy_package_read(struct phy_device *phydev, u32 regnum)
++static inline int phy_package_read(struct phy_device *phydev,
++				   unsigned int addr_offset, u32 regnum)
+ {
+-	struct phy_package_shared *shared = phydev->shared;
++	int addr = phy_package_address(phydev, addr_offset);
+ 
+-	if (!shared)
+-		return -EIO;
++	if (addr < 0)
++		return addr;
++
++	return mdiobus_read(phydev->mdio.bus, addr, regnum);
++}
++
++static inline int __phy_package_read(struct phy_device *phydev,
++				     unsigned int addr_offset, u32 regnum)
++{
++	int addr = phy_package_address(phydev, addr_offset);
++
++	if (addr < 0)
++		return addr;
+ 
+-	return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
++	return __mdiobus_read(phydev->mdio.bus, addr, regnum);
+ }
+ 
+ static inline int phy_package_write(struct phy_device *phydev,
+-				    u32 regnum, u16 val)
++				    unsigned int addr_offset, u32 regnum,
++				    u16 val)
+ {
+-	struct phy_package_shared *shared = phydev->shared;
++	int addr = phy_package_address(phydev, addr_offset);
+ 
+-	if (!shared)
+-		return -EIO;
++	if (addr < 0)
++		return addr;
+ 
+-	return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
++	return mdiobus_write(phydev->mdio.bus, addr, regnum, val);
+ }
+ 
+ static inline int __phy_package_write(struct phy_device *phydev,
+-				      u32 regnum, u16 val)
++				      unsigned int addr_offset, u32 regnum,
++				      u16 val)
+ {
+-	struct phy_package_shared *shared = phydev->shared;
++	int addr = phy_package_address(phydev, addr_offset);
+ 
+-	if (!shared)
+-		return -EIO;
++	if (addr < 0)
++		return addr;
+ 
+-	return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
++	return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
+ }
+ 
+ static inline bool __phy_package_set_once(struct phy_device *phydev,

+ 116 - 0
target/linux/generic/backport-6.6/714-v6.8-03-net-phy-restructure-__phy_write-read_mmd-to-helper-a.patch

@@ -0,0 +1,116 @@
+From 028672bd1d73cf65249a420c1de75e8d2acd2f6a Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 15 Dec 2023 14:15:33 +0100
+Subject: [PATCH 3/4] net: phy: restructure __phy_write/read_mmd to helper and
+ phydev user
+
+Restructure phy_write_mmd and phy_read_mmd to implement generic helper
+for direct mdiobus access for mmd and use these helper for phydev user.
+
+This is needed in preparation of PHY package API that requires generic
+access to the mdiobus and are deatched from phydev struct but instead
+access them based on PHY package base_addr and offsets.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Russell King (Oracle) <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/phy-core.c | 64 ++++++++++++++++++--------------------
+ 1 file changed, 30 insertions(+), 34 deletions(-)
+
+--- a/drivers/net/phy/phy-core.c
++++ b/drivers/net/phy/phy-core.c
+@@ -540,6 +540,28 @@ static void mmd_phy_indirect(struct mii_
+ 			devad | MII_MMD_CTRL_NOINCR);
+ }
+ 
++static int mmd_phy_read(struct mii_bus *bus, int phy_addr, bool is_c45,
++			int devad, u32 regnum)
++{
++	if (is_c45)
++		return __mdiobus_c45_read(bus, phy_addr, devad, regnum);
++
++	mmd_phy_indirect(bus, phy_addr, devad, regnum);
++	/* Read the content of the MMD's selected register */
++	return __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
++}
++
++static int mmd_phy_write(struct mii_bus *bus, int phy_addr, bool is_c45,
++			 int devad, u32 regnum, u16 val)
++{
++	if (is_c45)
++		return __mdiobus_c45_write(bus, phy_addr, devad, regnum, val);
++
++	mmd_phy_indirect(bus, phy_addr, devad, regnum);
++	/* Write the data into MMD's selected register */
++	return __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
++}
++
+ /**
+  * __phy_read_mmd - Convenience function for reading a register
+  * from an MMD on a given PHY.
+@@ -551,26 +573,14 @@ static void mmd_phy_indirect(struct mii_
+  */
+ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
+ {
+-	int val;
+-
+ 	if (regnum > (u16)~0 || devad > 32)
+ 		return -EINVAL;
+ 
+-	if (phydev->drv && phydev->drv->read_mmd) {
+-		val = phydev->drv->read_mmd(phydev, devad, regnum);
+-	} else if (phydev->is_c45) {
+-		val = __mdiobus_c45_read(phydev->mdio.bus, phydev->mdio.addr,
+-					 devad, regnum);
+-	} else {
+-		struct mii_bus *bus = phydev->mdio.bus;
+-		int phy_addr = phydev->mdio.addr;
+-
+-		mmd_phy_indirect(bus, phy_addr, devad, regnum);
+-
+-		/* Read the content of the MMD's selected register */
+-		val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
+-	}
+-	return val;
++	if (phydev->drv && phydev->drv->read_mmd)
++		return phydev->drv->read_mmd(phydev, devad, regnum);
++
++	return mmd_phy_read(phydev->mdio.bus, phydev->mdio.addr,
++			    phydev->is_c45, devad, regnum);
+ }
+ EXPORT_SYMBOL(__phy_read_mmd);
+ 
+@@ -607,28 +617,14 @@ EXPORT_SYMBOL(phy_read_mmd);
+  */
+ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
+ {
+-	int ret;
+-
+ 	if (regnum > (u16)~0 || devad > 32)
+ 		return -EINVAL;
+ 
+-	if (phydev->drv && phydev->drv->write_mmd) {
+-		ret = phydev->drv->write_mmd(phydev, devad, regnum, val);
+-	} else if (phydev->is_c45) {
+-		ret = __mdiobus_c45_write(phydev->mdio.bus, phydev->mdio.addr,
+-					  devad, regnum, val);
+-	} else {
+-		struct mii_bus *bus = phydev->mdio.bus;
+-		int phy_addr = phydev->mdio.addr;
++	if (phydev->drv && phydev->drv->write_mmd)
++		return phydev->drv->write_mmd(phydev, devad, regnum, val);
+ 
+-		mmd_phy_indirect(bus, phy_addr, devad, regnum);
+-
+-		/* Write the data into MMD's selected register */
+-		__mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
+-
+-		ret = 0;
+-	}
+-	return ret;
++	return mmd_phy_write(phydev->mdio.bus, phydev->mdio.addr,
++			     phydev->is_c45, devad, regnum, val);
+ }
+ EXPORT_SYMBOL(__phy_write_mmd);
+ 

+ 196 - 0
target/linux/generic/backport-6.6/714-v6.8-04-net-phy-add-support-for-PHY-package-MMD-read-write.patch

@@ -0,0 +1,196 @@
+From d63710fc0f1a501fd75a7025e3070a96ffa1645f Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Fri, 15 Dec 2023 14:15:34 +0100
+Subject: [PATCH 4/4] net: phy: add support for PHY package MMD read/write
+
+Some PHY in PHY package may require to read/write MMD regs to correctly
+configure the PHY package.
+
+Add support for these additional required function in both lock and no
+lock variant.
+
+It's assumed that the entire PHY package is either C22 or C45. We use
+C22 or C45 way of writing/reading to mmd regs based on the passed phydev
+whether it's C22 or C45.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/phy-core.c | 140 +++++++++++++++++++++++++++++++++++++
+ include/linux/phy.h        |  16 +++++
+ 2 files changed, 156 insertions(+)
+
+--- a/drivers/net/phy/phy-core.c
++++ b/drivers/net/phy/phy-core.c
+@@ -651,6 +651,146 @@ int phy_write_mmd(struct phy_device *phy
+ EXPORT_SYMBOL(phy_write_mmd);
+ 
+ /**
++ * __phy_package_read_mmd - read MMD reg relative to PHY package base addr
++ * @phydev: The phy_device struct
++ * @addr_offset: The offset to be added to PHY package base_addr
++ * @devad: The MMD to read from
++ * @regnum: The register on the MMD to read
++ *
++ * Convenience helper for reading a register of an MMD on a given PHY
++ * using the PHY package base address. The base address is added to
++ * the addr_offset value.
++ *
++ * Same calling rules as for __phy_read();
++ *
++ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
++ */
++int __phy_package_read_mmd(struct phy_device *phydev,
++			   unsigned int addr_offset, int devad,
++			   u32 regnum)
++{
++	int addr = phy_package_address(phydev, addr_offset);
++
++	if (addr < 0)
++		return addr;
++
++	if (regnum > (u16)~0 || devad > 32)
++		return -EINVAL;
++
++	return mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
++			    regnum);
++}
++EXPORT_SYMBOL(__phy_package_read_mmd);
++
++/**
++ * phy_package_read_mmd - read MMD reg relative to PHY package base addr
++ * @phydev: The phy_device struct
++ * @addr_offset: The offset to be added to PHY package base_addr
++ * @devad: The MMD to read from
++ * @regnum: The register on the MMD to read
++ *
++ * Convenience helper for reading a register of an MMD on a given PHY
++ * using the PHY package base address. The base address is added to
++ * the addr_offset value.
++ *
++ * Same calling rules as for phy_read();
++ *
++ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
++ */
++int phy_package_read_mmd(struct phy_device *phydev,
++			 unsigned int addr_offset, int devad,
++			 u32 regnum)
++{
++	int addr = phy_package_address(phydev, addr_offset);
++	int val;
++
++	if (addr < 0)
++		return addr;
++
++	if (regnum > (u16)~0 || devad > 32)
++		return -EINVAL;
++
++	phy_lock_mdio_bus(phydev);
++	val = mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad,
++			   regnum);
++	phy_unlock_mdio_bus(phydev);
++
++	return val;
++}
++EXPORT_SYMBOL(phy_package_read_mmd);
++
++/**
++ * __phy_package_write_mmd - write MMD reg relative to PHY package base addr
++ * @phydev: The phy_device struct
++ * @addr_offset: The offset to be added to PHY package base_addr
++ * @devad: The MMD to write to
++ * @regnum: The register on the MMD to write
++ * @val: value to write to @regnum
++ *
++ * Convenience helper for writing a register of an MMD on a given PHY
++ * using the PHY package base address. The base address is added to
++ * the addr_offset value.
++ *
++ * Same calling rules as for __phy_write();
++ *
++ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
++ */
++int __phy_package_write_mmd(struct phy_device *phydev,
++			    unsigned int addr_offset, int devad,
++			    u32 regnum, u16 val)
++{
++	int addr = phy_package_address(phydev, addr_offset);
++
++	if (addr < 0)
++		return addr;
++
++	if (regnum > (u16)~0 || devad > 32)
++		return -EINVAL;
++
++	return mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
++			     regnum, val);
++}
++EXPORT_SYMBOL(__phy_package_write_mmd);
++
++/**
++ * phy_package_write_mmd - write MMD reg relative to PHY package base addr
++ * @phydev: The phy_device struct
++ * @addr_offset: The offset to be added to PHY package base_addr
++ * @devad: The MMD to write to
++ * @regnum: The register on the MMD to write
++ * @val: value to write to @regnum
++ *
++ * Convenience helper for writing a register of an MMD on a given PHY
++ * using the PHY package base address. The base address is added to
++ * the addr_offset value.
++ *
++ * Same calling rules as for phy_write();
++ *
++ * NOTE: It's assumed that the entire PHY package is either C22 or C45.
++ */
++int phy_package_write_mmd(struct phy_device *phydev,
++			  unsigned int addr_offset, int devad,
++			  u32 regnum, u16 val)
++{
++	int addr = phy_package_address(phydev, addr_offset);
++	int ret;
++
++	if (addr < 0)
++		return addr;
++
++	if (regnum > (u16)~0 || devad > 32)
++		return -EINVAL;
++
++	phy_lock_mdio_bus(phydev);
++	ret = mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad,
++			    regnum, val);
++	phy_unlock_mdio_bus(phydev);
++
++	return ret;
++}
++EXPORT_SYMBOL(phy_package_write_mmd);
++
++/**
+  * phy_modify_changed - Function for modifying a PHY register
+  * @phydev: the phy_device struct
+  * @regnum: register number to modify
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -2057,6 +2057,22 @@ static inline int __phy_package_write(st
+ 	return __mdiobus_write(phydev->mdio.bus, addr, regnum, val);
+ }
+ 
++int __phy_package_read_mmd(struct phy_device *phydev,
++			   unsigned int addr_offset, int devad,
++			   u32 regnum);
++
++int phy_package_read_mmd(struct phy_device *phydev,
++			 unsigned int addr_offset, int devad,
++			 u32 regnum);
++
++int __phy_package_write_mmd(struct phy_device *phydev,
++			    unsigned int addr_offset, int devad,
++			    u32 regnum, u16 val);
++
++int phy_package_write_mmd(struct phy_device *phydev,
++			  unsigned int addr_offset, int devad,
++			  u32 regnum, u16 val);
++
+ static inline bool __phy_package_set_once(struct phy_device *phydev,
+ 					  unsigned int b)
+ {

+ 36 - 0
target/linux/generic/backport-6.6/715-v6.9-01-net-phy-qcom-qca808x-fix-logic-error-in-LED-brightne.patch

@@ -0,0 +1,36 @@
+From f2ec98566775dd4341ec1dcf93aa5859c60de826 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 1 Feb 2024 14:46:00 +0100
+Subject: [PATCH 1/2] net: phy: qcom: qca808x: fix logic error in LED
+ brightness set
+
+In switching to using phy_modify_mmd and a more short version of the
+LED ON/OFF condition in later revision, it was made a logic error where
+
+value ? QCA808X_LED_FORCE_ON : QCA808X_LED_FORCE_OFF is always true as
+value is always OR with QCA808X_LED_FORCE_EN due to missing ()
+resulting in the testing condition being QCA808X_LED_FORCE_EN | value.
+
+Add the () to apply the correct condition and restore correct
+functionality of the brightness ON/OFF.
+
+Fixes: 7196062b64ee ("net: phy: at803x: add LED support for qca808x")
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/qcom/qca808x.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -820,8 +820,8 @@ static int qca808x_led_brightness_set(st
+ 
+ 	return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ 			      QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+-			      QCA808X_LED_FORCE_EN | value ? QCA808X_LED_FORCE_ON :
+-							     QCA808X_LED_FORCE_OFF);
++			      QCA808X_LED_FORCE_EN | (value ? QCA808X_LED_FORCE_ON :
++							     QCA808X_LED_FORCE_OFF));
+ }
+ 
+ static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,

+ 41 - 0
target/linux/generic/backport-6.6/715-v6.9-02-net-phy-qcom-qca808x-default-to-LED-active-High-if-n.patch

@@ -0,0 +1,41 @@
+From f203c8c77c7616c099647636f4c67d59a45fe8a2 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Thu, 1 Feb 2024 14:46:01 +0100
+Subject: [PATCH 2/2] net: phy: qcom: qca808x: default to LED active High if
+ not set
+
+qca808x PHY provide support for the led_polarity_set OP to configure
+and apply the active-low property but on PHY reset, the Active High bit
+is not set resulting in the LED driven as active-low.
+
+To fix this, check if active-low is not set in DT and enable Active High
+polarity by default to restore correct functionality of the LED.
+
+Fixes: 7196062b64ee ("net: phy: at803x: add LED support for qca808x")
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/qcom/qca808x.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -290,8 +290,18 @@ static int qca808x_probe(struct phy_devi
+ 
+ static int qca808x_config_init(struct phy_device *phydev)
+ {
++	struct qca808x_priv *priv = phydev->priv;
+ 	int ret;
+ 
++	/* Default to LED Active High if active-low not in DT */
++	if (priv->led_polarity_mode == -1) {
++		ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN,
++				       QCA808X_MMD7_LED_POLARITY_CTRL,
++				       QCA808X_LED_ACTIVE_HIGH);
++		if (ret)
++			return ret;
++	}
++
+ 	/* Active adc&vga on 802.3az for the link 1000M and 100M */
+ 	ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+ 			     QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);

+ 211 - 0
target/linux/generic/backport-6.6/716-v6.9-02-net-phy-add-support-for-scanning-PHY-in-PHY-packages.patch

@@ -0,0 +1,211 @@
+From 385ef48f468696d6d172eb367656a3466fa0408d Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Tue, 6 Feb 2024 18:31:05 +0100
+Subject: [PATCH 02/10] net: phy: add support for scanning PHY in PHY packages
+ nodes
+
+Add support for scanning PHY in PHY package nodes. PHY packages nodes
+are just container for actual PHY on the MDIO bus.
+
+Their PHY address defined in the PHY package node are absolute and
+reflect the address on the MDIO bus.
+
+mdio_bus.c and of_mdio.c is updated to now support and parse also
+PHY package subnode by checking if the node name match
+"ethernet-phy-package".
+
+As PHY package reg is mandatory and each PHY in the PHY package must
+have a reg, every invalid PHY Package node is ignored and will be
+skipped by the autoscan fallback.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/mdio/of_mdio.c | 79 +++++++++++++++++++++++++++-----------
+ drivers/net/phy/mdio_bus.c | 44 +++++++++++++++++----
+ 2 files changed, 92 insertions(+), 31 deletions(-)
+
+--- a/drivers/net/mdio/of_mdio.c
++++ b/drivers/net/mdio/of_mdio.c
+@@ -138,6 +138,53 @@ bool of_mdiobus_child_is_phy(struct devi
+ }
+ EXPORT_SYMBOL(of_mdiobus_child_is_phy);
+ 
++static int __of_mdiobus_parse_phys(struct mii_bus *mdio, struct device_node *np,
++				   bool *scanphys)
++{
++	struct device_node *child;
++	int addr, rc = 0;
++
++	/* Loop over the child nodes and register a phy_device for each phy */
++	for_each_available_child_of_node(np, child) {
++		if (of_node_name_eq(child, "ethernet-phy-package")) {
++			/* Ignore invalid ethernet-phy-package node */
++			if (!of_find_property(child, "reg", NULL))
++				continue;
++
++			rc = __of_mdiobus_parse_phys(mdio, child, NULL);
++			if (rc && rc != -ENODEV)
++				goto exit;
++
++			continue;
++		}
++
++		addr = of_mdio_parse_addr(&mdio->dev, child);
++		if (addr < 0) {
++			/* Skip scanning for invalid ethernet-phy-package node */
++			if (scanphys)
++				*scanphys = true;
++			continue;
++		}
++
++		if (of_mdiobus_child_is_phy(child))
++			rc = of_mdiobus_register_phy(mdio, child, addr);
++		else
++			rc = of_mdiobus_register_device(mdio, child, addr);
++
++		if (rc == -ENODEV)
++			dev_err(&mdio->dev,
++				"MDIO device at address %d is missing.\n",
++				addr);
++		else if (rc)
++			goto exit;
++	}
++
++	return 0;
++exit:
++	of_node_put(child);
++	return rc;
++}
++
+ /**
+  * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
+  * @mdio: pointer to mii_bus structure
+@@ -179,33 +226,18 @@ int __of_mdiobus_register(struct mii_bus
+ 		return rc;
+ 
+ 	/* Loop over the child nodes and register a phy_device for each phy */
+-	for_each_available_child_of_node(np, child) {
+-		addr = of_mdio_parse_addr(&mdio->dev, child);
+-		if (addr < 0) {
+-			scanphys = true;
+-			continue;
+-		}
+-
+-		if (of_mdiobus_child_is_phy(child))
+-			rc = of_mdiobus_register_phy(mdio, child, addr);
+-		else
+-			rc = of_mdiobus_register_device(mdio, child, addr);
+-
+-		if (rc == -ENODEV)
+-			dev_err(&mdio->dev,
+-				"MDIO device at address %d is missing.\n",
+-				addr);
+-		else if (rc)
+-			goto unregister;
+-	}
++	rc = __of_mdiobus_parse_phys(mdio, np, &scanphys);
++	if (rc)
++		goto unregister;
+ 
+ 	if (!scanphys)
+ 		return 0;
+ 
+ 	/* auto scan for PHYs with empty reg property */
+ 	for_each_available_child_of_node(np, child) {
+-		/* Skip PHYs with reg property set */
+-		if (of_property_present(child, "reg"))
++		/* Skip PHYs with reg property set or ethernet-phy-package node */
++		if (of_property_present(child, "reg") ||
++		    of_node_name_eq(child, "ethernet-phy-package"))
+ 			continue;
+ 
+ 		for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+@@ -226,15 +258,16 @@ int __of_mdiobus_register(struct mii_bus
+ 				if (!rc)
+ 					break;
+ 				if (rc != -ENODEV)
+-					goto unregister;
++					goto put_unregister;
+ 			}
+ 		}
+ 	}
+ 
+ 	return 0;
+ 
+-unregister:
++put_unregister:
+ 	of_node_put(child);
++unregister:
+ 	mdiobus_unregister(mdio);
+ 	return rc;
+ }
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -455,19 +455,34 @@ EXPORT_SYMBOL(of_mdio_find_bus);
+  * found, set the of_node pointer for the mdio device. This allows
+  * auto-probed phy devices to be supplied with information passed in
+  * via DT.
++ * If a PHY package is found, PHY is searched also there.
+  */
+-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+-				    struct mdio_device *mdiodev)
++static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
++			       struct device_node *np)
+ {
+-	struct device *dev = &mdiodev->dev;
+ 	struct device_node *child;
+ 
+-	if (dev->of_node || !bus->dev.of_node)
+-		return;
+-
+-	for_each_available_child_of_node(bus->dev.of_node, child) {
++	for_each_available_child_of_node(np, child) {
+ 		int addr;
+ 
++		if (of_node_name_eq(child, "ethernet-phy-package")) {
++			/* Validate PHY package reg presence */
++			if (!of_find_property(child, "reg", NULL)) {
++				of_node_put(child);
++				return -EINVAL;
++			}
++
++			if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
++				/* The refcount for the PHY package will be
++				 * incremented later when PHY join the Package.
++				 */
++				of_node_put(child);
++				return 0;
++			}
++
++			continue;
++		}
++
+ 		addr = of_mdio_parse_addr(dev, child);
+ 		if (addr < 0)
+ 			continue;
+@@ -477,9 +492,22 @@ static void of_mdiobus_link_mdiodev(stru
+ 			/* The refcount on "child" is passed to the mdio
+ 			 * device. Do _not_ use of_node_put(child) here.
+ 			 */
+-			return;
++			return 0;
+ 		}
+ 	}
++
++	return -ENODEV;
++}
++
++static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
++				    struct mdio_device *mdiodev)
++{
++	struct device *dev = &mdiodev->dev;
++
++	if (dev->of_node || !bus->dev.of_node)
++		return;
++
++	of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
+ }
+ #else /* !IS_ENABLED(CONFIG_OF_MDIO) */
+ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,

+ 185 - 0
target/linux/generic/backport-6.6/716-v6.9-03-net-phy-add-devm-of_phy_package_join-helper.patch

@@ -0,0 +1,185 @@
+From 471e8fd3afcef5a9f9089f0bd21965ad9ba35c91 Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Tue, 6 Feb 2024 18:31:06 +0100
+Subject: [PATCH 03/10] net: phy: add devm/of_phy_package_join helper
+
+Add devm/of_phy_package_join helper to join PHYs in a PHY package. These
+are variant of the manual phy_package_join with the difference that
+these will use DT nodes to derive the base_addr instead of manually
+passing an hardcoded value.
+
+An additional value is added in phy_package_shared, "np" to reference
+the PHY package node pointer in specific PHY driver probe_once and
+config_init_once functions to make use of additional specific properties
+defined in the PHY package node in DT.
+
+The np value is filled only with of_phy_package_join if a valid PHY
+package node is found. A valid PHY package node must have the node name
+set to "ethernet-phy-package".
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/phy_device.c | 96 ++++++++++++++++++++++++++++++++++++
+ include/linux/phy.h          |  6 +++
+ 2 files changed, 102 insertions(+)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1706,6 +1706,7 @@ int phy_package_join(struct phy_device *
+ 			shared->priv_size = priv_size;
+ 		}
+ 		shared->base_addr = base_addr;
++		shared->np = NULL;
+ 		refcount_set(&shared->refcnt, 1);
+ 		bus->shared[base_addr] = shared;
+ 	} else {
+@@ -1729,6 +1730,63 @@ err_unlock:
+ EXPORT_SYMBOL_GPL(phy_package_join);
+ 
+ /**
++ * of_phy_package_join - join a common PHY group in PHY package
++ * @phydev: target phy_device struct
++ * @priv_size: if non-zero allocate this amount of bytes for private data
++ *
++ * This is a variant of phy_package_join for PHY package defined in DT.
++ *
++ * The parent node of the @phydev is checked as a valid PHY package node
++ * structure (by matching the node name "ethernet-phy-package") and the
++ * base_addr for the PHY package is passed to phy_package_join.
++ *
++ * With this configuration the shared struct will also have the np value
++ * filled to use additional DT defined properties in PHY specific
++ * probe_once and config_init_once PHY package OPs.
++ *
++ * Returns < 0 on error, 0 on success. Esp. calling phy_package_join()
++ * with the same cookie but a different priv_size is an error. Or a parent
++ * node is not detected or is not valid or doesn't match the expected node
++ * name for PHY package.
++ */
++int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
++{
++	struct device_node *node = phydev->mdio.dev.of_node;
++	struct device_node *package_node;
++	u32 base_addr;
++	int ret;
++
++	if (!node)
++		return -EINVAL;
++
++	package_node = of_get_parent(node);
++	if (!package_node)
++		return -EINVAL;
++
++	if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	if (of_property_read_u32(package_node, "reg", &base_addr)) {
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	ret = phy_package_join(phydev, base_addr, priv_size);
++	if (ret)
++		goto exit;
++
++	phydev->shared->np = package_node;
++
++	return 0;
++exit:
++	of_node_put(package_node);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(of_phy_package_join);
++
++/**
+  * phy_package_leave - leave a common PHY group
+  * @phydev: target phy_device struct
+  *
+@@ -1744,6 +1802,10 @@ void phy_package_leave(struct phy_device
+ 	if (!shared)
+ 		return;
+ 
++	/* Decrease the node refcount on leave if present */
++	if (shared->np)
++		of_node_put(shared->np);
++
+ 	if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
+ 		bus->shared[shared->base_addr] = NULL;
+ 		mutex_unlock(&bus->shared_lock);
+@@ -1797,6 +1859,40 @@ int devm_phy_package_join(struct device
+ EXPORT_SYMBOL_GPL(devm_phy_package_join);
+ 
+ /**
++ * devm_of_phy_package_join - resource managed of_phy_package_join()
++ * @dev: device that is registering this PHY package
++ * @phydev: target phy_device struct
++ * @priv_size: if non-zero allocate this amount of bytes for private data
++ *
++ * Managed of_phy_package_join(). Shared storage fetched by this function,
++ * phy_package_leave() is automatically called on driver detach. See
++ * of_phy_package_join() for more information.
++ */
++int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
++			     size_t priv_size)
++{
++	struct phy_device **ptr;
++	int ret;
++
++	ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
++			   GFP_KERNEL);
++	if (!ptr)
++		return -ENOMEM;
++
++	ret = of_phy_package_join(phydev, priv_size);
++
++	if (!ret) {
++		*ptr = phydev;
++		devres_add(dev, ptr);
++	} else {
++		devres_free(ptr);
++	}
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
++
++/**
+  * phy_detach - detach a PHY device from its network device
+  * @phydev: target phy_device struct
+  *
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -329,6 +329,7 @@ struct mdio_bus_stats {
+  * struct phy_package_shared - Shared information in PHY packages
+  * @base_addr: Base PHY address of PHY package used to combine PHYs
+  *   in one package and for offset calculation of phy_package_read/write
++ * @np: Pointer to the Device Node if PHY package defined in DT
+  * @refcnt: Number of PHYs connected to this shared data
+  * @flags: Initialization of PHY package
+  * @priv_size: Size of the shared private data @priv
+@@ -340,6 +341,8 @@ struct mdio_bus_stats {
+  */
+ struct phy_package_shared {
+ 	u8 base_addr;
++	/* With PHY package defined in DT this points to the PHY package node */
++	struct device_node *np;
+ 	refcount_t refcnt;
+ 	unsigned long flags;
+ 	size_t priv_size;
+@@ -1971,9 +1974,12 @@ int phy_ethtool_set_link_ksettings(struc
+ 				   const struct ethtool_link_ksettings *cmd);
+ int phy_ethtool_nway_reset(struct net_device *ndev);
+ int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size);
++int of_phy_package_join(struct phy_device *phydev, size_t priv_size);
+ void phy_package_leave(struct phy_device *phydev);
+ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
+ 			  int base_addr, size_t priv_size);
++int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
++			     size_t priv_size);
+ 
+ int __init mdio_bus_init(void);
+ void mdio_bus_exit(void);

+ 583 - 0
target/linux/generic/backport-6.6/716-v6.9-04-net-phy-qcom-move-more-function-to-shared-library.patch

@@ -0,0 +1,583 @@
+From 737eb75a815f9c08dcbb6631db57f4f4b0540a5b Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Tue, 6 Feb 2024 18:31:07 +0100
+Subject: [PATCH 04/10] net: phy: qcom: move more function to shared library
+
+Move more function to shared library in preparation for introduction of
+new PHY Family qca807x that will make use of both functions from at803x
+and qca808x as it's a transition PHY with some implementation of at803x
+and some from the new qca808x.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Reviewed-by: Andrew Lunn <[email protected]>
+Signed-off-by: David S. Miller <[email protected]>
+---
+ drivers/net/phy/qcom/at803x.c       |  35 -----
+ drivers/net/phy/qcom/qca808x.c      | 205 ----------------------------
+ drivers/net/phy/qcom/qcom-phy-lib.c | 193 ++++++++++++++++++++++++++
+ drivers/net/phy/qcom/qcom.h         |  51 +++++++
+ 4 files changed, 244 insertions(+), 240 deletions(-)
+
+--- a/drivers/net/phy/qcom/at803x.c
++++ b/drivers/net/phy/qcom/at803x.c
+@@ -504,41 +504,6 @@ static void at803x_link_change_notify(st
+ 	}
+ }
+ 
+-static int at803x_read_status(struct phy_device *phydev)
+-{
+-	struct at803x_ss_mask ss_mask = { 0 };
+-	int err, old_link = phydev->link;
+-
+-	/* Update the link, but return if there was an error */
+-	err = genphy_update_link(phydev);
+-	if (err)
+-		return err;
+-
+-	/* why bother the PHY if nothing can have changed */
+-	if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+-		return 0;
+-
+-	phydev->speed = SPEED_UNKNOWN;
+-	phydev->duplex = DUPLEX_UNKNOWN;
+-	phydev->pause = 0;
+-	phydev->asym_pause = 0;
+-
+-	err = genphy_read_lpa(phydev);
+-	if (err < 0)
+-		return err;
+-
+-	ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+-	ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+-	err = at803x_read_specific_status(phydev, ss_mask);
+-	if (err < 0)
+-		return err;
+-
+-	if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+-		phy_resolve_aneg_pause(phydev);
+-
+-	return 0;
+-}
+-
+ static int at803x_config_aneg(struct phy_device *phydev)
+ {
+ 	struct at803x_priv *priv = phydev->priv;
+--- a/drivers/net/phy/qcom/qca808x.c
++++ b/drivers/net/phy/qcom/qca808x.c
+@@ -2,7 +2,6 @@
+ 
+ #include <linux/phy.h>
+ #include <linux/module.h>
+-#include <linux/ethtool_netlink.h>
+ 
+ #include "qcom.h"
+ 
+@@ -63,55 +62,6 @@
+ #define QCA808X_DBG_AN_TEST			0xb
+ #define QCA808X_HIBERNATION_EN			BIT(15)
+ 
+-#define QCA808X_CDT_ENABLE_TEST			BIT(15)
+-#define QCA808X_CDT_INTER_CHECK_DIS		BIT(13)
+-#define QCA808X_CDT_STATUS			BIT(11)
+-#define QCA808X_CDT_LENGTH_UNIT			BIT(10)
+-
+-#define QCA808X_MMD3_CDT_STATUS			0x8064
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_A		0x8065
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_B		0x8066
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_C		0x8067
+-#define QCA808X_MMD3_CDT_DIAG_PAIR_D		0x8068
+-#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT	GENMASK(15, 8)
+-#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT	GENMASK(7, 0)
+-
+-#define QCA808X_CDT_CODE_PAIR_A			GENMASK(15, 12)
+-#define QCA808X_CDT_CODE_PAIR_B			GENMASK(11, 8)
+-#define QCA808X_CDT_CODE_PAIR_C			GENMASK(7, 4)
+-#define QCA808X_CDT_CODE_PAIR_D			GENMASK(3, 0)
+-
+-#define QCA808X_CDT_STATUS_STAT_TYPE		GENMASK(1, 0)
+-#define QCA808X_CDT_STATUS_STAT_FAIL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+-#define QCA808X_CDT_STATUS_STAT_NORMAL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+-#define QCA808X_CDT_STATUS_STAT_SAME_OPEN	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+-#define QCA808X_CDT_STATUS_STAT_SAME_SHORT	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+-
+-#define QCA808X_CDT_STATUS_STAT_MDI		GENMASK(3, 2)
+-#define QCA808X_CDT_STATUS_STAT_MDI1		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+-#define QCA808X_CDT_STATUS_STAT_MDI2		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+-#define QCA808X_CDT_STATUS_STAT_MDI3		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+-
+-/* NORMAL are MDI with type set to 0 */
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI1
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI1)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI1)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI2
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI2)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI2)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI3
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+-									 QCA808X_CDT_STATUS_STAT_MDI3)
+-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+-									 QCA808X_CDT_STATUS_STAT_MDI3)
+-
+-/* Added for reference of existence but should be handled by wait_for_completion already */
+-#define QCA808X_CDT_STATUS_STAT_BUSY		(BIT(1) | BIT(3))
+-
+ #define QCA808X_MMD7_LED_GLOBAL			0x8073
+ #define QCA808X_LED_BLINK_1			GENMASK(11, 6)
+ #define QCA808X_LED_BLINK_2			GENMASK(5, 0)
+@@ -406,86 +356,6 @@ static int qca808x_soft_reset(struct phy
+ 	return ret;
+ }
+ 
+-static bool qca808x_cdt_fault_length_valid(int cdt_code)
+-{
+-	switch (cdt_code) {
+-	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+-		return true;
+-	default:
+-		return false;
+-	}
+-}
+-
+-static int qca808x_cable_test_result_trans(int cdt_code)
+-{
+-	switch (cdt_code) {
+-	case QCA808X_CDT_STATUS_STAT_NORMAL:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+-	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+-	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+-	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+-	case QCA808X_CDT_STATUS_STAT_FAIL:
+-	default:
+-		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+-	}
+-}
+-
+-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+-				    int result)
+-{
+-	int val;
+-	u32 cdt_length_reg = 0;
+-
+-	switch (pair) {
+-	case ETHTOOL_A_CABLE_PAIR_A:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_B:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_C:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_D:
+-		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+-	if (val < 0)
+-		return val;
+-
+-	if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+-		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+-	else
+-		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+-
+-	return at803x_cdt_fault_length(val);
+-}
+-
+ static int qca808x_cable_test_start(struct phy_device *phydev)
+ {
+ 	int ret;
+@@ -526,81 +396,6 @@ static int qca808x_cable_test_start(stru
+ 
+ 	return 0;
+ }
+-
+-static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+-					      u16 status)
+-{
+-	int length, result;
+-	u16 pair_code;
+-
+-	switch (pair) {
+-	case ETHTOOL_A_CABLE_PAIR_A:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_B:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_C:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+-		break;
+-	case ETHTOOL_A_CABLE_PAIR_D:
+-		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	result = qca808x_cable_test_result_trans(pair_code);
+-	ethnl_cable_test_result(phydev, pair, result);
+-
+-	if (qca808x_cdt_fault_length_valid(pair_code)) {
+-		length = qca808x_cdt_fault_length(phydev, pair, result);
+-		ethnl_cable_test_fault_length(phydev, pair, length);
+-	}
+-
+-	return 0;
+-}
+-
+-static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+-{
+-	int ret, val;
+-
+-	*finished = false;
+-
+-	val = QCA808X_CDT_ENABLE_TEST |
+-	      QCA808X_CDT_LENGTH_UNIT;
+-	ret = at803x_cdt_start(phydev, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+-	if (ret)
+-		return ret;
+-
+-	val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+-	if (val < 0)
+-		return val;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+-	if (ret)
+-		return ret;
+-
+-	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+-	if (ret)
+-		return ret;
+-
+-	*finished = true;
+-
+-	return 0;
+-}
+ 
+ static int qca808x_get_features(struct phy_device *phydev)
+ {
+--- a/drivers/net/phy/qcom/qcom-phy-lib.c
++++ b/drivers/net/phy/qcom/qcom-phy-lib.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
++#include <linux/ethtool_netlink.h>
+ 
+ #include "qcom.h"
+ 
+@@ -311,6 +312,42 @@ int at803x_prepare_config_aneg(struct ph
+ }
+ EXPORT_SYMBOL_GPL(at803x_prepare_config_aneg);
+ 
++int at803x_read_status(struct phy_device *phydev)
++{
++	struct at803x_ss_mask ss_mask = { 0 };
++	int err, old_link = phydev->link;
++
++	/* Update the link, but return if there was an error */
++	err = genphy_update_link(phydev);
++	if (err)
++		return err;
++
++	/* why bother the PHY if nothing can have changed */
++	if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
++		return 0;
++
++	phydev->speed = SPEED_UNKNOWN;
++	phydev->duplex = DUPLEX_UNKNOWN;
++	phydev->pause = 0;
++	phydev->asym_pause = 0;
++
++	err = genphy_read_lpa(phydev);
++	if (err < 0)
++		return err;
++
++	ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
++	ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
++	err = at803x_read_specific_status(phydev, ss_mask);
++	if (err < 0)
++		return err;
++
++	if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
++		phy_resolve_aneg_pause(phydev);
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(at803x_read_status);
++
+ static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+ {
+ 	int val;
+@@ -427,3 +464,159 @@ int at803x_cdt_wait_for_completion(struc
+ 	return ret < 0 ? ret : 0;
+ }
+ EXPORT_SYMBOL_GPL(at803x_cdt_wait_for_completion);
++
++static bool qca808x_cdt_fault_length_valid(int cdt_code)
++{
++	switch (cdt_code) {
++	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
++		return true;
++	default:
++		return false;
++	}
++}
++
++static int qca808x_cable_test_result_trans(int cdt_code)
++{
++	switch (cdt_code) {
++	case QCA808X_CDT_STATUS_STAT_NORMAL:
++		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
++	case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
++		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
++	case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
++		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
++	case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
++		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
++	case QCA808X_CDT_STATUS_STAT_FAIL:
++	default:
++		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
++	}
++}
++
++static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
++				    int result)
++{
++	int val;
++	u32 cdt_length_reg = 0;
++
++	switch (pair) {
++	case ETHTOOL_A_CABLE_PAIR_A:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_B:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_C:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
++		break;
++	case ETHTOOL_A_CABLE_PAIR_D:
++		cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
++	if (val < 0)
++		return val;
++
++	if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
++		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
++	else
++		val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
++
++	return at803x_cdt_fault_length(val);
++}
++
++static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
++					      u16 status)
++{
++	int length, result;
++	u16 pair_code;
++
++	switch (pair) {
++	case ETHTOOL_A_CABLE_PAIR_A:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_B:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_C:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
++		break;
++	case ETHTOOL_A_CABLE_PAIR_D:
++		pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	result = qca808x_cable_test_result_trans(pair_code);
++	ethnl_cable_test_result(phydev, pair, result);
++
++	if (qca808x_cdt_fault_length_valid(pair_code)) {
++		length = qca808x_cdt_fault_length(phydev, pair, result);
++		ethnl_cable_test_fault_length(phydev, pair, length);
++	}
++
++	return 0;
++}
++
++int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
++{
++	int ret, val;
++
++	*finished = false;
++
++	val = QCA808X_CDT_ENABLE_TEST |
++	      QCA808X_CDT_LENGTH_UNIT;
++	ret = at803x_cdt_start(phydev, val);
++	if (ret)
++		return ret;
++
++	ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
++	if (ret)
++		return ret;
++
++	val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
++	if (val < 0)
++		return val;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
++	if (ret)
++		return ret;
++
++	ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
++	if (ret)
++		return ret;
++
++	*finished = true;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(qca808x_cable_test_get_status);
+--- a/drivers/net/phy/qcom/qcom.h
++++ b/drivers/net/phy/qcom/qcom.h
+@@ -54,6 +54,55 @@
+ #define AT803X_CDT_STATUS_STAT_MASK		GENMASK(9, 8)
+ #define AT803X_CDT_STATUS_DELTA_TIME_MASK	GENMASK(7, 0)
+ 
++#define QCA808X_CDT_ENABLE_TEST			BIT(15)
++#define QCA808X_CDT_INTER_CHECK_DIS		BIT(13)
++#define QCA808X_CDT_STATUS			BIT(11)
++#define QCA808X_CDT_LENGTH_UNIT			BIT(10)
++
++#define QCA808X_MMD3_CDT_STATUS			0x8064
++#define QCA808X_MMD3_CDT_DIAG_PAIR_A		0x8065
++#define QCA808X_MMD3_CDT_DIAG_PAIR_B		0x8066
++#define QCA808X_MMD3_CDT_DIAG_PAIR_C		0x8067
++#define QCA808X_MMD3_CDT_DIAG_PAIR_D		0x8068
++#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT	GENMASK(15, 8)
++#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT	GENMASK(7, 0)
++
++#define QCA808X_CDT_CODE_PAIR_A			GENMASK(15, 12)
++#define QCA808X_CDT_CODE_PAIR_B			GENMASK(11, 8)
++#define QCA808X_CDT_CODE_PAIR_C			GENMASK(7, 4)
++#define QCA808X_CDT_CODE_PAIR_D			GENMASK(3, 0)
++
++#define QCA808X_CDT_STATUS_STAT_TYPE		GENMASK(1, 0)
++#define QCA808X_CDT_STATUS_STAT_FAIL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
++#define QCA808X_CDT_STATUS_STAT_NORMAL		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
++#define QCA808X_CDT_STATUS_STAT_SAME_OPEN	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
++#define QCA808X_CDT_STATUS_STAT_SAME_SHORT	FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
++
++#define QCA808X_CDT_STATUS_STAT_MDI		GENMASK(3, 2)
++#define QCA808X_CDT_STATUS_STAT_MDI1		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
++#define QCA808X_CDT_STATUS_STAT_MDI2		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
++#define QCA808X_CDT_STATUS_STAT_MDI3		FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
++
++/* NORMAL are MDI with type set to 0 */
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI1
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI1)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI1)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI2
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI2)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI2)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL	QCA808X_CDT_STATUS_STAT_MDI3
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN		(QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
++									 QCA808X_CDT_STATUS_STAT_MDI3)
++#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT	(QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
++									 QCA808X_CDT_STATUS_STAT_MDI3)
++
++/* Added for reference of existence but should be handled by wait_for_completion already */
++#define QCA808X_CDT_STATUS_STAT_BUSY		(BIT(1) | BIT(3))
++
+ #define AT803X_LOC_MAC_ADDR_0_15_OFFSET		0x804C
+ #define AT803X_LOC_MAC_ADDR_16_31_OFFSET	0x804B
+ #define AT803X_LOC_MAC_ADDR_32_47_OFFSET	0x804A
+@@ -110,6 +159,7 @@ int at803x_read_specific_status(struct p
+ 				struct at803x_ss_mask ss_mask);
+ int at803x_config_mdix(struct phy_device *phydev, u8 ctrl);
+ int at803x_prepare_config_aneg(struct phy_device *phydev);
++int at803x_read_status(struct phy_device *phydev);
+ int at803x_get_tunable(struct phy_device *phydev,
+ 		       struct ethtool_tunable *tuna, void *data);
+ int at803x_set_tunable(struct phy_device *phydev,
+@@ -118,3 +168,4 @@ int at803x_cdt_fault_length(int dt);
+ int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start);
+ int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ 				   u32 cdt_en);
++int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished);

Энэ ялгаанд хэт олон файл өөрчлөгдсөн тул зарим файлыг харуулаагүй болно