703-phy-support-layerscape.patch

  1. From be07319b9897738a4ab1501880b7dd9be26eba66 Mon Sep 17 00:00:00 2001
  2. From: Yangbo Lu <[email protected]>
  3. Date: Mon, 25 Sep 2017 11:54:28 +0800
  4. Subject: [PATCH] phy: support layerscape
  5. This is an integrated patch for layerscape mdio-phy support.
  6. Signed-off-by: Bogdan Purcareata <[email protected]>
  7. Signed-off-by: Zhang Ying-22455 <[email protected]>
  8. Signed-off-by: costi <[email protected]>
  9. Signed-off-by: Madalin Bucur <[email protected]>
  10. Signed-off-by: Shaohui Xie <[email protected]>
  11. Signed-off-by: Florian Fainelli <[email protected]>
  12. Signed-off-by: Yangbo Lu <[email protected]>
  13. ---
  14. drivers/net/phy/Kconfig | 11 +
  15. drivers/net/phy/Makefile | 2 +
  16. drivers/net/phy/aquantia.c | 28 +
  17. drivers/net/phy/cortina.c | 118 ++++
  18. drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++++++++++
  19. drivers/net/phy/phy.c | 23 +-
  20. drivers/net/phy/phy_device.c | 6 +-
  21. drivers/net/phy/swphy.c | 1 +
  22. include/linux/phy.h | 4 +
  23. 9 files changed, 1544 insertions(+), 7 deletions(-)
  24. create mode 100644 drivers/net/phy/cortina.c
  25. create mode 100644 drivers/net/phy/fsl_backplane.c
  26. --- a/drivers/net/phy/Kconfig
  27. +++ b/drivers/net/phy/Kconfig
  28. @@ -89,6 +89,12 @@ config MDIO_BUS_MUX_MMIOREG
  29. config MDIO_CAVIUM
  30. tristate
  31. +config MDIO_FSL_BACKPLANE
  32. + tristate "Support for backplane on Freescale XFI interface"
  33. + depends on OF_MDIO
  34. + help
  35. + This module provides a driver for the backplane on the Freescale XFI interface.
  36. +
  37. config MDIO_GPIO
  38. tristate "GPIO lib-based bitbanged MDIO buses"
  39. depends on MDIO_BITBANG && GPIOLIB
  40. @@ -298,6 +304,11 @@ config CICADA_PHY
  41. ---help---
  42. Currently supports the cis8204
  43. +config CORTINA_PHY
  44. + tristate "Cortina EDC CDR 10G Ethernet PHY"
  45. + ---help---
  46. + Currently supports the CS4340 phy.
  47. +
  48. config DAVICOM_PHY
  49. tristate "Davicom PHYs"
  50. ---help---
  51. --- a/drivers/net/phy/Makefile
  52. +++ b/drivers/net/phy/Makefile
  53. @@ -30,6 +30,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) +=
  54. obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
  55. obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
  56. obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
  57. +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
  58. obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
  59. obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
  60. obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
  61. @@ -48,6 +49,7 @@ obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygn
  62. obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
  63. obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
  64. obj-$(CONFIG_CICADA_PHY) += cicada.o
  65. +obj-$(CONFIG_CORTINA_PHY) += cortina.o
  66. obj-$(CONFIG_DAVICOM_PHY) += davicom.o
  67. obj-$(CONFIG_DP83640_PHY) += dp83640.o
  68. obj-$(CONFIG_DP83848_PHY) += dp83848.o
  69. --- a/drivers/net/phy/aquantia.c
  70. +++ b/drivers/net/phy/aquantia.c
  71. @@ -21,6 +21,8 @@
  72. #define PHY_ID_AQ1202 0x03a1b445
  73. #define PHY_ID_AQ2104 0x03a1b460
  74. #define PHY_ID_AQR105 0x03a1b4a2
  75. +#define PHY_ID_AQR106 0x03a1b4d0
  76. +#define PHY_ID_AQR107 0x03a1b4e0
  77. #define PHY_ID_AQR405 0x03a1b4b0
  78. #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
  79. @@ -154,6 +156,30 @@ static struct phy_driver aquantia_driver
  80. .read_status = aquantia_read_status,
  81. },
  82. {
  83. + .phy_id = PHY_ID_AQR106,
  84. + .phy_id_mask = 0xfffffff0,
  85. + .name = "Aquantia AQR106",
  86. + .features = PHY_AQUANTIA_FEATURES,
  87. + .flags = PHY_HAS_INTERRUPT,
  88. + .aneg_done = aquantia_aneg_done,
  89. + .config_aneg = aquantia_config_aneg,
  90. + .config_intr = aquantia_config_intr,
  91. + .ack_interrupt = aquantia_ack_interrupt,
  92. + .read_status = aquantia_read_status,
  93. +},
  94. +{
  95. + .phy_id = PHY_ID_AQR107,
  96. + .phy_id_mask = 0xfffffff0,
  97. + .name = "Aquantia AQR107",
  98. + .features = PHY_AQUANTIA_FEATURES,
  99. + .flags = PHY_HAS_INTERRUPT,
  100. + .aneg_done = aquantia_aneg_done,
  101. + .config_aneg = aquantia_config_aneg,
  102. + .config_intr = aquantia_config_intr,
  103. + .ack_interrupt = aquantia_ack_interrupt,
  104. + .read_status = aquantia_read_status,
  105. +},
  106. +{
  107. .phy_id = PHY_ID_AQR405,
  108. .phy_id_mask = 0xfffffff0,
  109. .name = "Aquantia AQR405",
  110. @@ -173,6 +199,8 @@ static struct mdio_device_id __maybe_unu
  111. { PHY_ID_AQ1202, 0xfffffff0 },
  112. { PHY_ID_AQ2104, 0xfffffff0 },
  113. { PHY_ID_AQR105, 0xfffffff0 },
  114. + { PHY_ID_AQR106, 0xfffffff0 },
  115. + { PHY_ID_AQR107, 0xfffffff0 },
  116. { PHY_ID_AQR405, 0xfffffff0 },
  117. { }
  118. };
  119. --- /dev/null
  120. +++ b/drivers/net/phy/cortina.c
  121. @@ -0,0 +1,118 @@
  122. +/*
  123. + * Copyright 2017 NXP
  124. + *
  125. + * This program is free software; you can redistribute it and/or modify
  126. + * it under the terms of the GNU General Public License as published by
  127. + * the Free Software Foundation; either version 2 of the License, or
  128. + * (at your option) any later version.
  129. + *
  130. + * This program is distributed in the hope that it will be useful,
  131. + * but WITHOUT ANY WARRANTY; without even the implied warranty of
  132. + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  133. + * GNU General Public License for more details.
  134. + *
  135. + * CORTINA is a registered trademark of Cortina Systems, Inc.
  136. + *
  137. + */
  138. +#include <linux/module.h>
  139. +#include <linux/phy.h>
  140. +
  141. +#define PHY_ID_CS4340 0x13e51002
  142. +
  143. +#define VILLA_GLOBAL_CHIP_ID_LSB 0x0
  144. +#define VILLA_GLOBAL_CHIP_ID_MSB 0x1
  145. +
  146. +#define VILLA_GLOBAL_GPIO_1_INTS 0x017
  147. +
  148. +static int cortina_read_reg(struct phy_device *phydev, u16 regnum)
  149. +{
  150. + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
  151. + MII_ADDR_C45 | regnum);
  152. +}
  153. +
  154. +static int cortina_config_aneg(struct phy_device *phydev)
  155. +{
  156. + phydev->supported = SUPPORTED_10000baseT_Full;
  157. + phydev->advertising = SUPPORTED_10000baseT_Full;
  158. +
  159. + return 0;
  160. +}
  161. +
  162. +static int cortina_read_status(struct phy_device *phydev)
  163. +{
  164. + int gpio_int_status, ret = 0;
  165. +
  166. + gpio_int_status = cortina_read_reg(phydev, VILLA_GLOBAL_GPIO_1_INTS);
  167. + if (gpio_int_status < 0) {
  168. + ret = gpio_int_status;
  169. + goto err;
  170. + }
  171. +
  172. + if (gpio_int_status & 0x8) {
  173. + /* up when edc_convergedS set */
  174. + phydev->speed = SPEED_10000;
  175. + phydev->duplex = DUPLEX_FULL;
  176. + phydev->link = 1;
  177. + } else {
  178. + phydev->link = 0;
  179. + }
  180. +
  181. +err:
  182. + return ret;
  183. +}
  184. +
  185. +static int cortina_soft_reset(struct phy_device *phydev)
  186. +{
  187. + return 0;
  188. +}
  189. +
  190. +static int cortina_probe(struct phy_device *phydev)
  191. +{
  192. + u32 phy_id = 0;
  193. + int id_lsb = 0, id_msb = 0;
  194. +
  195. + /* Read device id from phy registers. */
  196. + id_lsb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_LSB);
  197. + if (id_lsb < 0)
  198. + return -ENXIO;
  199. +
  200. + phy_id = id_lsb << 16;
  201. +
  202. + id_msb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_MSB);
  203. + if (id_msb < 0)
  204. + return -ENXIO;
  205. +
  206. + phy_id |= id_msb;
  207. +
  208. + /* Make sure the device tree binding matched the driver with the
  209. + * right device.
  210. + */
  211. + if (phy_id != phydev->drv->phy_id) {
  212. + phydev_err(phydev, "Error matching phy with %s driver\n",
  213. + phydev->drv->name);
  214. + return -ENODEV;
  215. + }
  216. +
  217. + return 0;
  218. +}
  219. +
  220. +static struct phy_driver cortina_driver[] = {
  221. +{
  222. + .phy_id = PHY_ID_CS4340,
  223. + .phy_id_mask = 0xffffffff,
  224. + .name = "Cortina CS4340",
  225. + .config_aneg = cortina_config_aneg,
  226. + .read_status = cortina_read_status,
  227. + .soft_reset = cortina_soft_reset,
  228. + .probe = cortina_probe,
  229. +},
  230. +};
  231. +
  232. +module_phy_driver(cortina_driver);
  233. +
  234. +static struct mdio_device_id __maybe_unused cortina_tbl[] = {
  235. + { PHY_ID_CS4340, 0xffffffff},
  236. + {},
  237. +};
  238. +
  239. +MODULE_DEVICE_TABLE(mdio, cortina_tbl);
  240. --- /dev/null
  241. +++ b/drivers/net/phy/fsl_backplane.c
  242. @@ -0,0 +1,1358 @@
  243. +/* Freescale backplane driver.
  244. + * Author: Shaohui Xie <[email protected]>
  245. + *
  246. + * Copyright 2015 Freescale Semiconductor, Inc.
  247. + *
  248. + * Licensed under the GPL-2 or later.
  249. + */
  250. +
  251. +#include <linux/kernel.h>
  252. +#include <linux/module.h>
  253. +#include <linux/mii.h>
  254. +#include <linux/mdio.h>
  255. +#include <linux/ethtool.h>
  256. +#include <linux/phy.h>
  257. +#include <linux/io.h>
  258. +#include <linux/of.h>
  259. +#include <linux/of_net.h>
  260. +#include <linux/of_address.h>
  261. +#include <linux/of_platform.h>
  262. +#include <linux/timer.h>
  263. +#include <linux/delay.h>
  264. +#include <linux/workqueue.h>
  265. +
  266. +/* XFI PCS Device Identifier */
  267. +#define FSL_PCS_PHY_ID 0x0083e400
  268. +
  269. +/* Freescale KR PMD registers */
  270. +#define FSL_KR_PMD_CTRL 0x96
  271. +#define FSL_KR_PMD_STATUS 0x97
  272. +#define FSL_KR_LP_CU 0x98
  273. +#define FSL_KR_LP_STATUS 0x99
  274. +#define FSL_KR_LD_CU 0x9a
  275. +#define FSL_KR_LD_STATUS 0x9b
  276. +
  277. +/* Freescale KR PMD defines */
  278. +#define PMD_RESET 0x1
  279. +#define PMD_STATUS_SUP_STAT 0x4
  280. +#define PMD_STATUS_FRAME_LOCK 0x2
  281. +#define TRAIN_EN 0x3
  282. +#define TRAIN_DISABLE 0x1
  283. +#define RX_STAT 0x1
  284. +
  285. +#define FSL_KR_RX_LINK_STAT_MASK 0x1000
  286. +#define FSL_XFI_PCS_10GR_SR1 0x20
  287. +
  288. +/* Freescale KX PCS mode register */
  289. +#define FSL_PCS_IF_MODE 0x8014
  290. +
  291. +/* Freescale KX PCS mode register init value */
  292. +#define IF_MODE_INIT 0x8
  293. +
  294. +/* Freescale KX/KR AN registers */
  295. +#define FSL_AN_AD1 0x11
  296. +#define FSL_AN_BP_STAT 0x30
  297. +
  298. +/* Freescale KX/KR AN registers defines */
  299. +#define AN_CTRL_INIT 0x1200
  300. +#define KX_AN_AD1_INIT 0x25
  301. +#define KR_AN_AD1_INIT 0x85
  302. +#define AN_LNK_UP_MASK 0x4
  303. +#define KR_AN_MASK 0x8
  304. +#define TRAIN_FAIL 0x8
  305. +
  306. +/* C(-1) */
  307. +#define BIN_M1 0
  308. +/* C(1) */
  309. +#define BIN_LONG 1
  310. +#define BIN_M1_SEL 6
  311. +#define BIN_Long_SEL 7
  312. +#define CDR_SEL_MASK 0x00070000
  313. +#define BIN_SNAPSHOT_NUM 5
  314. +#define BIN_M1_THRESHOLD 3
  315. +#define BIN_LONG_THRESHOLD 2
  316. +
  317. +#define PRE_COE_SHIFT 22
  318. +#define POST_COE_SHIFT 16
  319. +#define ZERO_COE_SHIFT 8
  320. +
  321. +#define PRE_COE_MAX 0x0
  322. +#define PRE_COE_MIN 0x8
  323. +#define POST_COE_MAX 0x0
  324. +#define POST_COE_MIN 0x10
  325. +#define ZERO_COE_MAX 0x30
  326. +#define ZERO_COE_MIN 0x0
  327. +
  328. +#define TECR0_INIT 0x24200000
  329. +#define RATIO_PREQ 0x3
  330. +#define RATIO_PST1Q 0xd
  331. +#define RATIO_EQ 0x20
  332. +
  333. +#define GCR0_RESET_MASK 0x600000
  334. +#define GCR1_SNP_START_MASK 0x00000040
  335. +#define GCR1_CTL_SNP_START_MASK 0x00002000
  336. +#define GCR1_REIDL_TH_MASK 0x00700000
  337. +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
  338. +#define GCR1_REIDL_ET_MAS_MASK 0x00004000
  339. +#define TECR0_AMP_RED_MASK 0x0000003f
  340. +
  341. +#define RECR1_CTL_SNP_DONE_MASK 0x00000002
  342. +#define RECR1_SNP_DONE_MASK 0x00000004
  343. +#define TCSR1_SNP_DATA_MASK 0x0000ffc0
  344. +#define TCSR1_SNP_DATA_SHIFT 6
  345. +#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
  346. +
  347. +#define RECR1_GAINK2_MASK 0x0f000000
  348. +#define RECR1_GAINK2_SHIFT 24
  349. +#define RECR1_GAINK3_MASK 0x000f0000
  350. +#define RECR1_GAINK3_SHIFT 16
  351. +#define RECR1_OFFSET_MASK 0x00003f80
  352. +#define RECR1_OFFSET_SHIFT 7
  353. +#define RECR1_BLW_MASK 0x00000f80
  354. +#define RECR1_BLW_SHIFT 7
  355. +#define EYE_CTRL_SHIFT 12
  356. +#define BASE_WAND_SHIFT 10
  357. +
  358. +#define XGKR_TIMEOUT 1050
  359. +
  360. +#define INCREMENT 1
  361. +#define DECREMENT 2
  362. +#define TIMEOUT_LONG 3
  363. +#define TIMEOUT_M1 3
  364. +
  365. +#define RX_READY_MASK 0x8000
  366. +#define PRESET_MASK 0x2000
  367. +#define INIT_MASK 0x1000
  368. +#define COP1_MASK 0x30
  369. +#define COP1_SHIFT 4
  370. +#define COZ_MASK 0xc
  371. +#define COZ_SHIFT 2
  372. +#define COM1_MASK 0x3
  373. +#define COM1_SHIFT 0
  374. +#define REQUEST_MASK 0x3f
  375. +#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
  376. + COP1_MASK | COZ_MASK | COM1_MASK)
  377. +
  378. +#define NEW_ALGORITHM_TRAIN_TX
  379. +#ifdef NEW_ALGORITHM_TRAIN_TX
  380. +#define FORCE_INC_COP1_NUMBER 0
  381. +#define FORCE_INC_COM1_NUMBER 1
  382. +#endif
  383. +
  384. +#define VAL_INVALID 0xff
  385. +
  386. +static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
  387. + 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
  388. +static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
  389. + 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
  390. +
  391. +enum backplane_mode {
  392. + PHY_BACKPLANE_1000BASE_KX,
  393. + PHY_BACKPLANE_10GBASE_KR,
  394. + PHY_BACKPLANE_INVAL
  395. +};
  396. +
  397. +enum coe_filed {
  398. + COE_COP1,
  399. + COE_COZ,
  400. + COE_COM
  401. +};
  402. +
  403. +enum coe_update {
  404. + COE_NOTUPDATED,
  405. + COE_UPDATED,
  406. + COE_MIN,
  407. + COE_MAX,
  408. + COE_INV
  409. +};
  410. +
  411. +enum train_state {
  412. + DETECTING_LP,
  413. + TRAINED,
  414. +};
  415. +
  416. +struct per_lane_ctrl_status {
  417. + __be32 gcr0; /* 0x.000 - General Control Register 0 */
  418. + __be32 gcr1; /* 0x.004 - General Control Register 1 */
  419. + __be32 gcr2; /* 0x.008 - General Control Register 2 */
  420. + __be32 resv1; /* 0x.00C - Reserved */
  421. + __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
  422. + __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
  423. + __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
  424. + __be32 resv2; /* 0x.01C - Reserved */
  425. + __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */
  426. + __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */
  427. + __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */
  428. + __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */
  429. + __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
  430. + __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
  431. + __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
  432. + __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
  433. +};
  434. +
  435. +struct tx_condition {
  436. + bool bin_m1_late_early;
  437. + bool bin_long_late_early;
  438. + bool bin_m1_stop;
  439. + bool bin_long_stop;
  440. + bool tx_complete;
  441. + bool sent_init;
  442. + int m1_min_max_cnt;
  443. + int long_min_max_cnt;
  444. +#ifdef NEW_ALGORITHM_TRAIN_TX
  445. + int pre_inc;
  446. + int post_inc;
  447. +#endif
  448. +};
  449. +
  450. +struct fsl_xgkr_inst {
  451. + void *reg_base;
  452. + struct phy_device *phydev;
  453. + struct tx_condition tx_c;
  454. + struct delayed_work xgkr_wk;
  455. + enum train_state state;
  456. + u32 ld_update;
  457. + u32 ld_status;
  458. + u32 ratio_preq;
  459. + u32 ratio_pst1q;
  460. + u32 adpt_eq;
  461. +};
  462. +
  463. +static void tx_condition_init(struct tx_condition *tx_c)
  464. +{
  465. + tx_c->bin_m1_late_early = true;
  466. + tx_c->bin_long_late_early = false;
  467. + tx_c->bin_m1_stop = false;
  468. + tx_c->bin_long_stop = false;
  469. + tx_c->tx_complete = false;
  470. + tx_c->sent_init = false;
  471. + tx_c->m1_min_max_cnt = 0;
  472. + tx_c->long_min_max_cnt = 0;
  473. +#ifdef NEW_ALGORITHM_TRAIN_TX
  474. + tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
  475. + tx_c->post_inc = FORCE_INC_COP1_NUMBER;
  476. +#endif
  477. +}
  478. +
  479. +void tune_tecr0(struct fsl_xgkr_inst *inst)
  480. +{
  481. + struct per_lane_ctrl_status *reg_base = inst->reg_base;
  482. + u32 val;
  483. +
  484. + val = TECR0_INIT |
  485. + inst->adpt_eq << ZERO_COE_SHIFT |
  486. + inst->ratio_preq << PRE_COE_SHIFT |
  487. + inst->ratio_pst1q << POST_COE_SHIFT;
  488. +
  489. + /* reset the lane */
  490. + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
  491. + &reg_base->gcr0);
  492. + udelay(1);
  493. + iowrite32(val, &reg_base->tecr0);
  494. + udelay(1);
  495. + /* unreset the lane */
  496. + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
  497. + &reg_base->gcr0);
  498. + udelay(1);
  499. +}
  500. +
  501. +static void start_lt(struct phy_device *phydev)
  502. +{
  503. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_EN);
  504. +}
  505. +
  506. +static void stop_lt(struct phy_device *phydev)
  507. +{
  508. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
  509. +}
  510. +
  511. +static void reset_gcr0(struct fsl_xgkr_inst *inst)
  512. +{
  513. + struct per_lane_ctrl_status *reg_base = inst->reg_base;
  514. +
  515. + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
  516. + &reg_base->gcr0);
  517. + udelay(1);
  518. + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
  519. + &reg_base->gcr0);
  520. + udelay(1);
  521. +}
  522. +
  523. +void lane_set_1gkx(void *reg)
  524. +{
  525. + struct per_lane_ctrl_status *reg_base = reg;
  526. + u32 val;
  527. +
  528. + /* reset the lane */
  529. + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
  530. + &reg_base->gcr0);
  531. + udelay(1);
  532. +
  533. + /* set gcr1 for 1GKX */
  534. + val = ioread32(&reg_base->gcr1);
  535. + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
  536. + GCR1_REIDL_ET_MAS_MASK);
  537. + iowrite32(val, &reg_base->gcr1);
  538. + udelay(1);
  539. +
  540. + /* set tecr0 for 1GKX */
  541. + val = ioread32(&reg_base->tecr0);
  542. + val &= ~TECR0_AMP_RED_MASK;
  543. + iowrite32(val, &reg_base->tecr0);
  544. + udelay(1);
  545. +
  546. + /* unreset the lane */
  547. + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
  548. + &reg_base->gcr0);
  549. + udelay(1);
  550. +}
  551. +
  552. +static void reset_lt(struct phy_device *phydev)
  553. +{
  554. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, PMD_RESET);
  555. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
  556. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_CU, 0);
  557. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_STATUS, 0);
  558. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS, 0);
  559. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU, 0);
  560. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS, 0);
  561. +}
  562. +
  563. +static void start_xgkr_state_machine(struct delayed_work *work)
  564. +{
  565. + queue_delayed_work(system_power_efficient_wq, work,
  566. + msecs_to_jiffies(XGKR_TIMEOUT));
  567. +}
  568. +
  569. +static void start_xgkr_an(struct phy_device *phydev)
  570. +{
  571. + struct fsl_xgkr_inst *inst;
  572. +
  573. + reset_lt(phydev);
  574. + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KR_AN_AD1_INIT);
  575. + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
  576. +
  577. + inst = phydev->priv;
  578. +
  579. + /* start state machine*/
  580. + start_xgkr_state_machine(&inst->xgkr_wk);
  581. +}
  582. +
  583. +static void start_1gkx_an(struct phy_device *phydev)
  584. +{
  585. + phy_write_mmd(phydev, MDIO_MMD_PCS, FSL_PCS_IF_MODE, IF_MODE_INIT);
  586. + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KX_AN_AD1_INIT);
  587. + phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
  588. + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
  589. +}
  590. +
  591. +static void ld_coe_status(struct fsl_xgkr_inst *inst)
  592. +{
  593. + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
  594. + FSL_KR_LD_STATUS, inst->ld_status);
  595. +}
  596. +
  597. +static void ld_coe_update(struct fsl_xgkr_inst *inst)
  598. +{
  599. + dev_dbg(&inst->phydev->mdio.dev, "sending request: %x\n", inst->ld_update);
  600. + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
  601. + FSL_KR_LD_CU, inst->ld_update);
  602. +}
  603. +
  604. +static void init_inst(struct fsl_xgkr_inst *inst, int reset)
  605. +{
  606. + if (reset) {
  607. + inst->ratio_preq = RATIO_PREQ;
  608. + inst->ratio_pst1q = RATIO_PST1Q;
  609. + inst->adpt_eq = RATIO_EQ;
  610. + tune_tecr0(inst);
  611. + }
  612. +
  613. + tx_condition_init(&inst->tx_c);
  614. + inst->state = DETECTING_LP;
  615. + inst->ld_status &= RX_READY_MASK;
  616. + ld_coe_status(inst);
  617. + inst->ld_update = 0;
  618. + inst->ld_status &= ~RX_READY_MASK;
  619. + ld_coe_status(inst);
  620. +}
  621. +
  622. +#ifdef NEW_ALGORITHM_TRAIN_TX
  623. +static int get_median_gaink2(u32 *reg)
  624. +{
  625. + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
  626. + u32 rx_eq_snp;
  627. + struct per_lane_ctrl_status *reg_base;
  628. + int timeout;
  629. + int i, j, tmp, pos;
  630. +
  631. + reg_base = (struct per_lane_ctrl_status *)reg;
  632. +
  633. + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
  634. + /* wait until RECR1_CTL_SNP_DONE_MASK has cleared */
  635. + timeout = 100;
  636. + while (ioread32(&reg_base->recr1) &
  637. + RECR1_CTL_SNP_DONE_MASK) {
  638. + udelay(1);
  639. + timeout--;
  640. + if (timeout == 0)
  641. + break;
  642. + }
  643. +
  644. + /* start snap shot */
  645. + iowrite32((ioread32(&reg_base->gcr1) |
  646. + GCR1_CTL_SNP_START_MASK),
  647. + &reg_base->gcr1);
  648. +
  649. + /* wait for SNP done */
  650. + timeout = 100;
  651. + while (!(ioread32(&reg_base->recr1) &
  652. + RECR1_CTL_SNP_DONE_MASK)) {
  653. + udelay(1);
  654. + timeout--;
  655. + if (timeout == 0)
  656. + break;
  657. + }
  658. +
  659. + /* read and save the snap shot */
  660. + rx_eq_snp = ioread32(&reg_base->recr1);
  661. + gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
  662. + RECR1_GAINK2_SHIFT;
  663. +
  664. + /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
  665. + iowrite32((ioread32(&reg_base->gcr1) &
  666. + ~GCR1_CTL_SNP_START_MASK),
  667. + &reg_base->gcr1);
  668. + }
  669. +
  670. + /* get median of the 5 snap shot */
  671. + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
  672. + tmp = gaink2_snap_shot[i];
  673. + pos = i;
  674. + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
  675. + if (gaink2_snap_shot[j] < tmp) {
  676. + tmp = gaink2_snap_shot[j];
  677. + pos = j;
  678. + }
  679. + }
  680. +
  681. + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
  682. + gaink2_snap_shot[i] = tmp;
  683. + }
  684. +
  685. + return gaink2_snap_shot[2];
  686. +}
  687. +#endif
  688. +
  689. +static bool is_bin_early(int bin_sel, void *reg)
  690. +{
  691. + bool early = false;
  692. + int bin_snap_shot[BIN_SNAPSHOT_NUM];
  693. + int i, negative_count = 0;
  694. + struct per_lane_ctrl_status *reg_base = reg;
  695. + int timeout;
  696. +
  697. + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
  698. + /* wait until RECR1_SNP_DONE_MASK has cleared */
  699. + timeout = 100;
  700. + while ((ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
  701. + udelay(1);
  702. + timeout--;
  703. + if (timeout == 0)
  704. + break;
  705. + }
  706. +
  707. + /* set TCSR1[CDR_SEL] to BinM1/BinLong */
  708. + if (bin_sel == BIN_M1) {
  709. + iowrite32((ioread32(&reg_base->tcsr1) &
  710. + ~CDR_SEL_MASK) | BIN_M1_SEL,
  711. + &reg_base->tcsr1);
  712. + } else {
  713. + iowrite32((ioread32(&reg_base->tcsr1) &
  714. + ~CDR_SEL_MASK) | BIN_Long_SEL,
  715. + &reg_base->tcsr1);
  716. + }
  717. +
  718. + /* start snap shot */
  719. + iowrite32(ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
  720. + &reg_base->gcr1);
  721. +
  722. + /* wait for SNP done */
  723. + timeout = 100;
  724. + while (!(ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
  725. + udelay(1);
  726. + timeout--;
  727. + if (timeout == 0)
  728. + break;
  729. + }
  730. +
  731. + /* read and save the snap shot */
  732. + bin_snap_shot[i] = (ioread32(&reg_base->tcsr1) &
  733. + TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
  734. + if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
  735. + negative_count++;
  736. +
  737. + /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
  738. + iowrite32(ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
  739. + &reg_base->gcr1);
  740. + }
  741. +
  742. + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
  743. + ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
  744. + early = true;
  745. + }
  746. +
  747. + return early;
  748. +}
  749. +
  750. +static void train_tx(struct fsl_xgkr_inst *inst)
  751. +{
  752. + struct phy_device *phydev = inst->phydev;
  753. + struct tx_condition *tx_c = &inst->tx_c;
  754. + bool bin_m1_early, bin_long_early;
  755. + u32 lp_status, old_ld_update;
  756. + u32 status_cop1, status_coz, status_com1;
  757. + u32 req_cop1, req_coz, req_com1, req_preset, req_init;
  758. + u32 temp;
  759. +#ifdef NEW_ALGORITHM_TRAIN_TX
  760. + u32 median_gaink2;
  761. +#endif
  762. +
  763. +recheck:
  764. + if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
  765. + tx_c->tx_complete = true;
  766. + inst->ld_status |= RX_READY_MASK;
  767. + ld_coe_status(inst);
  768. + /* tell LP we are ready */
  769. + phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
  770. + FSL_KR_PMD_STATUS, RX_STAT);
  771. + return;
  772. + }
  773. +
  774. + /* We start by checking the current LP status. If we got any responses,
  775. + * we can clear up the appropriate update request so that the
  776. + * subsequent code may easily issue new update requests if needed.
  777. + */
  778. + lp_status = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
  779. + REQUEST_MASK;
  780. + status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
  781. + status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
  782. + status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
  783. +
  784. + old_ld_update = inst->ld_update;
  785. + req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
  786. + req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
  787. + req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
  788. + req_preset = old_ld_update & PRESET_MASK;
  789. + req_init = old_ld_update & INIT_MASK;
  790. +
  791. + /* IEEE802.3-2008, 72.6.10.2.3.1
  792. + * We may clear PRESET when all coefficients show UPDATED or MAX.
  793. + */
  794. + if (req_preset) {
  795. + if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
  796. + (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
  797. + (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
  798. + inst->ld_update &= ~PRESET_MASK;
  799. + }
  800. + }
  801. +
  802. + /* IEEE802.3-2008, 72.6.10.2.3.2
  803. + * We may clear INITIALIZE when no coefficients show NOT UPDATED.
  804. + */
  805. + if (req_init) {
  806. + if (status_cop1 != COE_NOTUPDATED &&
  807. + status_coz != COE_NOTUPDATED &&
  808. + status_com1 != COE_NOTUPDATED) {
  809. + inst->ld_update &= ~INIT_MASK;
  810. + }
  811. + }
  812. +
  813. + /* IEEE802.3-2008, 72.6.10.2.3.2
  814. + * we send initialize to the other side to ensure default settings
  815. + * for the LP. Naturally, we should do this only once.
  816. + */
  817. + if (!tx_c->sent_init) {
  818. + if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
  819. + inst->ld_update = INIT_MASK;
  820. + tx_c->sent_init = true;
  821. + }
  822. + }
  823. +
  824. + /* IEEE802.3-2008, 72.6.10.2.3.3
  825. + * We set coefficient requests to HOLD when we get the information
  826. + * about any updates. On clearing our prior response, we also update
  827. + * our internal status.
  828. + */
  829. + if (status_cop1 != COE_NOTUPDATED) {
  830. + if (req_cop1) {
  831. + inst->ld_update &= ~COP1_MASK;
  832. +#ifdef NEW_ALGORITHM_TRAIN_TX
  833. + if (tx_c->post_inc) {
  834. + if (req_cop1 == INCREMENT &&
  835. + status_cop1 == COE_MAX) {
  836. + tx_c->post_inc = 0;
  837. + tx_c->bin_long_stop = true;
  838. + tx_c->bin_m1_stop = true;
  839. + } else {
  840. + tx_c->post_inc -= 1;
  841. + }
  842. +
  843. + ld_coe_update(inst);
  844. + goto recheck;
  845. + }
  846. +#endif
  847. + if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
  848. + (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
  849. + dev_dbg(&inst->phydev->mdio.dev, "COP1 hit limit %s",
  850. + (status_cop1 == COE_MIN) ?
  851. + "DEC MIN" : "INC MAX");
  852. + tx_c->long_min_max_cnt++;
  853. + if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
  854. + tx_c->bin_long_stop = true;
  855. + ld_coe_update(inst);
  856. + goto recheck;
  857. + }
  858. + }
  859. + }
  860. + }
  861. +
  862. + if (status_coz != COE_NOTUPDATED) {
  863. + if (req_coz)
  864. + inst->ld_update &= ~COZ_MASK;
  865. + }
  866. +
  867. + if (status_com1 != COE_NOTUPDATED) {
  868. + if (req_com1) {
  869. + inst->ld_update &= ~COM1_MASK;
  870. +#ifdef NEW_ALGORITHM_TRAIN_TX
  871. + if (tx_c->pre_inc) {
  872. + if (req_com1 == INCREMENT &&
  873. + status_com1 == COE_MAX)
  874. + tx_c->pre_inc = 0;
  875. + else
  876. + tx_c->pre_inc -= 1;
  877. +
  878. + ld_coe_update(inst);
  879. + goto recheck;
  880. + }
  881. +#endif
  882. + /* Stop if we have reached the limit for a parameter. */
  883. + if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
  884. + (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
  885. + dev_dbg(&inst->phydev->mdio.dev, "COM1 hit limit %s",
  886. + (status_com1 == COE_MIN) ?
  887. + "DEC MIN" : "INC MAX");
  888. + tx_c->m1_min_max_cnt++;
  889. + if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
  890. + tx_c->bin_m1_stop = true;
  891. + ld_coe_update(inst);
  892. + goto recheck;
  893. + }
  894. + }
  895. + }
  896. + }
  897. +
  898. + if (old_ld_update != inst->ld_update) {
  899. + ld_coe_update(inst);
  900. + /* Redo these status checks and updates until we have no more
  901. + * changes, to speed up the overall process.
  902. + */
  903. + goto recheck;
  904. + }
  905. +
  906. + /* Do nothing if we have pending request. */
  907. + if ((req_coz || req_com1 || req_cop1))
  908. + return;
  909. + else if (lp_status)
  910. + /* No pending request but LP status was not reverted to
  911. + * not updated.
  912. + */
  913. + return;
  914. +
  915. +#ifdef NEW_ALGORITHM_TRAIN_TX
  916. + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
  917. + if (tx_c->pre_inc) {
  918. + inst->ld_update = INCREMENT << COM1_SHIFT;
  919. + ld_coe_update(inst);
  920. + return;
  921. + }
  922. +
  923. + if (status_cop1 != COE_MAX) {
  924. + median_gaink2 = get_median_gaink2(inst->reg_base);
  925. + if (median_gaink2 == 0xf) {
  926. + tx_c->post_inc = 1;
  927. + } else {
  928. + /* Gaink2 median lower than "F" */
  929. + tx_c->bin_m1_stop = true;
  930. + tx_c->bin_long_stop = true;
  931. + goto recheck;
  932. + }
  933. + } else {
  934. + /* C1 MAX */
  935. + tx_c->bin_m1_stop = true;
  936. + tx_c->bin_long_stop = true;
  937. + goto recheck;
  938. + }
  939. +
  940. + if (tx_c->post_inc) {
  941. + inst->ld_update = INCREMENT << COP1_SHIFT;
  942. + ld_coe_update(inst);
  943. + return;
  944. + }
  945. + }
  946. +#endif
  947. +
  948. + /* snapshot and select bin */
  949. + bin_m1_early = is_bin_early(BIN_M1, inst->reg_base);
  950. + bin_long_early = is_bin_early(BIN_LONG, inst->reg_base);
  951. +
  952. + if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
  953. + tx_c->bin_m1_stop = true;
  954. + goto recheck;
  955. + }
  956. +
  957. + if (!tx_c->bin_long_stop &&
  958. + tx_c->bin_long_late_early && !bin_long_early) {
  959. + tx_c->bin_long_stop = true;
  960. + goto recheck;
  961. + }
  962. +
  963. + /* IEEE802.3-2008, 72.6.10.2.3.3
  964. + * We only request coefficient updates when no PRESET/INITIALIZE is
  965. + * pending. We also only request coefficient updates when the
  966. + * corresponding status is NOT UPDATED and nothing is pending.
  967. + */
  968. + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
  969. + if (!tx_c->bin_long_stop) {
  970. + /* BinM1 correction means changing COM1 */
  971. + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
  972. + /* Avoid BinM1Late by requesting an
  973. + * immediate decrement.
  974. + */
  975. + if (!bin_m1_early) {
  976. + /* request decrement c(-1) */
  977. + temp = DECREMENT << COM1_SHIFT;
  978. + inst->ld_update = temp;
  979. + ld_coe_update(inst);
  980. + tx_c->bin_m1_late_early = bin_m1_early;
  981. + return;
  982. + }
  983. + }
  984. +
  985. + /* BinLong correction means changing COP1 */
  986. + if (!status_cop1 && !(inst->ld_update & COP1_MASK)) {
  987. + /* Locate BinLong transition point (if any)
  988. + * while avoiding BinM1Late.
  989. + */
  990. + if (bin_long_early) {
  991. + /* request increment c(1) */
  992. + temp = INCREMENT << COP1_SHIFT;
  993. + inst->ld_update = temp;
  994. + } else {
  995. + /* request decrement c(1) */
  996. + temp = DECREMENT << COP1_SHIFT;
  997. + inst->ld_update = temp;
  998. + }
  999. +
  1000. + ld_coe_update(inst);
  1001. + tx_c->bin_long_late_early = bin_long_early;
  1002. + }
  1003. + /* We try to finish BinLong before we do BinM1 */
  1004. + return;
  1005. + }
  1006. +
  1007. + if (!tx_c->bin_m1_stop) {
  1008. + /* BinM1 correction means changing COM1 */
  1009. + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
  1010. + /* Locate BinM1 transition point (if any) */
  1011. + if (bin_m1_early) {
  1012. + /* request increment c(-1) */
  1013. + temp = INCREMENT << COM1_SHIFT;
  1014. + inst->ld_update = temp;
  1015. + } else {
  1016. + /* request decrement c(-1) */
  1017. + temp = DECREMENT << COM1_SHIFT;
  1018. + inst->ld_update = temp;
  1019. + }
  1020. +
  1021. + ld_coe_update(inst);
  1022. + tx_c->bin_m1_late_early = bin_m1_early;
  1023. + }
  1024. + }
  1025. + }
  1026. +}
  1027. +
  1028. +static int is_link_up(struct phy_device *phydev)
  1029. +{
  1030. + int val;
  1031. +
  1032. + phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
  1033. + val = phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
  1034. +
  1035. + return (val & FSL_KR_RX_LINK_STAT_MASK) ? 1 : 0;
  1036. +}
  1037. +
  1038. +static int is_link_training_fail(struct phy_device *phydev)
  1039. +{
  1040. + int val;
  1041. + int timeout = 100;
  1042. +
  1043. + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS);
  1044. + if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
  1045. + /* check LNK_STAT for sure */
  1046. + while (timeout--) {
  1047. + if (is_link_up(phydev))
  1048. + return 0;
  1049. +
  1050. + usleep_range(100, 500);
  1051. + }
  1052. + }
  1053. +
  1054. + return 1;
  1055. +}
  1056. +
  1057. +static int check_rx(struct phy_device *phydev)
  1058. +{
  1059. + return phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
  1060. + RX_READY_MASK;
  1061. +}
  1062. +
  1063. +/* Coefficient values have hardware restrictions */
  1064. +static int is_ld_valid(struct fsl_xgkr_inst *inst)
  1065. +{
  1066. + u32 ratio_pst1q = inst->ratio_pst1q;
  1067. + u32 adpt_eq = inst->adpt_eq;
  1068. + u32 ratio_preq = inst->ratio_preq;
  1069. +
  1070. + if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
  1071. + return 0;
  1072. +
  1073. + if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
  1074. + ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
  1075. + return 0;
  1076. +
  1077. + if (ratio_preq > ratio_pst1q)
  1078. + return 0;
  1079. +
  1080. + if (ratio_preq > 8)
  1081. + return 0;
  1082. +
  1083. + if (adpt_eq < 26)
  1084. + return 0;
  1085. +
  1086. + if (ratio_pst1q > 16)
  1087. + return 0;
  1088. +
  1089. + return 1;
  1090. +}
  1091. +
  1092. +static int is_value_allowed(const u32 *val_table, u32 val)
  1093. +{
  1094. + int i;
  1095. +
  1096. + for (i = 0;; i++) {
  1097. + if (*(val_table + i) == VAL_INVALID)
  1098. + return 0;
  1099. + if (*(val_table + i) == val)
  1100. + return 1;
  1101. + }
  1102. +}
  1103. +
  1104. +static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request)
  1105. +{
  1106. + u32 ld_limit[3], ld_coe[3], step[3];
  1107. +
  1108. + ld_coe[0] = inst->ratio_pst1q;
  1109. + ld_coe[1] = inst->adpt_eq;
  1110. + ld_coe[2] = inst->ratio_preq;
  1111. +
  1112. + /* Information specific to the Freescale SerDes for 10GBase-KR:
  1113. + * Incrementing C(+1) means *decrementing* RATIO_PST1Q
  1114. + * Incrementing C(0) means incrementing ADPT_EQ
  1115. + * Incrementing C(-1) means *decrementing* RATIO_PREQ
  1116. + */
  1117. + step[0] = -1;
  1118. + step[1] = 1;
  1119. + step[2] = -1;
  1120. +
  1121. + switch (request) {
  1122. + case INCREMENT:
  1123. + ld_limit[0] = POST_COE_MAX;
  1124. + ld_limit[1] = ZERO_COE_MAX;
  1125. + ld_limit[2] = PRE_COE_MAX;
  1126. + if (ld_coe[field] != ld_limit[field])
  1127. + ld_coe[field] += step[field];
  1128. + else
  1129. + /* MAX */
  1130. + return 2;
  1131. + break;
  1132. + case DECREMENT:
  1133. + ld_limit[0] = POST_COE_MIN;
  1134. + ld_limit[1] = ZERO_COE_MIN;
  1135. + ld_limit[2] = PRE_COE_MIN;
  1136. + if (ld_coe[field] != ld_limit[field])
  1137. + ld_coe[field] -= step[field];
  1138. + else
  1139. + /* MIN */
  1140. + return 1;
  1141. + break;
  1142. + default:
  1143. + break;
  1144. + }
  1145. +
  1146. + if (is_ld_valid(inst)) {
  1147. + /* accept new ld */
  1148. + inst->ratio_pst1q = ld_coe[0];
  1149. + inst->adpt_eq = ld_coe[1];
  1150. + inst->ratio_preq = ld_coe[2];
  1151. + /* only some values for preq and pst1q can be used.
  1152. + * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
  1153. + * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
  1154. + */
  1155. + if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
  1156. + dev_dbg(&inst->phydev->mdio.dev,
  1157. + "preq skipped value: %d\n", ld_coe[2]);
  1158. + return 0;
  1159. + }
  1160. +
  1161. + if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
  1162. + dev_dbg(&inst->phydev->mdio.dev,
  1163. + "pst1q skipped value: %d\n", ld_coe[0]);
  1164. + return 0;
  1165. + }
  1166. +
  1167. + tune_tecr0(inst);
  1168. + } else {
  1169. + if (request == DECREMENT)
  1170. + /* MIN */
  1171. + return 1;
  1172. + if (request == INCREMENT)
  1173. + /* MAX */
  1174. + return 2;
  1175. + }
  1176. +
  1177. + return 0;
  1178. +}
  1179. +
  1180. +static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld)
  1181. +{
  1182. + u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX};
  1183. + u32 mask, val;
  1184. +
  1185. + switch (field) {
  1186. + case COE_COP1:
  1187. + mask = COP1_MASK;
  1188. + val = ld_coe[new_ld] << COP1_SHIFT;
  1189. + break;
  1190. + case COE_COZ:
  1191. + mask = COZ_MASK;
  1192. + val = ld_coe[new_ld] << COZ_SHIFT;
  1193. + break;
  1194. + case COE_COM:
  1195. + mask = COM1_MASK;
  1196. + val = ld_coe[new_ld] << COM1_SHIFT;
  1197. + break;
  1198. + default:
  1199. + return;
  1200. + }
  1201. +
  1202. + inst->ld_status &= ~mask;
  1203. + inst->ld_status |= val;
  1204. +}
  1205. +
  1206. +static void check_request(struct fsl_xgkr_inst *inst, int request)
  1207. +{
  1208. + int cop1_req, coz_req, com_req;
  1209. + int old_status, new_ld_sta;
  1210. +
  1211. + cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
  1212. + coz_req = (request & COZ_MASK) >> COZ_SHIFT;
  1213. + com_req = (request & COM1_MASK) >> COM1_SHIFT;
  1214. +
  1215. + /* IEEE802.3-2008, 72.6.10.2.5
  1216. + * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
  1217. + */
  1218. + old_status = inst->ld_status;
  1219. +
  1220. + if (cop1_req && !(inst->ld_status & COP1_MASK)) {
  1221. + new_ld_sta = inc_dec(inst, COE_COP1, cop1_req);
  1222. + min_max_updated(inst, COE_COP1, new_ld_sta);
  1223. + }
  1224. +
  1225. + if (coz_req && !(inst->ld_status & COZ_MASK)) {
  1226. + new_ld_sta = inc_dec(inst, COE_COZ, coz_req);
  1227. + min_max_updated(inst, COE_COZ, new_ld_sta);
  1228. + }
  1229. +
  1230. + if (com_req && !(inst->ld_status & COM1_MASK)) {
  1231. + new_ld_sta = inc_dec(inst, COE_COM, com_req);
  1232. + min_max_updated(inst, COE_COM, new_ld_sta);
  1233. + }
  1234. +
  1235. + if (old_status != inst->ld_status)
  1236. + ld_coe_status(inst);
  1237. +}
  1238. +
  1239. +static void preset(struct fsl_xgkr_inst *inst)
  1240. +{
  1241. + /* These are all MAX values from the IEEE802.3 perspective. */
  1242. + inst->ratio_pst1q = POST_COE_MAX;
  1243. + inst->adpt_eq = ZERO_COE_MAX;
  1244. + inst->ratio_preq = PRE_COE_MAX;
  1245. +
  1246. + tune_tecr0(inst);
  1247. + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
  1248. + inst->ld_status |= COE_MAX << COP1_SHIFT |
  1249. + COE_MAX << COZ_SHIFT |
  1250. + COE_MAX << COM1_SHIFT;
  1251. + ld_coe_status(inst);
  1252. +}
  1253. +
  1254. +static void initialize(struct fsl_xgkr_inst *inst)
  1255. +{
  1256. + inst->ratio_preq = RATIO_PREQ;
  1257. + inst->ratio_pst1q = RATIO_PST1Q;
  1258. + inst->adpt_eq = RATIO_EQ;
  1259. +
  1260. + tune_tecr0(inst);
  1261. + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
  1262. + inst->ld_status |= COE_UPDATED << COP1_SHIFT |
  1263. + COE_UPDATED << COZ_SHIFT |
  1264. + COE_UPDATED << COM1_SHIFT;
  1265. + ld_coe_status(inst);
  1266. +}
  1267. +
  1268. +static void train_rx(struct fsl_xgkr_inst *inst)
  1269. +{
  1270. + struct phy_device *phydev = inst->phydev;
  1271. + int request, old_ld_status;
  1272. +
  1273. + /* get request from LP */
  1274. + request = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU) &
  1275. + (LD_ALL_MASK);
  1276. + old_ld_status = inst->ld_status;
  1277. +
  1278. + /* IEEE802.3-2008, 72.6.10.2.5
  1279. + * Ensure we always go to NOT UPDATED for status reporting in
  1280. + * response to HOLD requests.
  1281. + * IEEE802.3-2008, 72.6.10.2.3.1/2
  1282. + * ... but only if PRESET/INITIALIZE are not active to ensure
  1283. + * we keep status until they are released.
  1284. + */
  1285. + if (!(request & (PRESET_MASK | INIT_MASK))) {
  1286. + if (!(request & COP1_MASK))
  1287. + inst->ld_status &= ~COP1_MASK;
  1288. +
  1289. + if (!(request & COZ_MASK))
  1290. + inst->ld_status &= ~COZ_MASK;
  1291. +
  1292. + if (!(request & COM1_MASK))
  1293. + inst->ld_status &= ~COM1_MASK;
  1294. +
  1295. + if (old_ld_status != inst->ld_status)
  1296. + ld_coe_status(inst);
  1297. + }
  1298. +
  1299. + /* As soon as the LP shows ready, no need to do any more updates. */
  1300. + if (check_rx(phydev)) {
  1301. + /* LP receiver is ready */
  1302. + if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
  1303. + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
  1304. + ld_coe_status(inst);
  1305. + }
  1306. + } else {
  1307. + /* IEEE802.3-2008, 72.6.10.2.3.1/2
  1308. + * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
  1309. + */
  1310. + if (request & (PRESET_MASK | INIT_MASK)) {
  1311. + if (!(inst->ld_status &
  1312. + (COP1_MASK | COZ_MASK | COM1_MASK))) {
  1313. + if (request & PRESET_MASK)
  1314. + preset(inst);
  1315. +
  1316. + if (request & INIT_MASK)
  1317. + initialize(inst);
  1318. + }
  1319. + }
  1320. +
  1321. + /* LP coefficients are not in HOLD */
  1322. + if (request & REQUEST_MASK)
  1323. + check_request(inst, request & REQUEST_MASK);
  1324. + }
  1325. +}
  1326. +
  1327. +static void xgkr_start_train(struct phy_device *phydev)
  1328. +{
  1329. + struct fsl_xgkr_inst *inst = phydev->priv;
  1330. + struct tx_condition *tx_c = &inst->tx_c;
  1331. + int val = 0, i;
  1332. + int lt_state;
  1333. + unsigned long dead_line;
  1334. + int rx_ok, tx_ok;
  1335. +
  1336. + init_inst(inst, 0);
  1337. + start_lt(phydev);
  1338. +
  1339. + for (i = 0; i < 2;) {
  1340. + dead_line = jiffies + msecs_to_jiffies(500);
  1341. + while (time_before(jiffies, dead_line)) {
  1342. + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
  1343. + FSL_KR_PMD_STATUS);
  1344. + if (val & TRAIN_FAIL) {
  1345. + /* LT failed already, reset the lane to avoid
  1346. + * it running into a hang, then start LT again.
  1347. + */
  1348. + reset_gcr0(inst);
  1349. + start_lt(phydev);
  1350. + } else if ((val & PMD_STATUS_SUP_STAT) &&
  1351. + (val & PMD_STATUS_FRAME_LOCK))
  1352. + break;
  1353. + usleep_range(100, 500);
  1354. + }
  1355. +
  1356. + if (!((val & PMD_STATUS_FRAME_LOCK) &&
  1357. + (val & PMD_STATUS_SUP_STAT))) {
  1358. + i++;
  1359. + continue;
  1360. + }
  1361. +
  1362. + /* init process */
  1363. + rx_ok = false;
  1364. + tx_ok = false;
  1365. + /* the LT should be finished in 500ms, failed or OK. */
  1366. + dead_line = jiffies + msecs_to_jiffies(500);
  1367. +
  1368. + while (time_before(jiffies, dead_line)) {
  1369. + /* check if the LT is already failed */
  1370. + lt_state = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
  1371. + FSL_KR_PMD_STATUS);
  1372. + if (lt_state & TRAIN_FAIL) {
  1373. + reset_gcr0(inst);
  1374. + break;
  1375. + }
  1376. +
  1377. + rx_ok = check_rx(phydev);
  1378. + tx_ok = tx_c->tx_complete;
  1379. +
  1380. + if (rx_ok && tx_ok)
  1381. + break;
  1382. +
  1383. + if (!rx_ok)
  1384. + train_rx(inst);
  1385. +
  1386. + if (!tx_ok)
  1387. + train_tx(inst);
  1388. +
  1389. + usleep_range(100, 500);
  1390. + }
  1391. +
  1392. + i++;
  1393. + /* check LT result */
  1394. + if (is_link_training_fail(phydev)) {
  1395. + init_inst(inst, 0);
  1396. + continue;
  1397. + } else {
  1398. + stop_lt(phydev);
  1399. + inst->state = TRAINED;
  1400. + break;
  1401. + }
  1402. + }
  1403. +}
  1404. +
  1405. +static void xgkr_state_machine(struct work_struct *work)
  1406. +{
  1407. + struct delayed_work *dwork = to_delayed_work(work);
  1408. + struct fsl_xgkr_inst *inst = container_of(dwork,
  1409. + struct fsl_xgkr_inst,
  1410. + xgkr_wk);
  1411. + struct phy_device *phydev = inst->phydev;
  1412. + int an_state;
  1413. + bool needs_train = false;
  1414. +
  1415. + mutex_lock(&phydev->lock);
  1416. +
  1417. + switch (inst->state) {
  1418. + case DETECTING_LP:
  1419. + phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
  1420. + an_state = phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
  1421. + if ((an_state & KR_AN_MASK))
  1422. + needs_train = true;
  1423. + break;
  1424. + case TRAINED:
  1425. + if (!is_link_up(phydev)) {
  1426. + dev_info(&phydev->mdio.dev,
  1427. + "Detect hotplug, restart training\n");
  1428. + init_inst(inst, 1);
  1429. + start_xgkr_an(phydev);
  1430. + inst->state = DETECTING_LP;
  1431. + }
  1432. + break;
  1433. + }
  1434. +
  1435. + if (needs_train)
  1436. + xgkr_start_train(phydev);
  1437. +
  1438. + mutex_unlock(&phydev->lock);
  1439. + queue_delayed_work(system_power_efficient_wq, &inst->xgkr_wk,
  1440. + msecs_to_jiffies(XGKR_TIMEOUT));
  1441. +}
  1442. +
  1443. +static int fsl_backplane_probe(struct phy_device *phydev)
  1444. +{
  1445. + struct fsl_xgkr_inst *xgkr_inst;
  1446. + struct device_node *phy_node, *lane_node;
  1447. + struct resource res_lane;
  1448. + const char *bm;
  1449. + int ret;
  1450. + int bp_mode;
  1451. + u32 lane[2];
  1452. +
  1453. + phy_node = phydev->mdio.dev.of_node;
  1454. + bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
  1455. + if (bp_mode < 0)
  1456. + return 0;
  1457. +
  1458. + if (!strcasecmp(bm, "1000base-kx")) {
  1459. + bp_mode = PHY_BACKPLANE_1000BASE_KX;
  1460. + } else if (!strcasecmp(bm, "10gbase-kr")) {
  1461. + bp_mode = PHY_BACKPLANE_10GBASE_KR;
  1462. + } else {
  1463. + dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
  1464. + return -EINVAL;
  1465. + }
  1466. +
  1467. + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
  1468. + if (!lane_node) {
  1469. + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
  1470. + return -EINVAL;
  1471. + }
  1472. +
  1473. + ret = of_address_to_resource(lane_node, 0, &res_lane);
  1474. + if (ret) {
  1475. + dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
  1476. + return ret;
  1477. + }
  1478. +
  1479. + of_node_put(lane_node);
  1480. + ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
  1481. + (u32 *)&lane, 2);
  1482. + if (ret) {
  1483. + dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
  1484. + return -EINVAL;
  1485. + }
  1486. +
  1487. + phydev->priv = devm_ioremap_nocache(&phydev->mdio.dev,
  1488. + res_lane.start + lane[0],
  1489. + lane[1]);
  1490. + if (!phydev->priv) {
  1491. + dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
  1492. + return -ENOMEM;
  1493. + }
  1494. +
  1495. + if (bp_mode == PHY_BACKPLANE_1000BASE_KX) {
  1496. + phydev->speed = SPEED_1000;
  1497. + /* configure the lane for 1000BASE-KX */
  1498. + lane_set_1gkx(phydev->priv);
  1499. + return 0;
  1500. + }
  1501. +
  1502. + xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
  1503. + sizeof(*xgkr_inst), GFP_KERNEL);
  1504. + if (!xgkr_inst)
  1505. + return -ENOMEM;
  1506. +
  1507. + xgkr_inst->reg_base = phydev->priv;
  1508. + xgkr_inst->phydev = phydev;
  1509. + phydev->priv = xgkr_inst;
  1510. +
  1511. + if (bp_mode == PHY_BACKPLANE_10GBASE_KR) {
  1512. + phydev->speed = SPEED_10000;
  1513. + INIT_DELAYED_WORK(&xgkr_inst->xgkr_wk, xgkr_state_machine);
  1514. + }
  1515. +
  1516. + return 0;
  1517. +}
  1518. +
  1519. +static int fsl_backplane_aneg_done(struct phy_device *phydev)
  1520. +{
  1521. + return 1;
  1522. +}
  1523. +
  1524. +static int fsl_backplane_config_aneg(struct phy_device *phydev)
  1525. +{
  1526. + if (phydev->speed == SPEED_10000) {
  1527. + phydev->supported |= SUPPORTED_10000baseKR_Full;
  1528. + start_xgkr_an(phydev);
  1529. + } else if (phydev->speed == SPEED_1000) {
  1530. + phydev->supported |= SUPPORTED_1000baseKX_Full;
  1531. + start_1gkx_an(phydev);
  1532. + }
  1533. +
  1534. + phydev->advertising = phydev->supported;
  1535. + phydev->duplex = 1;
  1536. +
  1537. + return 0;
  1538. +}
  1539. +
  1540. +static int fsl_backplane_suspend(struct phy_device *phydev)
  1541. +{
  1542. + if (phydev->speed == SPEED_10000) {
  1543. + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
  1544. +
  1545. + cancel_delayed_work_sync(&xgkr_inst->xgkr_wk);
  1546. + }
  1547. + return 0;
  1548. +}
  1549. +
  1550. +static int fsl_backplane_resume(struct phy_device *phydev)
  1551. +{
  1552. + if (phydev->speed == SPEED_10000) {
  1553. + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
  1554. +
  1555. + init_inst(xgkr_inst, 1);
  1556. + queue_delayed_work(system_power_efficient_wq,
  1557. + &xgkr_inst->xgkr_wk,
  1558. + msecs_to_jiffies(XGKR_TIMEOUT));
  1559. + }
  1560. + return 0;
  1561. +}
  1562. +
  1563. +static int fsl_backplane_read_status(struct phy_device *phydev)
  1564. +{
  1565. + if (is_link_up(phydev))
  1566. + phydev->link = 1;
  1567. + else
  1568. + phydev->link = 0;
  1569. +
  1570. + return 0;
  1571. +}
  1572. +
  1573. +static struct phy_driver fsl_backplane_driver[] = {
  1574. + {
  1575. + .phy_id = FSL_PCS_PHY_ID,
  1576. + .name = "Freescale Backplane",
  1577. + .phy_id_mask = 0xffffffff,
  1578. + .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
  1579. + SUPPORTED_MII,
  1580. + .probe = fsl_backplane_probe,
  1581. + .aneg_done = fsl_backplane_aneg_done,
  1582. + .config_aneg = fsl_backplane_config_aneg,
  1583. + .read_status = fsl_backplane_read_status,
  1584. + .suspend = fsl_backplane_suspend,
  1585. + .resume = fsl_backplane_resume,
  1586. + },
  1587. +};
  1588. +
  1589. +module_phy_driver(fsl_backplane_driver);
  1590. +
  1591. +static struct mdio_device_id __maybe_unused freescale_tbl[] = {
  1592. + { FSL_PCS_PHY_ID, 0xffffffff },
  1593. + { }
  1594. +};
  1595. +
  1596. +MODULE_DEVICE_TABLE(mdio, freescale_tbl);
  1597. +
  1598. +MODULE_DESCRIPTION("Freescale Backplane driver");
  1599. +MODULE_AUTHOR("Shaohui Xie <[email protected]>");
  1600. +MODULE_LICENSE("GPL v2");
  1601. --- a/drivers/net/phy/phy.c
  1602. +++ b/drivers/net/phy/phy.c
  1603. @@ -585,7 +585,7 @@ int phy_mii_ioctl(struct phy_device *phy
  1604. return 0;
  1605. case SIOCSHWTSTAMP:
  1606. - if (phydev->drv->hwtstamp)
  1607. + if (phydev->drv && phydev->drv->hwtstamp)
  1608. return phydev->drv->hwtstamp(phydev, ifr);
  1609. /* fall through */
  1610. @@ -610,6 +610,9 @@ static int phy_start_aneg_priv(struct ph
  1611. bool trigger = 0;
  1612. int err;
  1613. + if (!phydev->drv)
  1614. + return -EIO;
  1615. +
  1616. mutex_lock(&phydev->lock);
  1617. if (AUTONEG_DISABLE == phydev->autoneg)
  1618. @@ -1009,7 +1012,7 @@ void phy_state_machine(struct work_struc
  1619. old_state = phydev->state;
  1620. - if (phydev->drv->link_change_notify)
  1621. + if (phydev->drv && phydev->drv->link_change_notify)
  1622. phydev->drv->link_change_notify(phydev);
  1623. switch (phydev->state) {
  1624. @@ -1311,6 +1314,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect);
  1625. */
  1626. int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
  1627. {
  1628. + if (!phydev->drv)
  1629. + return -EIO;
  1630. +
  1631. /* According to 802.3az,the EEE is supported only in full duplex-mode.
  1632. * Also EEE feature is active when core is operating with MII, GMII
  1633. * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
  1634. @@ -1388,6 +1394,9 @@ EXPORT_SYMBOL(phy_init_eee);
  1635. */
  1636. int phy_get_eee_err(struct phy_device *phydev)
  1637. {
  1638. + if (!phydev->drv)
  1639. + return -EIO;
  1640. +
  1641. return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
  1642. }
  1643. EXPORT_SYMBOL(phy_get_eee_err);
  1644. @@ -1404,6 +1413,9 @@ int phy_ethtool_get_eee(struct phy_devic
  1645. {
  1646. int val;
  1647. + if (!phydev->drv)
  1648. + return -EIO;
  1649. +
  1650. /* Get Supported EEE */
  1651. val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
  1652. if (val < 0)
  1653. @@ -1437,6 +1449,9 @@ int phy_ethtool_set_eee(struct phy_devic
  1654. {
  1655. int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
  1656. + if (!phydev->drv)
  1657. + return -EIO;
  1658. +
  1659. /* Mask prohibited EEE modes */
  1660. val &= ~phydev->eee_broken_modes;
  1661. @@ -1448,7 +1463,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee);
  1662. int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
  1663. {
  1664. - if (phydev->drv->set_wol)
  1665. + if (phydev->drv && phydev->drv->set_wol)
  1666. return phydev->drv->set_wol(phydev, wol);
  1667. return -EOPNOTSUPP;
  1668. @@ -1457,7 +1472,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol);
  1669. void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
  1670. {
  1671. - if (phydev->drv->get_wol)
  1672. + if (phydev->drv && phydev->drv->get_wol)
  1673. phydev->drv->get_wol(phydev, wol);
  1674. }
  1675. EXPORT_SYMBOL(phy_ethtool_get_wol);
  1676. --- a/drivers/net/phy/phy_device.c
  1677. +++ b/drivers/net/phy/phy_device.c
  1678. @@ -1046,7 +1046,7 @@ int phy_suspend(struct phy_device *phyde
  1679. if (wol.wolopts)
  1680. return -EBUSY;
  1681. - if (phydrv->suspend)
  1682. + if (phydev->drv && phydrv->suspend)
  1683. ret = phydrv->suspend(phydev);
  1684. if (ret)
  1685. @@ -1063,7 +1063,7 @@ int phy_resume(struct phy_device *phydev
  1686. struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
  1687. int ret = 0;
  1688. - if (phydrv->resume)
  1689. + if (phydev->drv && phydrv->resume)
  1690. ret = phydrv->resume(phydev);
  1691. if (ret)
  1692. @@ -1726,7 +1726,7 @@ static int phy_remove(struct device *dev
  1693. phydev->state = PHY_DOWN;
  1694. mutex_unlock(&phydev->lock);
  1695. - if (phydev->drv->remove)
  1696. + if (phydev->drv && phydev->drv->remove)
  1697. phydev->drv->remove(phydev);
  1698. phydev->drv = NULL;
  1699. --- a/drivers/net/phy/swphy.c
  1700. +++ b/drivers/net/phy/swphy.c
  1701. @@ -77,6 +77,7 @@ static const struct swmii_regs duplex[]
  1702. static int swphy_decode_speed(int speed)
  1703. {
  1704. switch (speed) {
  1705. + case 10000:
  1706. case 1000:
  1707. return SWMII_SPEED_1000;
  1708. case 100:
  1709. --- a/include/linux/phy.h
  1710. +++ b/include/linux/phy.h
  1711. @@ -81,6 +81,7 @@ typedef enum {
  1712. PHY_INTERFACE_MODE_MOCA,
  1713. PHY_INTERFACE_MODE_QSGMII,
  1714. PHY_INTERFACE_MODE_TRGMII,
  1715. + PHY_INTERFACE_MODE_SGMII_2500,
  1716. PHY_INTERFACE_MODE_MAX,
  1717. } phy_interface_t;
  1718. @@ -784,6 +785,9 @@ int phy_stop_interrupts(struct phy_devic
  1719. static inline int phy_read_status(struct phy_device *phydev)
  1720. {
  1721. + if (!phydev->drv)
  1722. + return -EIO;
  1723. +
  1724. return phydev->drv->read_status(phydev);
  1725. }