213-spi-mediatek-add-mt7986-spi-support.patch 26 KB

From 7d99750f96fc6904d54affebdc8c9b0bfae1e9e8 Mon Sep 17 00:00:00 2001
From: Sam Shih <[email protected]>
Date: Sun, 17 Apr 2022 11:40:22 +0800
Subject: [PATCH] spi: mediatek: backport document and driver to support mt7986
 spi design

This patch adds support for the IPM design and upgrades the devicetree
binding. The changes come from the following threads:
- https://lore.kernel.org/all/[email protected]/
- https://lore.kernel.org/all/[email protected]/

Signed-off-by: Sam Shih <[email protected]>
---
 .../bindings/spi/mediatek,spi-mt65xx.yaml     | 111 ++++
 drivers/spi/spi-mt65xx.c                      | 509 ++++++++++++++++--
 2 files changed, 572 insertions(+), 48 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
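As context for the binding change below, a board-level device tree node for the new IPM controller would combine the "mediatek,mt7986-spi-ipm" compatible with the "mediatek,spi-ipm" fallback and may supply the optional fourth "hclk" bus clock. The sketch that follows only illustrates the shape of such a node; the unit address, interrupt specifier, and clock phandles/identifiers are placeholders, not values taken from this patch or from an actual MT7986 device tree, and the usual dt-bindings header includes are assumed:

    spi@1100a000 {
            compatible = "mediatek,mt7986-spi-ipm", "mediatek,spi-ipm";
            #address-cells = <1>;
            #size-cells = <0>;
            reg = <0x1100a000 0x100>;
            interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&topckgen CLK_TOP_SPI_PARENT>,   /* parent clock (placeholder) */
                     <&topckgen CLK_TOP_SPI_SEL>,      /* mux clock (placeholder) */
                     <&infracfg CLK_INFRA_SPI0>,       /* clock gate (placeholder) */
                     <&infracfg CLK_INFRA_SPI0_HCK>;   /* optional AHB bus clock (placeholder) */
            clock-names = "parent-clk", "sel-clk", "spi-clk", "hclk";
    };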
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/mediatek,spi-mt65xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SPI Bus controller for MediaTek ARM SoCs
+
+maintainers:
+  - Leilk Liu <[email protected]>
+
+allOf:
+  - $ref: "/schemas/spi/spi-controller.yaml#"
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - enum:
+              - mediatek,mt7629-spi
+          - const: mediatek,mt7622-spi
+      - items:
+          - enum:
+              - mediatek,mt8516-spi
+          - const: mediatek,mt2712-spi
+      - items:
+          - enum:
+              - mediatek,mt6779-spi
+              - mediatek,mt8186-spi
+              - mediatek,mt8192-spi
+              - mediatek,mt8195-spi
+          - const: mediatek,mt6765-spi
+      - items:
+          - enum:
+              - mediatek,mt7986-spi-ipm
+          - const: mediatek,spi-ipm
+      - items:
+          - enum:
+              - mediatek,mt2701-spi
+              - mediatek,mt2712-spi
+              - mediatek,mt6589-spi
+              - mediatek,mt6765-spi
+              - mediatek,mt6893-spi
+              - mediatek,mt7622-spi
+              - mediatek,mt8135-spi
+              - mediatek,mt8173-spi
+              - mediatek,mt8183-spi
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 3
+    items:
+      - description: clock used for the parent clock
+      - description: clock used for the muxes clock
+      - description: clock used for the clock gate
+      - description: clock used for the AHB bus, this clock is optional
+
+  clock-names:
+    minItems: 3
+    items:
+      - const: parent-clk
+      - const: sel-clk
+      - const: spi-clk
+      - const: hclk
+
+  mediatek,pad-select:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 1
+    maxItems: 4
+    items:
+      enum: [0, 1, 2, 3]
+    description:
+      specify which pins group(ck/mi/mo/cs) spi controller used.
+      This is an array.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - '#address-cells'
+  - '#size-cells'
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/mt8173-clk.h>
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    spi@1100a000 {
+      compatible = "mediatek,mt8173-spi";
+      #address-cells = <1>;
+      #size-cells = <0>;
+      reg = <0x1100a000 0x1000>;
+      interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
+      clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
+               <&topckgen CLK_TOP_SPI_SEL>,
+               <&pericfg CLK_PERI_SPI0>;
+      clock-names = "parent-clk", "sel-clk", "spi-clk";
+      cs-gpios = <&pio 105 GPIO_ACTIVE_LOW>, <&pio 72 GPIO_ACTIVE_LOW>;
+      mediatek,pad-select = <1>, <0>;
+    };
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -12,11 +12,12 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
#define SPI_CFG0_REG 0x0000
@@ -31,6 +32,7 @@
#define SPI_CFG2_REG 0x0028
#define SPI_TX_SRC_REG_64 0x002c
#define SPI_RX_DST_REG_64 0x0030
+#define SPI_CFG3_IPM_REG 0x0040
#define SPI_CFG0_SCK_HIGH_OFFSET 0
#define SPI_CFG0_SCK_LOW_OFFSET 8
@@ -51,6 +53,7 @@
#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
+#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET 0
#define SPI_CFG2_SCK_LOW_OFFSET 16
@@ -71,6 +74,24 @@
#define SPI_CMD_TX_ENDIAN BIT(15)
#define SPI_CMD_FINISH_IE BIT(16)
#define SPI_CMD_PAUSE_IE BIT(17)
+#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
+#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
+#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
+
+#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
+
+#define PIN_MODE_CFG(x) ((x) / 2)
+
+#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
+#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
+#define SPI_CFG3_IPM_XMODE_EN BIT(4)
+#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
+#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
+#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
+
+#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
+#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
+#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
#define MT8173_SPI_MAX_PAD_SEL 3
@@ -81,6 +102,9 @@
#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024
+#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
+#define MTK_SPI_IPM_PACKET_LOOP SZ_256
+
#define MTK_SPI_32BITS_MASK (0xffffffff)
#define DMA_ADDR_EXT_BITS (36)
@@ -96,6 +120,8 @@ struct mtk_spi_compatible {
bool dma_ext;
/* some IC no need unprepare SPI clk */
bool no_need_unprepare;
+ /* IPM design adjust and extend register to support more features */
+ bool ipm_design;
};
struct mtk_spi {
@@ -103,7 +129,7 @@ struct mtk_spi {
u32 state;
int pad_num;
u32 *pad_sel;
- struct clk *parent_clk, *sel_clk, *spi_clk;
+ struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
struct spi_transfer *cur_transfer;
u32 xfer_len;
u32 num_xfered;
@@ -111,6 +137,11 @@ struct mtk_spi {
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
u32 spi_clk_hz;
+ struct completion spimem_done;
+ bool use_spimem;
+ struct device *dev;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
};
static const struct mtk_spi_compatible mtk_common_compat;
@@ -119,6 +150,12 @@ static const struct mtk_spi_compatible m
.must_tx = true,
};
+static const struct mtk_spi_compatible mtk_ipm_compat = {
+ .enhance_timing = true,
+ .dma_ext = true,
+ .ipm_design = true,
+};
+
static const struct mtk_spi_compatible mt6765_compat = {
.need_pad_sel = true,
.must_tx = true,
@@ -160,6 +197,9 @@ static const struct mtk_chip_config mtk_
};
static const struct of_device_id mtk_spi_of_match[] = {
+ { .compatible = "mediatek,spi-ipm",
+ .data = (void *)&mtk_ipm_compat,
+ },
{ .compatible = "mediatek,mt2701-spi",
.data = (void *)&mtk_common_compat,
},
@@ -278,12 +318,11 @@ static int mtk_spi_set_hw_cs_timing(stru
return 0;
}
-static int mtk_spi_prepare_message(struct spi_master *master,
- struct spi_message *msg)
+static int mtk_spi_hw_init(struct spi_master *master,
+ struct spi_device *spi)
{
u16 cpha, cpol;
u32 reg_val;
- struct spi_device *spi = msg->spi;
struct mtk_chip_config *chip_config = spi->controller_data;
struct mtk_spi *mdata = spi_master_get_devdata(master);
@@ -291,6 +330,15 @@ static int mtk_spi_prepare_message(struc
cpol = spi->mode & SPI_CPOL ? 1 : 0;
reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (mdata->dev_comp->ipm_design) {
+ /* SPI transfer without idle time until packet length done */
+ reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
+ if (spi->mode & SPI_LOOP)
+ reg_val |= SPI_CMD_IPM_SPIM_LOOP;
+ else
+ reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
+ }
+
if (cpha)
reg_val |= SPI_CMD_CPHA;
else
@@ -348,23 +396,39 @@ static int mtk_spi_prepare_message(struc
mdata->base + SPI_PAD_SEL_REG);
/* tick delay */
- reg_val = readl(mdata->base + SPI_CFG1_REG);
if (mdata->dev_comp->enhance_timing) {
- reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
- reg_val |= ((chip_config->tick_delay & 0x7)
- << SPI_CFG1_GET_TICK_DLY_OFFSET);
+ if (mdata->dev_comp->ipm_design) {
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
+ reg_val |= ((chip_config->tick_delay & 0x7)
+ << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ } else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+ reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
+ reg_val |= ((chip_config->tick_delay & 0x7)
+ << SPI_CFG1_GET_TICK_DLY_OFFSET);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
} else {
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
reg_val |= ((chip_config->tick_delay & 0x3)
<< SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
}
- writel(reg_val, mdata->base + SPI_CFG1_REG);
/* set hw cs timing */
mtk_spi_set_hw_cs_timing(spi);
return 0;
}
+static int mtk_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return mtk_spi_hw_init(master, msg->spi);
+}
+
static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 reg_val;
@@ -386,13 +450,13 @@ static void mtk_spi_set_cs(struct spi_de
}
static void mtk_spi_prepare_transfer(struct spi_master *master,
- struct spi_transfer *xfer)
+ u32 speed_hz)
{
u32 div, sck_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
- if (xfer->speed_hz < mdata->spi_clk_hz / 2)
- div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
+ if (speed_hz < mdata->spi_clk_hz / 2)
+ div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
else
div = 1;
@@ -423,12 +487,24 @@ static void mtk_spi_setup_packet(struct
u32 packet_size, packet_loop, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
- packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
+ if (mdata->dev_comp->ipm_design)
+ packet_size = min_t(u32,
+ mdata->xfer_len,
+ MTK_SPI_IPM_PACKET_SIZE);
+ else
+ packet_size = min_t(u32,
+ mdata->xfer_len,
+ MTK_SPI_PACKET_SIZE);
+
packet_loop = mdata->xfer_len / packet_size;
reg_val = readl(mdata->base + SPI_CFG1_REG);
- reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
+ if (mdata->dev_comp->ipm_design)
+ reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
+ else
+ reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
+ reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
writel(reg_val, mdata->base + SPI_CFG1_REG);
}
@@ -523,7 +599,7 @@ static int mtk_spi_fifo_transfer(struct
mdata->cur_transfer = xfer;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
mdata->num_xfered = 0;
- mtk_spi_prepare_transfer(master, xfer);
+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
mtk_spi_setup_packet(master);
if (xfer->tx_buf) {
@@ -556,7 +632,7 @@ static int mtk_spi_dma_transfer(struct s
mdata->cur_transfer = xfer;
mdata->num_xfered = 0;
- mtk_spi_prepare_transfer(master, xfer);
+ mtk_spi_prepare_transfer(master, xfer->speed_hz);
cmd = readl(mdata->base + SPI_CMD_REG);
if (xfer->tx_buf)
@@ -591,6 +667,19 @@ static int mtk_spi_transfer_one(struct s
struct spi_device *spi,
struct spi_transfer *xfer)
{
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ u32 reg_val = 0;
+
+ /* prepare xfer direction and duplex mode */
+ if (mdata->dev_comp->ipm_design) {
+ if (!xfer->tx_buf || !xfer->rx_buf) {
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ if (xfer->rx_buf)
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ }
+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+ }
+
if (master->can_dma(master, spi, xfer))
return mtk_spi_dma_transfer(master, spi, xfer);
else
@@ -614,8 +703,9 @@ static int mtk_spi_setup(struct spi_devi
if (!spi->controller_data)
spi->controller_data = (void *)&mtk_default_chip_info;
- if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
- gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
+ /* CS de-asserted, gpiolib will handle inversion */
+ gpiod_direction_output(spi->cs_gpiod, 0);
return 0;
}
@@ -633,6 +723,12 @@ static irqreturn_t mtk_spi_interrupt(int
else
mdata->state = MTK_SPI_IDLE;
+ /* SPI-MEM ops */
+ if (mdata->use_spimem) {
+ complete(&mdata->spimem_done);
+ return IRQ_HANDLED;
+ }
+
if (!master->can_dma(master, NULL, trans)) {
if (trans->rx_buf) {
cnt = mdata->xfer_len / 4;
@@ -716,6 +812,274 @@ static irqreturn_t mtk_spi_interrupt(int
return IRQ_HANDLED;
}
+static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ int opcode_len;
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
+ if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+ op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
+ /* force data buffer dma-aligned. */
+ op->data.nbytes -= op->data.nbytes % 4;
+ }
+ }
+
+ return 0;
+}
+
+static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ if (op->addr.nbytes && op->dummy.nbytes &&
+ op->addr.buswidth != op->dummy.buswidth)
+ return false;
+
+ if (op->addr.nbytes + op->dummy.nbytes > 16)
+ return false;
+
+ if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+ if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
+ MTK_SPI_IPM_PACKET_LOOP ||
+ op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_TX_SRC_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(mdata->tx_dma >> 32),
+ mdata->base + SPI_TX_SRC_REG_64);
+#endif
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
+ mdata->base + SPI_RX_DST_REG);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (mdata->dev_comp->dma_ext)
+ writel((u32)(mdata->rx_dma >> 32),
+ mdata->base + SPI_RX_DST_REG_64);
+#endif
+ }
+}
+
+static int mtk_spi_transfer_wait(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+ /*
+ * For each byte we wait for 8 cycles of the SPI clock.
+ * Since speed is defined in Hz and we want milliseconds,
+ * so it should be 8 * 1000.
+ */
+ u64 ms = 8000LL;
+
+ if (op->data.dir == SPI_MEM_NO_DATA)
+ ms *= 32; /* prevent we may get 0 for short transfers. */
+ else
+ ms *= op->data.nbytes;
+ ms = div_u64(ms, mem->spi->max_speed_hz);
+ ms += ms + 1000; /* 1s tolerance */
+
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
+ if (!wait_for_completion_timeout(&mdata->spimem_done,
+ msecs_to_jiffies(ms))) {
+ dev_err(mdata->dev, "spi-mem transfer timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mtk_spi_mem_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+ u32 reg_val, nio, tx_size;
+ char *tx_tmp_buf, *rx_tmp_buf;
+ int ret = 0;
+
+ mdata->use_spimem = true;
+ reinit_completion(&mdata->spimem_done);
+
+ mtk_spi_reset(mdata);
+ mtk_spi_hw_init(mem->spi->master, mem->spi);
+ mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
+
+ reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
+ /* opcode byte len */
+ reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
+ reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
+
+ /* addr & dummy byte len */
+ reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
+ if (op->addr.nbytes || op->dummy.nbytes)
+ reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
+ SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
+
+ /* data byte len */
+ if (op->data.dir == SPI_MEM_NO_DATA) {
+ reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
+ writel(0, mdata->base + SPI_CFG1_REG);
+ } else {
+ reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
+ mdata->xfer_len = op->data.nbytes;
+ mtk_spi_setup_packet(mem->spi->master);
+ }
+
+ if (op->addr.nbytes || op->dummy.nbytes) {
+ if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
+ reg_val |= SPI_CFG3_IPM_XMODE_EN;
+ else
+ reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
+ }
+
+ if (op->addr.buswidth == 2 ||
+ op->dummy.buswidth == 2 ||
+ op->data.buswidth == 2)
+ nio = 2;
+ else if (op->addr.buswidth == 4 ||
+ op->dummy.buswidth == 4 ||
+ op->data.buswidth == 4)
+ nio = 4;
+ else
+ nio = 1;
+
+ reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
+ reg_val |= PIN_MODE_CFG(nio);
+
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ else
+ reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
+ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
+
+ tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ tx_size += op->data.nbytes;
+
+ tx_size = max_t(u32, tx_size, 32);
+
+ tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
+ if (!tx_tmp_buf) {
+ mdata->use_spimem = false;
+ return -ENOMEM;
+ }
+
+ tx_tmp_buf[0] = op->cmd.opcode;
+
+ if (op->addr.nbytes) {
+ int i;
+
+ for (i = 0; i < op->addr.nbytes; i++)
+ tx_tmp_buf[i + 1] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+ }
+
+ if (op->dummy.nbytes)
+ memset(tx_tmp_buf + op->addr.nbytes + 1,
+ 0xff,
+ op->dummy.nbytes);
+
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
+ op->data.buf.out,
+ op->data.nbytes);
+
+ mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
+ tx_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
+ rx_tmp_buf = kzalloc(op->data.nbytes,
+ GFP_KERNEL | GFP_DMA);
+ if (!rx_tmp_buf) {
+ ret = -ENOMEM;
+ goto unmap_tx_dma;
+ }
+ } else {
+ rx_tmp_buf = op->data.buf.in;
+ }
+
+ mdata->rx_dma = dma_map_single(mdata->dev,
+ rx_tmp_buf,
+ op->data.nbytes,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
+ ret = -ENOMEM;
+ goto kfree_rx_tmp_buf;
+ }
+ }
+
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val |= SPI_CMD_TX_DMA;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val |= SPI_CMD_RX_DMA;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
+
+ mtk_spi_enable_transfer(mem->spi->master);
+
+ /* Wait for the interrupt. */
+ ret = mtk_spi_transfer_wait(mem, op);
+ if (ret)
+ goto unmap_rx_dma;
+
+ /* spi disable dma */
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ reg_val &= ~SPI_CMD_TX_DMA;
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ reg_val &= ~SPI_CMD_RX_DMA;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+unmap_rx_dma:
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ dma_unmap_single(mdata->dev, mdata->rx_dma,
+ op->data.nbytes, DMA_FROM_DEVICE);
+ if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
+ memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
+ }
+kfree_rx_tmp_buf:
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ !IS_ALIGNED((size_t)op->data.buf.in, 4))
+ kfree(rx_tmp_buf);
+unmap_tx_dma:
+ dma_unmap_single(mdata->dev, mdata->tx_dma,
+ tx_size, DMA_TO_DEVICE);
+err_exit:
+ kfree(tx_tmp_buf);
+ mdata->use_spimem = false;
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
+ .adjust_op_size = mtk_spi_mem_adjust_op_size,
+ .supports_op = mtk_spi_mem_supports_op,
+ .exec_op = mtk_spi_mem_exec_op,
+};
+
static int mtk_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -739,6 +1103,7 @@ static int mtk_spi_probe(struct platform
master->can_dma = mtk_spi_can_dma;
master->setup = mtk_spi_setup;
master->set_cs_timing = mtk_spi_set_hw_cs_timing;
+ master->use_gpio_descriptors = true;
of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
if (!of_id) {
@@ -755,6 +1120,14 @@ static int mtk_spi_probe(struct platform
if (mdata->dev_comp->must_tx)
master->flags = SPI_MASTER_MUST_TX;
+ if (mdata->dev_comp->ipm_design)
+ master->mode_bits |= SPI_LOOP;
+
+ if (mdata->dev_comp->ipm_design) {
+ mdata->dev = &pdev->dev;
+ master->mem_ops = &mtk_spi_mem_ops;
+ init_completion(&mdata->spimem_done);
+ }
if (mdata->dev_comp->need_pad_sel) {
mdata->pad_num = of_property_count_u32_elems(
@@ -831,25 +1204,40 @@ static int mtk_spi_probe(struct platform
goto err_put_master;
}
+ mdata->spi_hclk = devm_clk_get_optional(&pdev->dev, "hclk");
+ if (IS_ERR(mdata->spi_hclk)) {
+ ret = PTR_ERR(mdata->spi_hclk);
+ dev_err(&pdev->dev, "failed to get hclk: %d\n", ret);
+ goto err_put_master;
+ }
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable hclk (%d)\n", ret);
+ goto err_put_master;
+ }
+
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
- goto err_put_master;
+ goto err_disable_spi_hclk;
}
ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
- clk_disable_unprepare(mdata->spi_clk);
- goto err_put_master;
+ goto err_disable_spi_clk;
}
mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
- if (mdata->dev_comp->no_need_unprepare)
+ if (mdata->dev_comp->no_need_unprepare) {
clk_disable(mdata->spi_clk);
- else
+ clk_disable(mdata->spi_hclk);
+ } else {
clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
pm_runtime_enable(&pdev->dev);
@@ -862,25 +1250,12 @@ static int mtk_spi_probe(struct platform
goto err_disable_runtime_pm;
}
- if (!master->cs_gpios && master->num_chipselect > 1) {
+ if (!master->cs_gpiods && master->num_chipselect > 1) {
dev_err(&pdev->dev,
"cs_gpios not specified and num_chipselect > 1\n");
ret = -EINVAL;
goto err_disable_runtime_pm;
}
-
- if (master->cs_gpios) {
- for (i = 0; i < master->num_chipselect; i++) {
- ret = devm_gpio_request(&pdev->dev,
- master->cs_gpios[i],
- dev_name(&pdev->dev));
- if (ret) {
- dev_err(&pdev->dev,
- "can't get CS GPIO %i\n", i);
- goto err_disable_runtime_pm;
- }
- }
- }
}
if (mdata->dev_comp->dma_ext)
@@ -902,6 +1277,10 @@ static int mtk_spi_probe(struct platform
err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
+err_disable_spi_clk:
+ clk_disable_unprepare(mdata->spi_clk);
+err_disable_spi_hclk:
+ clk_disable_unprepare(mdata->spi_hclk);
err_put_master:
spi_master_put(master);
@@ -920,8 +1299,10 @@ static int mtk_spi_remove(struct platfor
mtk_spi_reset(mdata);
- if (mdata->dev_comp->no_need_unprepare)
+ if (mdata->dev_comp->no_need_unprepare) {
clk_unprepare(mdata->spi_clk);
+ clk_unprepare(mdata->spi_hclk);
+ }
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -940,8 +1321,10 @@ static int mtk_spi_suspend(struct device
if (ret)
return ret;
- if (!pm_runtime_suspended(dev))
+ if (!pm_runtime_suspended(dev)) {
clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
return ret;
}
@@ -958,11 +1341,20 @@ static int mtk_spi_resume(struct device
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
}
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ return ret;
+ }
}
ret = spi_master_resume(master);
- if (ret < 0)
+ if (ret < 0) {
clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
return ret;
}
@@ -974,10 +1366,13 @@ static int mtk_spi_runtime_suspend(struc
struct spi_master *master = dev_get_drvdata(dev);
struct mtk_spi *mdata = spi_master_get_devdata(master);
- if (mdata->dev_comp->no_need_unprepare)
+ if (mdata->dev_comp->no_need_unprepare) {
clk_disable(mdata->spi_clk);
- else
+ clk_disable(mdata->spi_hclk);
+ } else {
clk_disable_unprepare(mdata->spi_clk);
+ clk_disable_unprepare(mdata->spi_hclk);
+ }
return 0;
}
@@ -988,13 +1383,31 @@ static int mtk_spi_runtime_resume(struct
struct mtk_spi *mdata = spi_master_get_devdata(master);
int ret;
- if (mdata->dev_comp->no_need_unprepare)
+ if (mdata->dev_comp->no_need_unprepare) {
ret = clk_enable(mdata->spi_clk);
- else
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+ ret = clk_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ clk_disable(mdata->spi_clk);
+ return ret;
+ }
+ } else {
ret = clk_prepare_enable(mdata->spi_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
- return ret;
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mdata->spi_hclk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ return ret;
+ }
}
return 0;