050-0003-mtd-nand-qcom-support-for-command-descriptor-formati.patch 6.3 KB

  1. From 8d6b6d7e135e9bbfc923d34a45cb0e72695e63ed Mon Sep 17 00:00:00 2001
  2. From: Abhishek Sahu <absahu@codeaurora.org>
  3. Date: Mon, 25 Sep 2017 13:21:26 +0530
  4. Subject: [PATCH 3/7] mtd: nand: qcom: support for command descriptor formation
  5. 1. Add the function for command descriptor preparation which will
  6. be used only by BAM DMA and it will form the DMA descriptors
  7. containing command elements
  8. 2. DMA_PREP_CMD flag should be used for forming command DMA
  9. descriptors
  10. Reviewed-by: Archit Taneja <architt@codeaurora.org>
  11. Signed-off-by: Abhishek Sahu <absahu@codeaurora.org>
  12. Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
  13. ---
  14. drivers/mtd/nand/qcom_nandc.c | 108 +++++++++++++++++++++++++++++++++++-------
  15. 1 file changed, 92 insertions(+), 16 deletions(-)
  16. --- a/drivers/mtd/nand/qcom_nandc.c
  17. +++ b/drivers/mtd/nand/qcom_nandc.c
  18. @@ -200,6 +200,14 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_
  19. */
  20. #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
  21. +/* Returns the NAND register physical address */
  22. +#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
  23. +
  24. +/* Returns the dma address for reg read buffer */
  25. +#define reg_buf_dma_addr(chip, vaddr) \
  26. + ((chip)->reg_read_dma + \
  27. + ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
  28. +
  29. #define QPIC_PER_CW_CMD_ELEMENTS 32
  30. #define QPIC_PER_CW_CMD_SGL 32
  31. #define QPIC_PER_CW_DATA_SGL 8
  32. @@ -317,7 +325,8 @@ struct nandc_regs {
  33. * controller
  34. * @dev: parent device
  35. * @base: MMIO base
  36. - * @base_dma: physical base address of controller registers
  37. + * @base_phys: physical base address of controller registers
  38. + * @base_dma: dma base address of controller registers
  39. * @core_clk: controller clock
  40. * @aon_clk: another controller clock
  41. *
  42. @@ -350,6 +359,7 @@ struct qcom_nand_controller {
  43. struct device *dev;
  44. void __iomem *base;
  45. + phys_addr_t base_phys;
  46. dma_addr_t base_dma;
  47. struct clk *core_clk;
  48. @@ -751,6 +761,66 @@ static int prepare_bam_async_desc(struct
  49. }
  50. /*
  51. + * Prepares the command descriptor for BAM DMA which will be used for NAND
  52. + * register reads and writes. The command descriptor requires the command
  53. + * to be formed in command element type so this function uses the command
  54. + * element from bam transaction ce array and fills the same with required
  55. + * data. A single SGL can contain multiple command elements so
  56. + * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
  57. + * after the current command element.
  58. + */
  59. +static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
  60. + int reg_off, const void *vaddr,
  61. + int size, unsigned int flags)
  62. +{
  63. + int bam_ce_size;
  64. + int i, ret;
  65. + struct bam_cmd_element *bam_ce_buffer;
  66. + struct bam_transaction *bam_txn = nandc->bam_txn;
  67. +
  68. + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
  69. +
  70. + /* fill the command desc */
  71. + for (i = 0; i < size; i++) {
  72. + if (read)
  73. + bam_prep_ce(&bam_ce_buffer[i],
  74. + nandc_reg_phys(nandc, reg_off + 4 * i),
  75. + BAM_READ_COMMAND,
  76. + reg_buf_dma_addr(nandc,
  77. + (__le32 *)vaddr + i));
  78. + else
  79. + bam_prep_ce_le32(&bam_ce_buffer[i],
  80. + nandc_reg_phys(nandc, reg_off + 4 * i),
  81. + BAM_WRITE_COMMAND,
  82. + *((__le32 *)vaddr + i));
  83. + }
  84. +
  85. + bam_txn->bam_ce_pos += size;
  86. +
  87. + /* use the separate sgl after this command */
  88. + if (flags & NAND_BAM_NEXT_SGL) {
  89. + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
  90. + bam_ce_size = (bam_txn->bam_ce_pos -
  91. + bam_txn->bam_ce_start) *
  92. + sizeof(struct bam_cmd_element);
  93. + sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
  94. + bam_ce_buffer, bam_ce_size);
  95. + bam_txn->cmd_sgl_pos++;
  96. + bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
  97. +
  98. + if (flags & NAND_BAM_NWD) {
  99. + ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
  100. + DMA_PREP_FENCE |
  101. + DMA_PREP_CMD);
  102. + if (ret)
  103. + return ret;
  104. + }
  105. + }
  106. +
  107. + return 0;
  108. +}
  109. +
  110. +/*
  111. * Prepares the data descriptor for BAM DMA which will be used for NAND
  112. * data reads and writes.
  113. */
  114. @@ -868,19 +938,22 @@ static int read_reg_dma(struct qcom_nand
  115. {
  116. bool flow_control = false;
  117. void *vaddr;
  118. - int size;
  119. - if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
  120. - flow_control = true;
  121. + vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
  122. + nandc->reg_read_pos += num_regs;
  123. if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
  124. first = dev_cmd_reg_addr(nandc, first);
  125. - size = num_regs * sizeof(u32);
  126. - vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
  127. - nandc->reg_read_pos += num_regs;
  128. + if (nandc->props->is_bam)
  129. + return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
  130. + num_regs, flags);
  131. +
  132. + if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
  133. + flow_control = true;
  134. - return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
  135. + return prep_adm_dma_desc(nandc, true, first, vaddr,
  136. + num_regs * sizeof(u32), flow_control);
  137. }
  138. /*
  139. @@ -897,13 +970,9 @@ static int write_reg_dma(struct qcom_nan
  140. bool flow_control = false;
  141. struct nandc_regs *regs = nandc->regs;
  142. void *vaddr;
  143. - int size;
  144. vaddr = offset_to_nandc_reg(regs, first);
  145. - if (first == NAND_FLASH_CMD)
  146. - flow_control = true;
  147. -
  148. if (first == NAND_ERASED_CW_DETECT_CFG) {
  149. if (flags & NAND_ERASED_CW_SET)
  150. vaddr = &regs->erased_cw_detect_cfg_set;
  151. @@ -920,10 +989,15 @@ static int write_reg_dma(struct qcom_nan
  152. if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
  153. first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
  154. - size = num_regs * sizeof(u32);
  155. + if (nandc->props->is_bam)
  156. + return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
  157. + num_regs, flags);
  158. +
  159. + if (first == NAND_FLASH_CMD)
  160. + flow_control = true;
  161. - return prep_adm_dma_desc(nandc, false, first, vaddr, size,
  162. - flow_control);
  163. + return prep_adm_dma_desc(nandc, false, first, vaddr,
  164. + num_regs * sizeof(u32), flow_control);
  165. }
  166. /*
  167. @@ -1187,7 +1261,8 @@ static int submit_descs(struct qcom_nand
  168. }
  169. if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
  170. - r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
  171. + r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
  172. + DMA_PREP_CMD);
  173. if (r)
  174. return r;
  175. }
  176. @@ -2722,6 +2797,7 @@ static int qcom_nandc_probe(struct platf
  177. if (IS_ERR(nandc->base))
  178. return PTR_ERR(nandc->base);
  179. + nandc->base_phys = res->start;
  180. nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
  181. nandc->core_clk = devm_clk_get(dev, "core");