862-dmaengine-qcom-bam_dma-Add-custom-data-mapping.patch

From 5a7ccdf845d64b385affdcffaf2defbe9848be15 Mon Sep 17 00:00:00 2001
From: Ram Chandra Jangir <[email protected]>
Date: Thu, 20 Apr 2017 10:39:00 +0530
Subject: [PATCH] dmaengine: qcom: bam_dma: Add custom data mapping

Add a new function to support preparing a DMA descriptor from
custom data.

Signed-off-by: Abhishek Sahu <[email protected]>
Signed-off-by: Ram Chandra Jangir <[email protected]>
---
 drivers/dma/qcom/bam_dma.c       | 97 +++++++++++++++++++++++++++++++++++++---
 include/linux/dma/qcom_bam_dma.h | 14 ++++++
 include/linux/dmaengine.h        | 14 ++++++
 3 files changed, 119 insertions(+), 6 deletions(-)
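(Not part of the patch: the hunks below only add the prep routine; a client
driver would use it roughly as in this sketch. It assumes a channel already
obtained from dma_request_slave_channel() and a qcom_bam_sgl array already
DMA-mapped; example_issue() and the -EIO return are illustrative only.)

	#include <linux/dmaengine.h>
	#include <linux/dma/qcom_bam_dma.h>

	static int example_issue(struct dma_chan *chan,
				 struct qcom_bam_sgl *bam_sgl, u32 sgl_cnt)
	{
		struct qcom_bam_custom_data data = {
			.sgl_cnt = sgl_cnt,
			.dir = DMA_MEM_TO_DEV,
			.bam_sgl = bam_sgl,
		};
		struct dma_async_tx_descriptor *tx;

		/* per the patch, DMA_PREP_INTERRUPT selects DESC_FLAG_EOT */
		tx = dmaengine_prep_dma_custom_mapping(chan, &data,
						       DMA_PREP_INTERRUPT);
		if (!tx)
			return -EIO;

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;
	}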
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -49,6 +49,7 @@
 #include <linux/clk.h>
 #include <linux/dmaengine.h>
 #include <linux/pm_runtime.h>
+#include <linux/dma/qcom_bam_dma.h>
 
 #include "../dmaengine.h"
 #include "../virt-dma.h"
@@ -61,11 +62,6 @@ struct bam_desc_hw {
 
 #define BAM_DMA_AUTOSUSPEND_DELAY 100
 
-#define DESC_FLAG_INT BIT(15)
-#define DESC_FLAG_EOT BIT(14)
-#define DESC_FLAG_EOB BIT(13)
-#define DESC_FLAG_NWD BIT(12)
-
 struct bam_async_desc {
 	struct virt_dma_desc vd;
 
@@ -670,6 +666,93 @@ err_out:
 }
 
 /**
+ * bam_prep_dma_custom_mapping - Prep DMA descriptor from custom data
+ *
+ * @chan: dma channel
+ * @data: custom data
+ * @flags: DMA flags
+ */
+static struct dma_async_tx_descriptor *bam_prep_dma_custom_mapping(
+	struct dma_chan *chan,
+	void *data, unsigned long flags)
+{
+	struct bam_chan *bchan = to_bam_chan(chan);
+	struct bam_device *bdev = bchan->bdev;
+	struct bam_async_desc *async_desc;
+	struct qcom_bam_custom_data *desc_data = data;
+	u32 i;
+	struct bam_desc_hw *desc;
+	unsigned int num_alloc = 0;
+
+
+	if (!is_slave_direction(desc_data->dir)) {
+		dev_err(bdev->dev, "invalid dma direction\n");
+		return NULL;
+	}
+
+	/* calculate number of required entries */
+	for (i = 0; i < desc_data->sgl_cnt; i++)
+		num_alloc += DIV_ROUND_UP(
+			sg_dma_len(&desc_data->bam_sgl[i].sgl), BAM_FIFO_SIZE);
+
+	/* allocate enough room to accommodate the number of entries */
+	async_desc = kzalloc(sizeof(*async_desc) +
+			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+
+	if (!async_desc)
+		goto err_out;
+
+	if (flags & DMA_PREP_FENCE)
+		async_desc->flags |= DESC_FLAG_NWD;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		async_desc->flags |= DESC_FLAG_EOT;
+	else
+		async_desc->flags |= DESC_FLAG_INT;
+
+	async_desc->num_desc = num_alloc;
+	async_desc->curr_desc = async_desc->desc;
+	async_desc->dir = desc_data->dir;
+
+	/* fill in temporary descriptors */
+	desc = async_desc->desc;
+	for (i = 0; i < desc_data->sgl_cnt; i++) {
+		unsigned int remainder;
+		unsigned int curr_offset = 0;
+
+		remainder = sg_dma_len(&desc_data->bam_sgl[i].sgl);
+
+		do {
+			desc->addr = cpu_to_le32(
+				sg_dma_address(&desc_data->bam_sgl[i].sgl) +
+				curr_offset);
+
+			if (desc_data->bam_sgl[i].dma_flags)
+				desc->flags |= cpu_to_le16(
+					desc_data->bam_sgl[i].dma_flags);
+
+			if (remainder > BAM_FIFO_SIZE) {
+				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
+				remainder -= BAM_FIFO_SIZE;
+				curr_offset += BAM_FIFO_SIZE;
+			} else {
+				desc->size = cpu_to_le16(remainder);
+				remainder = 0;
+			}
+
+			async_desc->length += desc->size;
+			desc++;
+		} while (remainder > 0);
+	}
+
+	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+
+err_out:
+	kfree(async_desc);
+	return NULL;
+}
+
+/**
  * bam_dma_terminate_all - terminate all transactions on a channel
  * @bchan: bam dma channel
  *
@@ -960,7 +1043,7 @@ static void bam_start_dma(struct bam_cha
 
 	/* set any special flags on the last descriptor */
 	if (async_desc->num_desc == async_desc->xfer_len)
-		desc[async_desc->xfer_len - 1].flags =
+		desc[async_desc->xfer_len - 1].flags |=
 			cpu_to_le16(async_desc->flags);
 	else
 		desc[async_desc->xfer_len - 1].flags |=
@@ -1237,6 +1320,8 @@ static int bam_dma_probe(struct platform
 	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
 	bdev->common.device_free_chan_resources = bam_free_chan;
 	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
+	bdev->common.device_prep_dma_custom_mapping =
+		bam_prep_dma_custom_mapping;
 	bdev->common.device_config = bam_slave_config;
 	bdev->common.device_pause = bam_pause;
 	bdev->common.device_resume = bam_resume;
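(For reference, not part of the patch: bam_prep_dma_custom_mapping() never
emits a hardware descriptor larger than BAM_FIFO_SIZE, assumed here to be
(SZ_32K - 8) as defined in bam_dma.c. The sketch below mirrors the
DIV_ROUND_UP() sizing and the do/while split above, so a 70000-byte sg
entry becomes chunks of 32760, 32760 and 4480 bytes, i.e. three
descriptors.)

	#include <linux/kernel.h>
	#include <linux/sizes.h>

	static unsigned int example_count_desc(unsigned int dma_len)
	{
		unsigned int chunk, ndesc = 0;

		while (dma_len) {
			chunk = min_t(unsigned int, dma_len, SZ_32K - 8);
			dma_len -= chunk;
			ndesc++;
		}
		/* equals DIV_ROUND_UP(original length, SZ_32K - 8) */
		return ndesc;
	}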
--- a/include/linux/dma/qcom_bam_dma.h
+++ b/include/linux/dma/qcom_bam_dma.h
@@ -65,6 +65,19 @@ enum bam_command_type {
 };
 
 /*
+ * QCOM BAM DMA custom data
+ *
+ * @sgl_cnt: number of sgl in bam_sgl
+ * @dir: DMA data transfer direction
+ * @bam_sgl: BAM SGL pointer
+ */
+struct qcom_bam_custom_data {
+	u32 sgl_cnt;
+	enum dma_transfer_direction dir;
+	struct qcom_bam_sgl *bam_sgl;
+};
+
+/*
  * qcom_bam_sg_init_table - Init QCOM BAM SGL
  * @bam_sgl: bam sgl
  * @nents: number of entries in bam sgl
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -692,6 +692,8 @@ struct dma_filter {
  *	be called after period_len bytes have been transferred.
  * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_prep_dma_custom_mapping: prepares a dma operation from dma driver
+ *	specific custom data
  * @device_config: Pushes a new configuration to a channel, return 0 or an error
  *	code
  * @device_pause: Pauses any transfer happening on a channel. Returns
@@ -783,6 +785,9 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
 		struct dma_chan *chan, dma_addr_t dst, u64 data,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_custom_mapping)(
+		struct dma_chan *chan, void *data,
+		unsigned long flags);
 
 	int (*device_config)(struct dma_chan *chan,
 			     struct dma_slave_config *config);
@@ -899,6 +904,15 @@ static inline struct dma_async_tx_descri
 		src_sg, src_nents, flags);
 }
 
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_custom_mapping(
+		struct dma_chan *chan,
+		void *data,
+		unsigned long flags)
+{
+	return chan->device->device_prep_dma_custom_mapping(chan, data,
+			flags);
+}
+
 /**
  * dmaengine_terminate_all() - Terminate all active DMA transfers
  * @chan: The channel for which to terminate the transfers
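(A final caveat, outside the patch: dmaengine_prep_dma_custom_mapping()
dereferences the device_prep_dma_custom_mapping callback unconditionally,
and only bam_dma installs it here. A caller that may run against other
dmaengine providers can test the pointer first; example_prep_custom() is a
hypothetical name.)

	#include <linux/dmaengine.h>

	static struct dma_async_tx_descriptor *
	example_prep_custom(struct dma_chan *chan, void *data,
			    unsigned long flags)
	{
		/* avoid a NULL dereference on drivers without the hook */
		if (!chan->device->device_prep_dma_custom_mapping)
			return NULL;

		return dmaengine_prep_dma_custom_mapping(chan, data, flags);
	}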