015-dma_ops.patch 8.7 KB

From c6d8f92cfd7f4f19eb3b16545b3b68c561978fe8 Mon Sep 17 00:00:00 2001
From: Kristoffer Glembo <[email protected]>
Date: Mon, 7 Jun 2010 14:00:30 +0200
Subject: [PATCH] sparc32: Added LEON dma_ops.

Added leon3_dma_ops and mmu_inval_dma_area.
---
 arch/sparc/kernel/ioport.c |  139 +++++++++++++++++++++++++++++++------------
 1 files changed, 100 insertions(+), 39 deletions(-)

--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,15 @@
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
-#else
+#ifndef CONFIG_SPARC_LEON
 #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
+#else
+static inline void mmu_inval_dma_area(unsigned long va, unsigned long len)
+{
+	if (!sparc_leon3_snooping_enabled()) {
+		leon_flush_dcache_all();
+	}
+}
 #endif
 
 static struct resource *_sparc_find_resource(struct resource *r,
@@ -254,7 +259,7 @@ static void *sbus_alloc_coherent(struct
 			       dma_addr_t *dma_addrp, gfp_t gfp)
 {
 	struct platform_device *op = to_platform_device(dev);
-	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+	unsigned long len_total = PAGE_ALIGN(len);
 	unsigned long va;
 	struct resource *res;
 	int order;
@@ -287,15 +292,19 @@ static void *sbus_alloc_coherent(struct
 	 * XXX That's where sdev would be used. Currently we load
 	 * all iommu tables with the same translations.
 	 */
-	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
-		goto err_noiommu;
-
-	res->name = op->dev.of_node->name;
+#ifdef CONFIG_SPARC_LEON
+	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
+	*dma_addrp = virt_to_phys(va);
+#else
+	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) {
+		release_resource(res);
+		goto err_nova;
+	}
+#endif
+	res->name = op->node->name;
 	return (void *)(unsigned long)res->start;
 
-err_noiommu:
-	release_resource(res);
 err_nova:
 	free_pages(va, order);
 err_nomem:
@@ -321,7 +330,7 @@ static void sbus_free_coherent(struct de
 		return;
 	}
 
-	n = (n + PAGE_SIZE-1) & PAGE_MASK;
+	n = PAGE_ALIGN(n);
 	if ((res->end-res->start)+1 != n) {
 		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
 		       (long)((res->end-res->start)+1), n);
@@ -333,7 +342,12 @@ static void sbus_free_coherent(struct de
 	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
 	pgv = virt_to_page(p);
-	mmu_unmap_dma_area(dev, ba, n);
+
+#ifdef CONFIG_SPARC_LEON
+	sparc_unmapiorange((unsigned long)p, n);
+#else
+	mmu_unmap_dma_area(dev, ba, n);
+#endif
 
 	__free_pages(pgv, get_order(n));
 }
@@ -408,9 +422,6 @@ struct dma_map_ops sbus_dma_ops = {
 	.sync_sg_for_device	= sbus_sync_sg_for_device,
 };
 
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -422,7 +433,7 @@ arch_initcall(sparc_register_ioport);
 #endif /* CONFIG_SBUS */
 
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
 
 /* Allocate and map kernel buffer using consistent mode DMA for a device.
  * hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,7 +441,7 @@ arch_initcall(sparc_register_ioport);
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
 				  dma_addr_t *pba, gfp_t gfp)
 {
-	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+	unsigned long len_total = PAGE_ALIGN(len);
 	unsigned long va;
 	struct resource *res;
 	int order;
@@ -463,10 +474,6 @@ static void *pci32_alloc_coherent(struct
 		return NULL;
 	}
 	mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
-  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
 	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
 
 	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
@@ -498,7 +505,7 @@ static void pci32_free_coherent(struct d
 		return;
 	}
 
-	n = (n + PAGE_SIZE-1) & PAGE_MASK;
+	n = PAGE_ALIGN(n);
 	if ((res->end-res->start)+1 != n) {
 		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
 		       (long)((res->end-res->start)+1), (long)n);
@@ -515,6 +522,14 @@ static void pci32_free_coherent(struct d
 	free_pages(pgp, get_order(n));
 }
 
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+			     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (dir != PCI_DMA_TODEVICE) {
+		mmu_inval_dma_area((unsigned long)phys_to_virt(ba), PAGE_ALIGN(size));
+	}
+}
+
 /*
  * Same as pci_map_single, but with pages.
  */
@@ -551,8 +566,7 @@ static int pci32_map_sg(struct device *d
 	/* IIep is write-through, not flushing. */
 	for_each_sg(sgl, sg, nents, n) {
-		BUG_ON(page_address(sg_page(sg)) == NULL);
-		sg->dma_address = virt_to_phys(sg_virt(sg));
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nents;
@@ -571,10 +585,7 @@ static void pci32_unmap_sg(struct device
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -594,7 +605,7 @@ static void pci32_sync_single_for_cpu(st
 {
 	if (dir != PCI_DMA_TODEVICE) {
 		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
-		    (size + PAGE_SIZE-1) & PAGE_MASK);
+		    PAGE_ALIGN(size));
 	}
 }
@@ -621,10 +632,7 @@ static void pci32_sync_sg_for_cpu(struct
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
@@ -637,18 +645,38 @@ static void pci32_sync_sg_for_device(str
 	if (dir != PCI_DMA_TODEVICE) {
 		for_each_sg(sgl, sg, nents, n) {
-			BUG_ON(page_address(sg_page(sg)) == NULL);
-			mmu_inval_dma_area(
-			    (unsigned long) page_address(sg_page(sg)),
-			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+			mmu_inval_dma_area((unsigned long)sg_virt(sg), PAGE_ALIGN(sg->length));
 		}
 	}
 }
 
+/* LEON3 unmapping functions
+ *
+ * We can only invalidate the whole cache so unmap_page and unmap_sg do the same thing
+ */
+static void leon3_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+			     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (dir != PCI_DMA_TODEVICE) {
+		mmu_inval_dma_area(0, 0);
+	}
+}
+
+static void leon3_unmap_sg(struct device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction dir,
+			   struct dma_attrs *attrs)
+{
+
+	if (dir != PCI_DMA_TODEVICE) {
+		mmu_inval_dma_area(0, 0);
+	}
+}
+
 struct dma_map_ops pci32_dma_ops = {
 	.alloc_coherent		= pci32_alloc_coherent,
 	.free_coherent		= pci32_free_coherent,
 	.map_page		= pci32_map_page,
+	.unmap_page		= pci32_unmap_page,
 	.map_sg			= pci32_map_sg,
 	.unmap_sg		= pci32_unmap_sg,
 	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
@@ -658,7 +686,30 @@ struct dma_map_ops pci32_dma_ops = {
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-#endif /* CONFIG_PCI */
+struct dma_map_ops leon3_dma_ops = {
+	.alloc_coherent		= sbus_alloc_coherent,
+	.free_coherent		= sbus_free_coherent,
+	.map_page		= pci32_map_page,
+	.unmap_page		= leon3_unmap_page,
+	.map_sg			= pci32_map_sg,
+	.unmap_sg		= leon3_unmap_sg,
+	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
+	.sync_single_for_device	= pci32_sync_single_for_device,
+	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
+	.sync_sg_for_device	= pci32_sync_sg_for_device,
+};
+
+#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+
+#ifdef CONFIG_SPARC_LEON
+struct dma_map_ops *dma_ops = &leon3_dma_ops;
+#else
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+#endif
+
+#ifdef CONFIG_SBUS
+EXPORT_SYMBOL(dma_ops);
+#endif
 
 /*
  * Return whether the given PCI device DMA address mask can be
@@ -676,6 +727,16 @@ int dma_supported(struct device *dev, u6
 }
 EXPORT_SYMBOL(dma_supported);
 
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+#ifdef CONFIG_PCI
+	if (dev->bus == &pci_bus_type)
+		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+#endif
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
 #ifdef CONFIG_PROC_FS
 
 static int sparc_io_proc_show(struct seq_file *m, void *v)
@@ -717,7 +778,7 @@ static const struct file_operations spar
 static struct resource *_sparc_find_resource(struct resource *root,
 					     unsigned long hit)
 {
-        struct resource *tmp;
+	struct resource *tmp;
 
 	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
 		if (tmp->start <= hit && tmp->end >= hit)
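
The core decision this patch encodes: a LEON3 D-cache only needs flushing around DMA when bus snooping is absent, and the flush is all-or-nothing (leon_flush_dcache_all()), which is why leon3_unmap_page() and leon3_unmap_sg() both call mmu_inval_dma_area(0, 0) and ignore the address range. The stand-alone C sketch below models that logic outside the kernel, under stated assumptions: the trimmed-down struct dma_map_ops, the snooping_enabled flag, and the printf stand-ins are illustrative only, not the kernel API. In the kernel the direction check uses PCI_DMA_TODEVICE, which has the same value as DMA_TO_DEVICE.

#include <stdio.h>
#include <stddef.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Illustrative stand-in for the kernel's dma_map_ops table. */
struct dma_map_ops {
	void (*unmap_page)(unsigned long ba, size_t size,
			   enum dma_data_direction dir);
};

static int snooping_enabled;	/* models sparc_leon3_snooping_enabled() */

/* Models the patched mmu_inval_dma_area(): flush only when snooping is
 * off. The va/len arguments are ignored because LEON can only flush the
 * whole D-cache, exactly as the patch's comment explains. */
static void mmu_inval_dma_area(unsigned long va, unsigned long len)
{
	(void)va;
	(void)len;
	if (!snooping_enabled)
		printf("leon_flush_dcache_all()\n");
}

/* Models leon3_unmap_page(): only directions where the CPU will read
 * the buffer need stale cache lines invalidated. */
static void leon3_unmap_page(unsigned long ba, size_t size,
			     enum dma_data_direction dir)
{
	(void)ba;
	(void)size;
	if (dir != DMA_TO_DEVICE)
		mmu_inval_dma_area(0, 0);
}

static const struct dma_map_ops leon3_dma_ops = {
	.unmap_page = leon3_unmap_page,
};

/* Selection happens once, as the patch does with #ifdef CONFIG_SPARC_LEON;
 * every caller then dispatches through the same dma_ops pointer. */
static const struct dma_map_ops *dma_ops = &leon3_dma_ops;

int main(void)
{
	dma_ops->unmap_page(0x40000000UL, 4096, DMA_FROM_DEVICE); /* flushes  */
	dma_ops->unmap_page(0x40000000UL, 4096, DMA_TO_DEVICE);   /* no flush */
	snooping_enabled = 1;
	dma_ops->unmap_page(0x40000000UL, 4096, DMA_FROM_DEVICE); /* no flush */
	return 0;
}

The ops-table indirection is what lets the patch reuse the pci32_* map/sync routines in leon3_dma_ops while swapping in LEON-specific unmap callbacks, instead of sprinkling CONFIG_SPARC_LEON checks through every DMA call site.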