050-disable_dmabounce.patch 4.3 KB

  1. --- a/arch/arm/Kconfig
  2. +++ b/arch/arm/Kconfig
  3. @@ -435,7 +435,6 @@ config ARCH_IXP4XX
  4. select CPU_XSCALE
  5. select GENERIC_GPIO
  6. select GENERIC_CLOCKEVENTS
  7. - select DMABOUNCE if PCI
  8. help
  9. Support for Intel's IXP4XX (XScale) family of processors.
  10. --- a/arch/arm/mach-ixp4xx/Kconfig
  11. +++ b/arch/arm/mach-ixp4xx/Kconfig
  12. @@ -199,6 +199,43 @@ config IXP4XX_INDIRECT_PCI
  13. need to use the indirect method instead. If you don't know
  14. what you need, leave this option unselected.
  15. +config IXP4XX_LEGACY_DMABOUNCE
  16. + bool "Legacy PCI DMA bounce support"
  17. + depends on PCI
  18. + default n
  19. + select DMABOUNCE
  20. + help
  21. + The IXP4xx is limited to a 64MB window for PCI DMA, which
  22. + requires that PCI accesses >= 64MB are bounced via buffers
  23. + below 64MB.
  24. +
  25. + The kernel has traditionally handled this issue by using ARM
  26. + specific DMA bounce support code for all accesses >= 64MB.
  27. + That code causes problems of its own, so it is desirable to
  28. + disable it.
  29. +
  30. + Enabling this option makes IXP4xx continue to use the problematic
  31. + ARM DMA bounce code. Disabling this option makes IXP4xx use the
  32. + kernel's generic bounce code.
  33. +
  34. + Say 'N'.
  35. +
  36. +config IXP4XX_ZONE_DMA
  37. + bool "Support > 64MB RAM"
  38. + depends on !IXP4XX_LEGACY_DMABOUNCE
  39. + default y
  40. + select ZONE_DMA
  41. + help
  42. + The IXP4xx is limited to a 64MB window for PCI DMA, which
  43. + requires that PCI accesses above 64MB are bounced via buffers
  44. + below 64MB.
  45. +
  46. + Disabling this option allows you to omit the support code for
  47. + DMA-able memory allocations and DMA bouncing, but the kernel
  48. + will then not work properly if more than 64MB of RAM is present.
  49. +
  50. + Say 'Y' unless your platform is limited to <= 64MB of RAM.
  51. +
  52. config IXP4XX_QMGR
  53. tristate "IXP4xx Queue Manager support"
  54. help
  55. --- a/arch/arm/mach-ixp4xx/common-pci.c
  56. +++ b/arch/arm/mach-ixp4xx/common-pci.c
  57. @@ -321,27 +321,33 @@ static int abort_handler(unsigned long a
  58. */
  59. static int ixp4xx_pci_platform_notify(struct device *dev)
  60. {
  61. - if(dev->bus == &pci_bus_type) {
  62. - *dev->dma_mask = SZ_64M - 1;
  63. + if (dev->bus == &pci_bus_type) {
  64. + *dev->dma_mask = SZ_64M - 1;
  65. dev->coherent_dma_mask = SZ_64M - 1;
  66. +#ifdef CONFIG_DMABOUNCE
  67. dmabounce_register_dev(dev, 2048, 4096);
  68. +#endif
  69. }
  70. return 0;
  71. }
  72. static int ixp4xx_pci_platform_notify_remove(struct device *dev)
  73. {
  74. - if(dev->bus == &pci_bus_type) {
  75. +#ifdef CONFIG_DMABOUNCE
  76. + if (dev->bus == &pci_bus_type)
  77. dmabounce_unregister_dev(dev);
  78. - }
  79. +#endif
  80. return 0;
  81. }
  82. -int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
  83. +#ifdef CONFIG_DMABOUNCE
  84. +int dma_needs_bounce_2(struct device *dev, dma_addr_t dma_addr, size_t size)
  85. {
  86. - return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
  87. + return (dev->bus == &pci_bus_type ) && ((dma_addr + size) > SZ_64M);
  88. }
  89. +#endif
  90. +#ifdef CONFIG_ZONE_DMA
  91. /*
  92. * Only first 64MB of memory can be accessed via PCI.
  93. * We use GFP_DMA to allocate safe buffers to do map/unmap.
  94. @@ -364,6 +370,7 @@ void __init ixp4xx_adjust_zones(unsigned
  95. zhole_size[1] = zhole_size[0];
  96. zhole_size[0] = 0;
  97. }
  98. +#endif
  99. void __init ixp4xx_pci_preinit(void)
  100. {
  101. --- a/arch/arm/mach-ixp4xx/include/mach/memory.h
  102. +++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
  103. @@ -16,10 +16,12 @@
  104. #if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
  105. +#ifdef CONFIG_ZONE_DMA
  106. void ixp4xx_adjust_zones(unsigned long *size, unsigned long *holes);
  107. #define arch_adjust_zones(size, holes) \
  108. ixp4xx_adjust_zones(size, holes)
  109. +#endif
  110. #define ISA_DMA_THRESHOLD (SZ_64M - 1)
  111. #define MAX_DMA_ADDRESS (PAGE_OFFSET + SZ_64M)
  112. --- a/arch/arm/common/dmabounce.c
  113. +++ b/arch/arm/common/dmabounce.c
  114. @@ -30,6 +30,7 @@
  115. #include <linux/dma-mapping.h>
  116. #include <linux/dmapool.h>
  117. #include <linux/list.h>
  118. +#include <linux/pci.h>
  119. #include <linux/scatterlist.h>
  120. #include <asm/cacheflush.h>
  121. @@ -248,8 +249,15 @@ static inline dma_addr_t map_single(stru
  122. needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
  123. }
  124. - if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
  125. +#ifdef CONFIG_ARCH_IXP4XX
  126. + /* On IXP4xx, PCI can only address the first 64MB of RAM */
  127. + if (dev->bus == &pci_bus_type &&
  128. +     (dma_addr + size) > SZ_64M)
  129. + needs_bounce = 1;
  130. +#endif
  131. +
  132. + if (device_info && needs_bounce) {
  133. struct safe_buffer *buf;
  134. buf = alloc_safe_buffer(device_info, ptr, size, dir);
  135. if (buf == 0) {