101-MIPS-fix-cache-flushing-for-highmem-pages.patch 2.5 KB

  1. From: Felix Fietkau <[email protected]>
  2. Date: Sun, 24 Jan 2016 01:03:51 +0100
  3. Subject: [PATCH] MIPS: fix cache flushing for highmem pages
  4. Most cache flush ops were no-op for highmem pages. This led to nasty
  5. segfaults and (in the case of page_address(page) == NULL) kernel
  6. crashes.
  7. Fix this by always flushing highmem pages using kmap/kunmap_atomic
  8. around the actual cache flush. This might be a bit inefficient, but at
  9. least it's stable.
  10. Signed-off-by: Felix Fietkau <[email protected]>
  11. ---
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/syscalls.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -78,18 +79,29 @@ SYSCALL_DEFINE3(cacheflush, unsigned lon
 	return 0;
 }
 
+static void
+flush_highmem_page(struct page *page)
+{
+	void *addr = kmap_atomic(page);
+	flush_data_cache_page((unsigned long)addr);
+	kunmap_atomic(addr);
+}
+
 void __flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 	unsigned long addr;
 
-	if (PageHighMem(page))
-		return;
 	if (mapping && !mapping_mapped(mapping)) {
 		SetPageDcacheDirty(page);
 		return;
 	}
 
+	if (PageHighMem(page)) {
+		flush_highmem_page(page);
+		return;
+	}
+
 	/*
 	 * We could delay the flush for the !page_mapping case too.  But that
 	 * case is for exec env/arg pages and those are %99 certainly going to
@@ -105,6 +117,11 @@ void __flush_anon_page(struct page *page
 {
 	unsigned long addr = (unsigned long) page_address(page);
 
+	if (PageHighMem(page)) {
+		flush_highmem_page(page);
+		return;
+	}
+
 	if (pages_do_alias(addr, vmaddr)) {
 		if (page_mapped(page) && !Page_dcache_dirty(page)) {
 			void *kaddr;
@@ -123,8 +140,10 @@ void __flush_icache_page(struct vm_area_
 {
 	unsigned long addr;
 
-	if (PageHighMem(page))
+	if (PageHighMem(page)) {
+		flush_highmem_page(page);
 		return;
+	}
 
 	addr = (unsigned long) page_address(page);
 	flush_data_cache_page(addr);
@@ -142,12 +161,17 @@ void __update_cache(struct vm_area_struc
 	if (unlikely(!pfn_valid(pfn)))
 		return;
 	page = pfn_to_page(pfn);
-	if (page_mapping(page) && Page_dcache_dirty(page)) {
+	if (!Page_dcache_dirty(page) || !page_mapping(page))
+		return;
+
+	if (PageHighMem(page)) {
+		flush_highmem_page(page);
+	} else {
 		addr = (unsigned long) page_address(page);
 		if (exec || pages_do_alias(addr, address & PAGE_MASK))
 			flush_data_cache_page(addr);
-		ClearPageDcacheDirty(page);
 	}
+	ClearPageDcacheDirty(page);
 }
 
 unsigned long _page_cachable_default;