0128-mm-sparsemem-Fix-ARM64-boot-crash-when-CONFIG_SPARSE.patch

From 783e517bb1fd814658cdb26d1d7b5d8f67b448e4 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov" <[email protected]>
Date: Tue, 7 Nov 2017 11:33:37 +0300
Subject: [PATCH 128/242] mm/sparsemem: Fix ARM64 boot crash when
 CONFIG_SPARSEMEM_EXTREME=y
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Since commit:

  83e3c48729d9 ("mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y")

we allocate the mem_section array dynamically in sparse_memory_present_with_active_regions(),
but some architectures, like arm64, don't call the routine to initialize sparsemem.

Let's move the initialization into memory_present(); it should cover all
architectures.

Reported-and-tested-by: Sudeep Holla <[email protected]>
Tested-by: Bjorn Andersson <[email protected]>
Signed-off-by: Kirill A. Shutemov <[email protected]>
Acked-by: Will Deacon <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Fixes: 83e3c48729d9 ("mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 629a359bdb0e0652a8227b4ff3125431995fec6e)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit fbc3acbf37de68310eb5bbc7f4d1977e7b90100e)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 mm/page_alloc.c | 10 ----------
 mm/sparse.c     | 10 ++++++++++
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 66eb23ab658d..1423da8dd16f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5707,16 +5707,6 @@ void __init sparse_memory_present_with_active_regions(int nid)
 	unsigned long start_pfn, end_pfn;
 	int i, this_nid;
 
-#ifdef CONFIG_SPARSEMEM_EXTREME
-	if (!mem_section) {
-		unsigned long size, align;
-
-		size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
-		align = 1 << (INTERNODE_CACHE_SHIFT);
-		mem_section = memblock_virt_alloc(size, align);
-	}
-#endif
-
 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
 		memory_present(this_nid, start_pfn, end_pfn);
 }
diff --git a/mm/sparse.c b/mm/sparse.c
index 308a0789d1bb..9c48e4fe8ce0 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -210,6 +210,16 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
 
+#ifdef CONFIG_SPARSEMEM_EXTREME
+	if (unlikely(!mem_section)) {
+		unsigned long size, align;
+
+		size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+		align = 1 << (INTERNODE_CACHE_SHIFT);
+		mem_section = memblock_virt_alloc(size, align);
+	}
+#endif
+
 	start &= PAGE_SECTION_MASK;
 	mminit_validate_memmodel_limits(&start, &end);
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
-- 
2.14.2