0238-x86-kaslr-Fix-the-vaddr_end-mess.patch

From 58d3cea9911d75f766b67dfc639405360b6ee6e5 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <[email protected]>
Date: Thu, 4 Jan 2018 12:32:03 +0100
Subject: [PATCH 238/242] x86/kaslr: Fix the vaddr_end mess
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 1dddd25125112ba49706518ac9077a1026a18f37 upstream.

vaddr_end for KASLR is only documented in the KASLR code itself and is
adjusted depending on config options. So it's not surprising that a change
of the memory layout causes KASLR to have the wrong vaddr_end. This can map
arbitrary stuff into other areas causing hard to understand problems.

Remove the whole ifdef magic and define the start of the cpu_entry_area to
be the end of the KASLR vaddr range.

Add documentation to that effect.

Fixes: 92a0f81d8957 ("x86/cpu_entry_area: Move it out of the fixmap")
Reported-by: Benjamin Gilbert <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Benjamin Gilbert <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Garnier <[email protected]>,
Cc: Alexander Kuleshov <[email protected]>
Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801041320360.1771@nanos
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 Documentation/x86/x86_64/mm.txt         |  6 ++++++
 arch/x86/include/asm/pgtable_64_types.h |  8 +++++++-
 arch/x86/mm/kaslr.c                     | 32 +++++++++-----------------------
 3 files changed, 22 insertions(+), 24 deletions(-)
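
As a sanity check of what the new invariants mean, the BUILD_BUG_ON()
conditions introduced below can be modeled as a stand-alone userspace
program. This is a minimal sketch, not kernel code: the constants are the
4-level (non-CONFIG_X86_5LEVEL) addresses from the mm.txt table in this
patch, hard-coded here as assumptions since kernel headers are unavailable
outside a kernel build.

/*
 * Userspace model of the compile-time checks in kernel_randomize_memory().
 * Constants are the 4-level x86-64 layout values from mm.txt; they are
 * illustrative assumptions, not taken from kernel headers.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_OFFSET_BASE	0xffff880000000000UL	/* start of direct mapping */
#define CPU_ENTRY_AREA_BASE	0xfffffe0000000000UL	/* cpu_entry_area mapping  */
#define START_KERNEL_MAP	0xffffffff80000000UL	/* kernel text mapping     */

static const unsigned long vaddr_start = PAGE_OFFSET_BASE;
/* The patch pins the end of the KASLR range to the cpu_entry_area base. */
static const unsigned long vaddr_end   = CPU_ENTRY_AREA_BASE;

int main(void)
{
	/* Same conditions as the BUILD_BUG_ON()s, evaluated at run time. */
	assert(vaddr_start < vaddr_end);
	assert(vaddr_end == CPU_ENTRY_AREA_BASE);
	assert(vaddr_end <= START_KERNEL_MAP);

	printf("KASLR regions must fall within [%#lx, %#lx)\n",
	       vaddr_start, vaddr_end);
	return 0;
}

Pinning vaddr_end to CPU_ENTRY_AREA_BASE makes the end of the randomization
range independent of CONFIG_X86_ESPFIX64 and CONFIG_EFI, so a layout change
like the one in commit 92a0f81d8957 can no longer silently leave it pointing
past a newly placed region.
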
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index f7dabe1f01e9..ea91cb61a602 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
+				    vaddr_end for KASLR
 fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
 fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
@@ -37,6 +38,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
+				    vaddr_end for KASLR
 fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
@@ -71,3 +73,7 @@ during EFI runtime calls.
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
+
+Be very careful vs. KASLR when changing anything here. The KASLR address
+range must not overlap with anything except the KASAN shadow area, which is
+correct as KASAN disables KASLR.
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 0dd48d17a4a1..928d558e7778 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -74,7 +74,13 @@ typedef struct { pteval_t pte; } pte_t;
 #define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE - 1))
 
-/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
+/*
+ * See Documentation/x86/x86_64/mm.txt for a description of the memory map.
+ *
+ * Be very careful vs. KASLR when changing anything here. The KASLR address
+ * range must not overlap with anything except the KASAN shadow area, which
+ * is correct as KASAN disables KASLR.
+ */
 #define MAXMEM	_AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
 
 #ifdef CONFIG_X86_5LEVEL
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index af599167fe3c..debc7cc8e152 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -33,25 +33,14 @@
 #define TB_SHIFT			40
 
 /*
- * Virtual address start and end range for randomization. The end changes base
- * on configuration to have the highest amount of space for randomization.
- * It increases the possible random position for each randomized region.
+ * Virtual address start and end range for randomization.
  *
- * You need to add an if/def entry if you introduce a new memory region
- * compatible with KASLR. Your entry must be in logical order with memory
- * layout. For example, ESPFIX is before EFI because its virtual address is
- * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to
- * ensure that this order is correct and won't be changed.
+ * The end address could depend on more configuration options to make the
+ * highest amount of space for randomization available, but that's too hard
+ * to keep straight and caused issues already.
  */
 static const unsigned long vaddr_start	= __PAGE_OFFSET_BASE;
-
-#if defined(CONFIG_X86_ESPFIX64)
-static const unsigned long vaddr_end	= ESPFIX_BASE_ADDR;
-#elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end	= EFI_VA_END;
-#else
-static const unsigned long vaddr_end	= __START_KERNEL_map;
-#endif
+static const unsigned long vaddr_end	= CPU_ENTRY_AREA_BASE;
 
 /* Default values */
 unsigned long page_offset_base		= __PAGE_OFFSET_BASE;
@@ -100,15 +89,12 @@ void __init kernel_randomize_memory(void)
 	unsigned long remain_entropy;
 
 	/*
-	 * All these BUILD_BUG_ON checks ensures the memory layout is
-	 * consistent with the vaddr_start/vaddr_end variables.
+	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
+	 * with the vaddr_start/vaddr_end variables. These checks are very
+	 * limited....
 	 */
 	BUILD_BUG_ON(vaddr_start >= vaddr_end);
-	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-		     vaddr_end >= EFI_VA_END);
-	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
-		      IS_ENABLED(CONFIG_EFI)) &&
-		     vaddr_end >= __START_KERNEL_map);
+	BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE);
 	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
 
 	if (!kaslr_memory_enabled())
-- 
2.14.2
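
For contrast, the pre-patch failure mode: with CONFIG_X86_ESPFIX64 set, the
removed #ifdef chain used ESPFIX_BASE_ADDR as vaddr_end, and after commit
92a0f81d8957 moved the cpu_entry_area to fffffe0000000000 that end sat above
the new region. Another hedged userspace sketch, again with the mm.txt
addresses hard-coded as assumptions:

/*
 * Model of the old, config-dependent vaddr_end. ESPFIX_BASE is the start
 * of the %esp fixup stacks from the mm.txt table above; it lies above the
 * relocated cpu_entry_area, so the old KASLR range overlapped it.
 */
#include <stdio.h>

#define CPU_ENTRY_AREA_BASE	0xfffffe0000000000UL	/* new cpu_entry_area   */
#define ESPFIX_BASE		0xffffff0000000000UL	/* old vaddr_end choice */

int main(void)
{
	unsigned long old_vaddr_end = ESPFIX_BASE;	/* CONFIG_X86_ESPFIX64 case */

	if (old_vaddr_end > CPU_ENTRY_AREA_BASE)
		printf("overlap: KASLR could map regions over the "
		       "cpu_entry_area (%#lx > %#lx)\n",
		       old_vaddr_end, CPU_ENTRY_AREA_BASE);
	return 0;
}

Nothing at compile time caught this, because the old BUILD_BUG_ON()s only
compared vaddr_end against EFI_VA_END and __START_KERNEL_map; hence the new
documentation warning that the KASLR range must not overlap anything except
the KASAN shadow area.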