0169-x86-ldt-Rework-locking.patch 6.4 KB

From b37d3e3a9b29caf78e2da6efba8959fc912e47a0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <[email protected]>
Date: Thu, 14 Dec 2017 12:27:30 +0100
Subject: [PATCH 169/242] x86/ldt: Rework locking
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

The LDT is duplicated on fork() and on exec(), which is wrong as exec()
should start from a clean state, i.e. without LDT. To fix this the LDT
duplication code will be moved into arch_dup_mmap() which is only called
for fork().

This introduces a locking problem. arch_dup_mmap() holds mmap_sem of the
parent process, but the LDT duplication code needs to acquire
mm->context.lock to access the LDT data safely, which is the reverse lock
order of write_ldt() where mmap_sem nests into context.lock.

Solve this by introducing a new rw semaphore which serializes the
read/write_ldt() syscall operations and use context.lock to protect the
actual installment of the LDT descriptor.

So context.lock stabilizes mm->context.ldt and can nest inside of the new
semaphore or mmap_sem.
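
To make the new nesting concrete, here is a minimal sketch of how the
write_ldt() path looks once the hunks below are applied. It is condensed for
illustration and is not the verbatim kernel code: error handling and the
allocation of the new LDT are elided, and write_ldt_sketch() is an invented
name.

static int write_ldt_sketch(struct mm_struct *mm, struct ldt_struct *new_ldt)
{
	/* Outermost level: serializes the read_ldt()/write_ldt() syscalls. */
	if (down_write_killable(&mm->context.ldt_usr_sem))
		return -EINTR;

	/*
	 * Innermost level: install_ldt() takes context.lock only around the
	 * actual installment, so context.lock can also nest underneath
	 * mmap_sem (as arch_dup_mmap() will need) without recreating the
	 * old write_ldt() lock inversion.
	 */
	mutex_lock(&mm->context.lock);
	smp_store_release(&mm->context.ldt, new_ldt);	/* pairs with load_mm_ldt() */
	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
	mutex_unlock(&mm->context.lock);

	up_write(&mm->context.ldt_usr_sem);
	return 0;
}
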
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Andy Lutomirsky <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Laight <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Eduardo Valentin <[email protected]>
Cc: Greg KH <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit c2b3496bb30bd159e9de42e5c952e1f1f33c9a77)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit bf7ee649ccc71ef9acb713a00472886c19e78684)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/mmu.h         |  4 +++-
 arch/x86/include/asm/mmu_context.h |  2 ++
 arch/x86/kernel/ldt.c              | 33 +++++++++++++++++++++------------
 3 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index bb8c597c2248..2d7e852b2dad 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_MMU_H
 
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 
@@ -26,7 +27,8 @@ typedef struct {
 	atomic64_t tlb_gen;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
-	struct ldt_struct *ldt;
+	struct rw_semaphore	ldt_usr_sem;
+	struct ldt_struct	*ldt;
 #endif
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 9be54d9c04c4..dd865c2acb9d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -131,6 +131,8 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	mutex_init(&mm->context.lock);
+
 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
 	atomic64_set(&mm->context.tlb_gen, 0);
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index b8be2413cb74..3e7208f0c350 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -4,6 +4,11 @@
  * Copyright (C) 2002 Andi Kleen
  *
  * This handles calls from both 32bit and 64bit mode.
+ *
+ * Lock order:
+ *	contex.ldt_usr_sem
+ *	  mmap_sem
+ *	    context.lock
  */
 
 #include <linux/errno.h>
@@ -41,7 +46,7 @@ static void refresh_ldt_segments(void)
 #endif
 }
 
-/* context.lock is held for us, so we don't need any locking. */
+/* context.lock is held by the task which issued the smp function call */
 static void flush_ldt(void *__mm)
 {
 	struct mm_struct *mm = __mm;
@@ -98,15 +103,17 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
-/* context.lock is held */
-static void install_ldt(struct mm_struct *current_mm,
-			struct ldt_struct *ldt)
+static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 {
+	mutex_lock(&mm->context.lock);
+
 	/* Synchronizes with READ_ONCE in load_mm_ldt. */
-	smp_store_release(&current_mm->context.ldt, ldt);
+	smp_store_release(&mm->context.ldt, ldt);
 
-	/* Activate the LDT for all CPUs using current_mm. */
-	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+	/* Activate the LDT for all CPUs using currents mm. */
+	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+	mutex_unlock(&mm->context.lock);
 }
 
 static void free_ldt_struct(struct ldt_struct *ldt)
@@ -132,7 +139,8 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
 	struct mm_struct *old_mm;
 	int retval = 0;
 
-	mutex_init(&mm->context.lock);
+	init_rwsem(&mm->context.ldt_usr_sem);
+
 	old_mm = current->mm;
 	if (!old_mm) {
 		mm->context.ldt = NULL;
@@ -179,7 +187,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
 	unsigned long entries_size;
 	int retval;
 
-	mutex_lock(&mm->context.lock);
+	down_read(&mm->context.ldt_usr_sem);
 
 	if (!mm->context.ldt) {
 		retval = 0;
@@ -208,7 +216,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
 	retval = bytecount;
 
 out_unlock:
-	mutex_unlock(&mm->context.lock);
+	up_read(&mm->context.ldt_usr_sem);
 	return retval;
 }
 
@@ -268,7 +276,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		ldt.avl = 0;
 	}
 
-	mutex_lock(&mm->context.lock);
+	if (down_write_killable(&mm->context.ldt_usr_sem))
+		return -EINTR;
 
 	old_ldt = mm->context.ldt;
 	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
@@ -290,7 +299,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	error = 0;
 
 out_unlock:
-	mutex_unlock(&mm->context.lock);
+	up_write(&mm->context.ldt_usr_sem);
 out:
 	return error;
 }
-- 
2.14.2
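
For context, a hedged sketch of the fork() side that motivates this rework;
it is not part of this patch, and ldt_dup_sketch() is an invented name. The
follow-up change moves LDT duplication into arch_dup_mmap(), which runs with
the parent's mmap_sem held, and with this patch context.lock may safely nest
underneath it:

static void ldt_dup_sketch(struct mm_struct *old_mm, struct mm_struct *mm)
{
	/* Caller (dup_mmap()) already holds old_mm's mmap_sem. */
	mutex_lock(&old_mm->context.lock);	/* stabilizes old_mm->context.ldt */

	/* ... allocate mm->context.ldt and copy old_mm's entries into it ... */

	mutex_unlock(&old_mm->context.lock);
}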