0126-locking-barriers-Convert-users-of-lockless_dereferen.patch

From 9d02a406fe5f64f282832e7d0ab8fcd2631fc15a Mon Sep 17 00:00:00 2001
From: Will Deacon <[email protected]>
Date: Tue, 24 Oct 2017 11:22:48 +0100
Subject: [PATCH 126/242] locking/barriers: Convert users of
 lockless_dereference() to READ_ONCE()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

[ Note, this is a Git cherry-pick of the following commit:

    506458efaf15 ("locking/barriers: Convert users of lockless_dereference() to READ_ONCE()")

  ... for easier x86 PTI code testing and back-porting. ]

READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it
can be used instead of lockless_dereference() without any change in
semantics.
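
[ For background: lockless_dereference() was just READ_ONCE() plus an
  explicit dependency barrier. Roughly (sketched from the old
  include/linux/compiler.h definition, eliding its pointer type check):

      #define lockless_dereference(p) \
      ({ \
              typeof(p) _________p1 = READ_ONCE(p); \
              smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
              (_________p1); \
      })

  With the barrier now folded into READ_ONCE() itself, the wrapper no
  longer buys anything over a plain READ_ONCE(). ]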
Signed-off-by: Will Deacon <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 3382290ed2d5e275429cef510ab21889d3ccd164)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 7252704bfd83e951d00ec75526ed2bf64a7f6ee1)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/mmu_context.h |  4 ++--
 fs/overlayfs/ovl_entry.h           |  2 +-
 include/linux/rculist.h            |  4 ++--
 include/linux/rcupdate.h           |  4 ++--
 mm/slab.h                          |  2 +-
 arch/x86/events/core.c             |  2 +-
 arch/x86/kernel/ldt.c              |  2 +-
 drivers/md/dm-mpath.c              | 20 ++++++++++----------
 fs/dcache.c                        |  4 ++--
 fs/overlayfs/readdir.c             |  2 +-
 kernel/events/core.c               |  4 ++--
 kernel/seccomp.c                   |  2 +-
 kernel/task_work.c                 |  2 +-
 13 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 3c856a15b98e..efc530642f7d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -72,8 +72,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
         struct ldt_struct *ldt;

-        /* lockless_dereference synchronizes with smp_store_release */
-        ldt = lockless_dereference(mm->context.ldt);
+        /* READ_ONCE synchronizes with smp_store_release */
+        ldt = READ_ONCE(mm->context.ldt);

         /*
          * Any change to mm->context.ldt is followed by an IPI to all
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 25d9b5adcd42..36b49bd09264 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)

 static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
 {
-        return lockless_dereference(oi->__upperdentry);
+        return READ_ONCE(oi->__upperdentry);
 }
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..3a2bb7d8ed4d 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -274,7 +274,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-        container_of(lockless_dereference(ptr), type, member)
+        container_of(READ_ONCE(ptr), type, member)

 /**
  * Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -367,7 +367,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * example is when items are added to the list, but never deleted.
  */
 #define list_entry_lockless(ptr, type, member) \
-        container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+        container_of((typeof(ptr))READ_ONCE(ptr), type, member)

 /**
  * list_for_each_entry_lockless - iterate over rcu list of given type
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f816fc72b51e..ae494eb7b401 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -341,7 +341,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define __rcu_dereference_check(p, c, space) \
 ({ \
         /* Dependency order vs. p above. */ \
-        typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+        typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
         RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
         rcu_dereference_sparse(p, space); \
         ((typeof(*p) __force __kernel *)(________p1)); \
@@ -355,7 +355,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_dereference_raw(p) \
 ({ \
         /* Dependency order vs. p above. */ \
-        typeof(p) ________p1 = lockless_dereference(p); \
+        typeof(p) ________p1 = READ_ONCE(p); \
         ((typeof(*p) __force __kernel *)(________p1)); \
 })
diff --git a/mm/slab.h b/mm/slab.h
index 6885e1192ec5..494cccef822a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -257,7 +257,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
          * memcg_caches issues a write barrier to match this (see
          * memcg_create_kmem_cache()).
          */
-        cachep = lockless_dereference(arr->entries[idx]);
+        cachep = READ_ONCE(arr->entries[idx]);
         rcu_read_unlock();

         return cachep;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 939050169d12..18685de61288 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2336,7 +2336,7 @@ static unsigned long get_segment_base(unsigned int segment)
                 struct ldt_struct *ldt;

                 /* IRQs are off, so this synchronizes with smp_store_release */
-                ldt = lockless_dereference(current->active_mm->context.ldt);
+                ldt = READ_ONCE(current->active_mm->context.ldt);
                 if (!ldt || idx >= ldt->nr_entries)
                         return 0;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0402d44deb4d..b8be2413cb74 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -102,7 +102,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 static void install_ldt(struct mm_struct *current_mm,
                         struct ldt_struct *ldt)
 {
-        /* Synchronizes with lockless_dereference in load_mm_ldt. */
+        /* Synchronizes with READ_ONCE in load_mm_ldt. */
         smp_store_release(&current_mm->context.ldt, ldt);

         /* Activate the LDT for all CPUs using current_mm. */
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d24e4b05f5da..731b7ffc7e37 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,

         pgpath = path_to_pgpath(path);

-        if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+        if (unlikely(READ_ONCE(m->current_pg) != pg)) {
                 /* Only update current_pgpath if pg changed */
                 spin_lock_irqsave(&m->lock, flags);
                 m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
         }

         /* Were we instructed to switch PG? */
-        if (lockless_dereference(m->next_pg)) {
+        if (READ_ONCE(m->next_pg)) {
                 spin_lock_irqsave(&m->lock, flags);
                 pg = m->next_pg;
                 if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)

         /* Don't change PG until it has no remaining paths */
 check_current_pg:
-        pg = lockless_dereference(m->current_pg);
+        pg = READ_ONCE(m->current_pg);
         if (pg) {
                 pgpath = choose_path_in_pg(m, pg, nr_bytes);
                 if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
         struct request *clone;

         /* Do we need to select a new pgpath? */
-        pgpath = lockless_dereference(m->current_pgpath);
+        pgpath = READ_ONCE(m->current_pgpath);
         if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
                 pgpath = choose_pgpath(m, nr_bytes);
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
         bool queue_io;

         /* Do we need to select a new pgpath? */
-        pgpath = lockless_dereference(m->current_pgpath);
+        pgpath = READ_ONCE(m->current_pgpath);
         queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
         if (!pgpath || !queue_io)
                 pgpath = choose_pgpath(m, nr_bytes);
@@ -1799,7 +1799,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
         struct pgpath *current_pgpath;
         int r;

-        current_pgpath = lockless_dereference(m->current_pgpath);
+        current_pgpath = READ_ONCE(m->current_pgpath);
         if (!current_pgpath)
                 current_pgpath = choose_pgpath(m, 0);
@@ -1821,7 +1821,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
         }

         if (r == -ENOTCONN) {
-                if (!lockless_dereference(m->current_pg)) {
+                if (!READ_ONCE(m->current_pg)) {
                         /* Path status changed, redo selection */
                         (void) choose_pgpath(m, 0);
                 }
@@ -1890,9 +1890,9 @@ static int multipath_busy(struct dm_target *ti)
                 return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);

         /* Guess which priority_group will be used at next mapping time */
-        pg = lockless_dereference(m->current_pg);
-        next_pg = lockless_dereference(m->next_pg);
-        if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+        pg = READ_ONCE(m->current_pg);
+        next_pg = READ_ONCE(m->next_pg);
+        if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
                 pg = next_pg;

         if (!pg) {
diff --git a/fs/dcache.c b/fs/dcache.c
index 3203470c59c2..ccc2bcdcfdfb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 {
         /*
          * Be careful about RCU walk racing with rename:
-         * use 'lockless_dereference' to fetch the name pointer.
+         * use 'READ_ONCE' to fetch the name pointer.
          *
          * NOTE! Even if a rename will mean that the length
          * was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
          * early because the data cannot match (there can
          * be no NUL in the ct/tcount data)
          */
-        const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+        const unsigned char *cs = READ_ONCE(dentry->d_name.name);

         return dentry_string_cmp(cs, ct, tcount);
 }
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 3ff960372cb9..7920a3f62c19 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -440,7 +440,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
         if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
                 struct inode *inode = file_inode(file);

-                realfile = lockless_dereference(od->upperfile);
+                realfile = READ_ONCE(od->upperfile);
                 if (!realfile) {
                         struct path upperpath;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5d4398d1fa19..9f51738bf32e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4221,7 +4221,7 @@ static void perf_remove_from_owner(struct perf_event *event)
          * indeed free this event, otherwise we need to serialize on
          * owner->perf_event_mutex.
          */
-        owner = lockless_dereference(event->owner);
+        owner = READ_ONCE(event->owner);
         if (owner) {
                 /*
                  * Since delayed_put_task_struct() also drops the last
@@ -4318,7 +4318,7 @@ int perf_event_release_kernel(struct perf_event *event)
                  * Cannot change, child events are not migrated, see the
                  * comment with perf_event_ctx_lock_nested().
                  */
-                ctx = lockless_dereference(child->ctx);
+                ctx = READ_ONCE(child->ctx);
                 /*
                  * Since child_mutex nests inside ctx::mutex, we must jump
                  * through hoops. We start by grabbing a reference on the ctx.
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 34aced9ff3ff..3fd2c4b23697 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -188,7 +188,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
         u32 ret = SECCOMP_RET_ALLOW;
         /* Make sure cross-thread synced filter points somewhere sane. */
         struct seccomp_filter *f =
-                        lockless_dereference(current->seccomp.filter);
+                        READ_ONCE(current->seccomp.filter);

         /* Ensure unexpected behavior doesn't result in failing open. */
         if (unlikely(WARN_ON(f == NULL)))
diff --git a/kernel/task_work.c b/kernel/task_work.c
index e056d5429783..0371093a2331 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -67,7 +67,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
          * we raced with task_work_run(), *pprev == NULL/exited.
          */
         raw_spin_lock_irqsave(&task->pi_lock, flags);
-        while ((work = lockless_dereference(*pprev))) {
+        while ((work = READ_ONCE(*pprev))) {
                 if (work->func != func)
                         pprev = &work->next;
                 else if (cmpxchg(pprev, work, work->next) == work)
-- 
2.14.2
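
For illustration, not part of the patch: a minimal, self-contained
userspace analogue of the publish/consume pairing that install_ldt()
and load_mm_ldt() rely on above, with C11 atomics standing in for the
kernel's smp_store_release()/READ_ONCE(). All names in this sketch
(ldt_like, publish, consume, shared_ptr) are hypothetical.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ldt_like {
        int nr_entries;
};

static _Atomic(struct ldt_like *) shared_ptr = NULL;

/* Publisher: mirrors smp_store_release(&current_mm->context.ldt, ldt). */
static void publish(int n)
{
        struct ldt_like *p = malloc(sizeof(*p));

        if (!p)
                exit(EXIT_FAILURE);
        p->nr_entries = n;                      /* initialise first ...  */
        atomic_store_explicit(&shared_ptr, p,
                              memory_order_release); /* ... then publish */
}

/*
 * Consumer: mirrors ldt = READ_ONCE(mm->context.ldt). Consume ordering
 * covers the dependent load of p->nr_entries, playing the role of the
 * smp_read_barrier_depends() now folded into READ_ONCE(); compilers
 * typically promote consume to acquire, which is strictly stronger.
 */
static int consume(void)
{
        struct ldt_like *p = atomic_load_explicit(&shared_ptr,
                                                  memory_order_consume);

        return p ? p->nr_entries : 0;
}

int main(void)
{
        publish(16);
        printf("nr_entries = %d\n", consume());
        return 0;
}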