From 9d02a406fe5f64f282832e7d0ab8fcd2631fc15a Mon Sep 17 00:00:00 2001
From: Will Deacon <[email protected]>
Date: Tue, 24 Oct 2017 11:22:48 +0100
Subject: [PATCH 126/242] locking/barriers: Convert users of
 lockless_dereference() to READ_ONCE()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

[ Note, this is a Git cherry-pick of the following commit:

    506458efaf15 ("locking/barriers: Convert users of lockless_dereference() to READ_ONCE()")

  ... for easier x86 PTI code testing and back-porting. ]

READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it
can be used instead of lockless_dereference() without any change in
semantics.

Signed-off-by: Will Deacon <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 3382290ed2d5e275429cef510ab21889d3ccd164)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 7252704bfd83e951d00ec75526ed2bf64a7f6ee1)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
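
[ Reviewer note, a minimal sketch of why the conversion is safe; the
  macro text below is paraphrased from include/linux/compiler.h of
  this era and may not match the patched tree byte-for-byte:

	/* lockless_dereference() was a dependency-ordered load: */
	#define lockless_dereference(p) \
	({ \
		typeof(p) _________p1 = READ_ONCE(p); \
		smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
		(_________p1); \
	})

  Since READ_ONCE() now issues smp_read_barrier_depends() itself after
  its volatile load (see upstream commit 76ebbe78f739, "locking/barriers:
  Add implicit smp_read_barrier_depends() to READ_ONCE()"), the wrapper
  is redundant, and every hunk below can load the pointer with a plain
  READ_ONCE(). smp_read_barrier_depends() is a no-op on all
  architectures except Alpha, so the implicit barrier costs nothing
  elsewhere. ]
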
 arch/x86/include/asm/mmu_context.h |  4 ++--
 fs/overlayfs/ovl_entry.h           |  2 +-
 include/linux/rculist.h            |  4 ++--
 include/linux/rcupdate.h           |  4 ++--
 mm/slab.h                          |  2 +-
 arch/x86/events/core.c             |  2 +-
 arch/x86/kernel/ldt.c              |  2 +-
 drivers/md/dm-mpath.c              | 20 ++++++++++----------
 fs/dcache.c                        |  4 ++--
 fs/overlayfs/readdir.c             |  2 +-
 kernel/events/core.c               |  4 ++--
 kernel/seccomp.c                   |  2 +-
 kernel/task_work.c                 |  2 +-
 13 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 3c856a15b98e..efc530642f7d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -72,8 +72,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
 
-	/* lockless_dereference synchronizes with smp_store_release */
-	ldt = lockless_dereference(mm->context.ldt);
+	/* READ_ONCE synchronizes with smp_store_release */
+	ldt = READ_ONCE(mm->context.ldt);
 
 	/*
 	 * Any change to mm->context.ldt is followed by an IPI to all
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 25d9b5adcd42..36b49bd09264 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -77,5 +77,5 @@ static inline struct ovl_inode *OVL_I(struct inode *inode)
 
 static inline struct dentry *ovl_upperdentry_dereference(struct ovl_inode *oi)
 {
-	return lockless_dereference(oi->__upperdentry);
+	return READ_ONCE(oi->__upperdentry);
 }
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..3a2bb7d8ed4d 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -274,7 +274,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-	container_of(lockless_dereference(ptr), type, member)
+	container_of(READ_ONCE(ptr), type, member)
 
 /**
  * Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -367,7 +367,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * example is when items are added to the list, but never deleted.
  */
 #define list_entry_lockless(ptr, type, member) \
-	container_of((typeof(ptr))lockless_dereference(ptr), type, member)
+	container_of((typeof(ptr))READ_ONCE(ptr), type, member)
 
 /**
  * list_for_each_entry_lockless - iterate over rcu list of given type
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f816fc72b51e..ae494eb7b401 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -341,7 +341,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define __rcu_dereference_check(p, c, space) \
 ({ \
 	/* Dependency order vs. p above. */ \
-	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+	typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
 	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
 	rcu_dereference_sparse(p, space); \
 	((typeof(*p) __force __kernel *)(________p1)); \
@@ -355,7 +355,7 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_dereference_raw(p) \
 ({ \
 	/* Dependency order vs. p above. */ \
-	typeof(p) ________p1 = lockless_dereference(p); \
+	typeof(p) ________p1 = READ_ONCE(p); \
 	((typeof(*p) __force __kernel *)(________p1)); \
 })
 
diff --git a/mm/slab.h b/mm/slab.h
index 6885e1192ec5..494cccef822a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -257,7 +257,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
 	 * memcg_caches issues a write barrier to match this (see
 	 * memcg_create_kmem_cache()).
 	 */
-	cachep = lockless_dereference(arr->entries[idx]);
+	cachep = READ_ONCE(arr->entries[idx]);
 	rcu_read_unlock();
 
 	return cachep;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 939050169d12..18685de61288 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2336,7 +2336,7 @@ static unsigned long get_segment_base(unsigned int segment)
 		struct ldt_struct *ldt;
 
 		/* IRQs are off, so this synchronizes with smp_store_release */
-		ldt = lockless_dereference(current->active_mm->context.ldt);
+		ldt = READ_ONCE(current->active_mm->context.ldt);
 		if (!ldt || idx >= ldt->nr_entries)
 			return 0;
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0402d44deb4d..b8be2413cb74 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -102,7 +102,7 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 static void install_ldt(struct mm_struct *current_mm,
 			struct ldt_struct *ldt)
 {
-	/* Synchronizes with lockless_dereference in load_mm_ldt. */
+	/* Synchronizes with READ_ONCE in load_mm_ldt. */
 	smp_store_release(&current_mm->context.ldt, ldt);
 
 	/* Activate the LDT for all CPUs using current_mm. */
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d24e4b05f5da..731b7ffc7e37 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
 
 	pgpath = path_to_pgpath(path);
 
-	if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+	if (unlikely(READ_ONCE(m->current_pg) != pg)) {
 		/* Only update current_pgpath if pg changed */
 		spin_lock_irqsave(&m->lock, flags);
 		m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	}
 
 	/* Were we instructed to switch PG? */
-	if (lockless_dereference(m->next_pg)) {
+	if (READ_ONCE(m->next_pg)) {
 		spin_lock_irqsave(&m->lock, flags);
 		pg = m->next_pg;
 		if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 
 	/* Don't change PG until it has no remaining paths */
 check_current_pg:
-	pg = lockless_dereference(m->current_pg);
+	pg = READ_ONCE(m->current_pg);
 	if (pg) {
 		pgpath = choose_path_in_pg(m, pg, nr_bytes);
 		if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 	struct request *clone;
 
 	/* Do we need to select a new pgpath? */
-	pgpath = lockless_dereference(m->current_pgpath);
+	pgpath = READ_ONCE(m->current_pgpath);
 	if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 		pgpath = choose_pgpath(m, nr_bytes);
 
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
 	bool queue_io;
 
 	/* Do we need to select a new pgpath? */
-	pgpath = lockless_dereference(m->current_pgpath);
+	pgpath = READ_ONCE(m->current_pgpath);
 	queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
 	if (!pgpath || !queue_io)
 		pgpath = choose_pgpath(m, nr_bytes);
@@ -1799,7 +1799,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	struct pgpath *current_pgpath;
 	int r;
 
-	current_pgpath = lockless_dereference(m->current_pgpath);
+	current_pgpath = READ_ONCE(m->current_pgpath);
 	if (!current_pgpath)
 		current_pgpath = choose_pgpath(m, 0);
 
@@ -1821,7 +1821,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 	}
 
 	if (r == -ENOTCONN) {
-		if (!lockless_dereference(m->current_pg)) {
+		if (!READ_ONCE(m->current_pg)) {
 			/* Path status changed, redo selection */
 			(void) choose_pgpath(m, 0);
 		}
@@ -1890,9 +1890,9 @@ static int multipath_busy(struct dm_target *ti)
 		return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
 
 	/* Guess which priority_group will be used at next mapping time */
-	pg = lockless_dereference(m->current_pg);
-	next_pg = lockless_dereference(m->next_pg);
-	if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+	pg = READ_ONCE(m->current_pg);
+	next_pg = READ_ONCE(m->next_pg);
+	if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
 		pg = next_pg;
 
 	if (!pg) {
diff --git a/fs/dcache.c b/fs/dcache.c
index 3203470c59c2..ccc2bcdcfdfb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -231,7 +231,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 {
 	/*
 	 * Be careful about RCU walk racing with rename:
-	 * use 'lockless_dereference' to fetch the name pointer.
+	 * use 'READ_ONCE' to fetch the name pointer.
 	 *
 	 * NOTE! Even if a rename will mean that the length
 	 * was not loaded atomically, we don't care. The
@@ -245,7 +245,7 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
 	 * early because the data cannot match (there can
 	 * be no NUL in the ct/tcount data)
 	 */
-	const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+	const unsigned char *cs = READ_ONCE(dentry->d_name.name);
 
 	return dentry_string_cmp(cs, ct, tcount);
 }
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 3ff960372cb9..7920a3f62c19 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -440,7 +440,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
 	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
 		struct inode *inode = file_inode(file);
 
-		realfile = lockless_dereference(od->upperfile);
+		realfile = READ_ONCE(od->upperfile);
 		if (!realfile) {
 			struct path upperpath;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5d4398d1fa19..9f51738bf32e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4221,7 +4221,7 @@ static void perf_remove_from_owner(struct perf_event *event)
 	 * indeed free this event, otherwise we need to serialize on
 	 * owner->perf_event_mutex.
 	 */
-	owner = lockless_dereference(event->owner);
+	owner = READ_ONCE(event->owner);
 	if (owner) {
 		/*
 		 * Since delayed_put_task_struct() also drops the last
@@ -4318,7 +4318,7 @@ int perf_event_release_kernel(struct perf_event *event)
 		 * Cannot change, child events are not migrated, see the
 		 * comment with perf_event_ctx_lock_nested().
 		 */
-		ctx = lockless_dereference(child->ctx);
+		ctx = READ_ONCE(child->ctx);
 		/*
 		 * Since child_mutex nests inside ctx::mutex, we must jump
 		 * through hoops. We start by grabbing a reference on the ctx.
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 34aced9ff3ff..3fd2c4b23697 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -188,7 +188,7 @@ static u32 seccomp_run_filters(const struct seccomp_data *sd,
 	u32 ret = SECCOMP_RET_ALLOW;
 	/* Make sure cross-thread synced filter points somewhere sane. */
 	struct seccomp_filter *f =
-			lockless_dereference(current->seccomp.filter);
+			READ_ONCE(current->seccomp.filter);
 
 	/* Ensure unexpected behavior doesn't result in failing open. */
 	if (unlikely(WARN_ON(f == NULL)))
diff --git a/kernel/task_work.c b/kernel/task_work.c
index e056d5429783..0371093a2331 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -67,7 +67,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	 * we raced with task_work_run(), *pprev == NULL/exited.
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	while ((work = lockless_dereference(*pprev))) {
+	while ((work = READ_ONCE(*pprev))) {
 		if (work->func != func)
 			pprev = &work->next;
 		else if (cmpxchg(pprev, work, work->next) == work)
-- 
2.14.2