- From ce45f1c4b32cf69b166f56ef5bc6c761e06ed4e5 Mon Sep 17 00:00:00 2001
- From: Yu Zhao <[email protected]>
- Date: Wed, 21 Dec 2022 21:19:01 -0700
- Subject: [PATCH 23/29] mm: multi-gen LRU: remove eviction fairness safeguard
- Recall that the eviction consumes the oldest generation: first it
- bucket-sorts pages whose gen counters were updated by the aging and
- reclaims the rest; then it increments lrugen->min_seq.
- The current eviction fairness safeguard for global reclaim has a
- dilemma: when there are multiple eligible memcgs, should it continue
- or stop upon meeting the reclaim goal? If it continues, it overshoots
- and increases direct reclaim latency; if it stops, it loses fairness
- between memcgs it has taken memory away from and those it has yet to.
- With memcg LRU, the eviction, while ensuring eventual fairness, will
- stop upon meeting its goal. Therefore the current eviction fairness
- safeguard for global reclaim will not be needed.
- Note that memcg LRU only applies to global reclaim. For memcg reclaim,
- the eviction will continue, even if it is overshooting. This becomes
- unconditional due to code simplification.
- Link: https://lkml.kernel.org/r/[email protected]
- Signed-off-by: Yu Zhao <[email protected]>
- Cc: Johannes Weiner <[email protected]>
- Cc: Jonathan Corbet <[email protected]>
- Cc: Michael Larabel <[email protected]>
- Cc: Michal Hocko <[email protected]>
- Cc: Mike Rapoport <[email protected]>
- Cc: Roman Gushchin <[email protected]>
- Cc: Suren Baghdasaryan <[email protected]>
- Signed-off-by: Andrew Morton <[email protected]>
- ---
- mm/vmscan.c | 82 +++++++++++++++--------------------------------------
- 1 file changed, 23 insertions(+), 59 deletions(-)
- --- a/mm/vmscan.c
- +++ b/mm/vmscan.c
- @@ -443,6 +443,11 @@ static bool cgroup_reclaim(struct scan_c
- return sc->target_mem_cgroup;
- }
-
- +static bool global_reclaim(struct scan_control *sc)
- +{
- + return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup);
- +}
- +
- /**
- * writeback_throttling_sane - is the usual dirty throttling mechanism available?
- * @sc: scan_control in question
- @@ -493,6 +498,11 @@ static bool cgroup_reclaim(struct scan_c
- return false;
- }
-
- +static bool global_reclaim(struct scan_control *sc)
- +{
- + return true;
- +}
- +
- static bool writeback_throttling_sane(struct scan_control *sc)
- {
- return true;
- @@ -4722,8 +4732,7 @@ static int isolate_pages(struct lruvec *
- return scanned;
- }
-
- -static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
- - bool *need_swapping)
- +static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
- {
- int type;
- int scanned;
- @@ -4812,9 +4821,6 @@ retry:
- goto retry;
- }
-
- - if (need_swapping && type == LRU_GEN_ANON)
- - *need_swapping = true;
- -
- return scanned;
- }
-
- @@ -4853,68 +4859,26 @@ done:
- return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
- }
-
- -static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
- - struct scan_control *sc, bool need_swapping)
- +static unsigned long get_nr_to_reclaim(struct scan_control *sc)
- {
- - int i;
- - DEFINE_MAX_SEQ(lruvec);
- -
- - if (!current_is_kswapd()) {
- - /* age each memcg once to ensure fairness */
- - if (max_seq - seq > 1)
- - return true;
- -
- - /* over-swapping can increase allocation latency */
- - if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
- - return true;
- -
- - /* give this thread a chance to exit and free its memory */
- - if (fatal_signal_pending(current)) {
- - sc->nr_reclaimed += MIN_LRU_BATCH;
- - return true;
- - }
- -
- - if (cgroup_reclaim(sc))
- - return false;
- - } else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
- - return false;
- -
- - /* keep scanning at low priorities to ensure fairness */
- - if (sc->priority > DEF_PRIORITY - 2)
- - return false;
- -
- - /*
- - * A minimum amount of work was done under global memory pressure. For
- - * kswapd, it may be overshooting. For direct reclaim, the target isn't
- - * met, and yet the allocation may still succeed, since kswapd may have
- - * caught up. In either case, it's better to stop now, and restart if
- - * necessary.
- - */
- - for (i = 0; i <= sc->reclaim_idx; i++) {
- - unsigned long wmark;
- - struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
- -
- - if (!managed_zone(zone))
- - continue;
- -
- - wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
- - if (wmark > zone_page_state(zone, NR_FREE_PAGES))
- - return false;
- - }
- + /* don't abort memcg reclaim to ensure fairness */
- + if (!global_reclaim(sc))
- + return -1;
-
- - sc->nr_reclaimed += MIN_LRU_BATCH;
- + /* discount the previous progress for kswapd */
- + if (current_is_kswapd())
- + return sc->nr_to_reclaim + sc->last_reclaimed;
-
- - return true;
- + return max(sc->nr_to_reclaim, compact_gap(sc->order));
- }
-
- static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
- {
- struct blk_plug plug;
- bool need_aging = false;
- - bool need_swapping = false;
- unsigned long scanned = 0;
- unsigned long reclaimed = sc->nr_reclaimed;
- - DEFINE_MAX_SEQ(lruvec);
- + unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
-
- lru_add_drain();
-
- @@ -4938,7 +4902,7 @@ static void lru_gen_shrink_lruvec(struct
- if (!nr_to_scan)
- goto done;
-
- - delta = evict_pages(lruvec, sc, swappiness, &need_swapping);
- + delta = evict_pages(lruvec, sc, swappiness);
- if (!delta)
- goto done;
-
- @@ -4946,7 +4910,7 @@ static void lru_gen_shrink_lruvec(struct
- if (scanned >= nr_to_scan)
- break;
-
- - if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
- + if (sc->nr_reclaimed >= nr_to_reclaim)
- break;
-
- cond_resched();
- @@ -5393,7 +5357,7 @@ static int run_eviction(struct lruvec *l
- if (sc->nr_reclaimed >= nr_to_reclaim)
- return 0;
-
- - if (!evict_pages(lruvec, sc, swappiness, NULL))
- + if (!evict_pages(lruvec, sc, swappiness))
- return 0;
-
- cond_resched();
|