From 9e17efd11450d3d2069adaa3c58db9ac8ebd1c66 Mon Sep 17 00:00:00 2001
From: Yu Zhao <[email protected]>
Date: Sun, 18 Sep 2022 02:00:00 -0600
Subject: [PATCH 03/29] mm/vmscan.c: refactor shrink_node()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Refactor shrink_node() by moving the preparation of the anon/file scan
balance into a new helper, prepare_scan_count(), to improve readability
for the upcoming changes to mm/vmscan.c. No functional change intended.
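
For reference, the two heuristics the new helper derives from the lruvec
and node counters, sc->cache_trim_mode and sc->file_is_tiny, can be
sketched as a stand-alone userspace program. This is a minimal
illustration only; struct sc_sketch and both functions below are
invented stand-ins, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the scan_control fields used here. */
	struct sc_sketch {
		int priority;	/* DEF_PRIORITY (12) down to 0 */
		bool may_deactivate_file;
		bool may_deactivate_anon;
	};

	/*
	 * cache_trim_mode: plenty of inactive file pages that are not
	 * thrashing, so reclaim those before touching anon pages.
	 */
	static bool cache_trim_mode(unsigned long inactive_file,
			const struct sc_sketch *sc)
	{
		return (inactive_file >> sc->priority) &&
		       !sc->may_deactivate_file;
	}

	/*
	 * file_is_tiny: file plus free pages fall below the high
	 * watermarks while anon is still sizable, i.e. file reclaim
	 * alone cannot relieve the pressure.
	 */
	static bool file_is_tiny(unsigned long file, unsigned long free,
			unsigned long total_high_wmark,
			unsigned long inactive_anon,
			const struct sc_sketch *sc)
	{
		return file + free <= total_high_wmark &&
		       !sc->may_deactivate_anon &&
		       (inactive_anon >> sc->priority);
	}

	int main(void)
	{
		struct sc_sketch sc = { .priority = 4 };

		printf("cache_trim_mode=%d file_is_tiny=%d\n",
		       cache_trim_mode(1UL << 16, &sc),
		       file_is_tiny(1024, 2048, 8192, 1UL << 20, &sc));
		return 0;
	}

The right shifts by sc->priority encode the convention that a list only
counts as sizable when it exceeds 2^priority pages, so both thresholds
relax as reclaim priority drops toward zero.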

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yu Zhao <[email protected]>
Reviewed-by: Barry Song <[email protected]>
Reviewed-by: Miaohe Lin <[email protected]>
Acked-by: Brian Geffon <[email protected]>
Acked-by: Jan Alexander Steffens (heftig) <[email protected]>
Acked-by: Oleksandr Natalenko <[email protected]>
Acked-by: Steven Barrett <[email protected]>
Acked-by: Suleiman Souhlal <[email protected]>
Tested-by: Daniel Byrne <[email protected]>
Tested-by: Donald Carr <[email protected]>
Tested-by: Holger Hoffstätte <[email protected]>
Tested-by: Konstantin Kharlamov <[email protected]>
Tested-by: Shuang Zhai <[email protected]>
Tested-by: Sofia Trinh <[email protected]>
Tested-by: Vaibhav Jain <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michael Larabel <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Qi Zheng <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---
 mm/vmscan.c | 198 +++++++++++++++++++++++++++-------------------------
 1 file changed, 104 insertions(+), 94 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2497,6 +2497,109 @@ enum scan_balance {
 	SCAN_FILE,
 };
 
+static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
+{
+	unsigned long file;
+	struct lruvec *target_lruvec;
+
+	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+
+	/*
+	 * Flush the memory cgroup stats, so that we read accurate per-memcg
+	 * lruvec stats for heuristics.
+	 */
+	mem_cgroup_flush_stats();
+
+	/*
+	 * Determine the scan balance between anon and file LRUs.
+	 */
+	spin_lock_irq(&target_lruvec->lru_lock);
+	sc->anon_cost = target_lruvec->anon_cost;
+	sc->file_cost = target_lruvec->file_cost;
+	spin_unlock_irq(&target_lruvec->lru_lock);
+
+	/*
+	 * Target desirable inactive:active list ratios for the anon
+	 * and file LRU lists.
+	 */
+	if (!sc->force_deactivate) {
+		unsigned long refaults;
+
+		refaults = lruvec_page_state(target_lruvec,
+				WORKINGSET_ACTIVATE_ANON);
+		if (refaults != target_lruvec->refaults[0] ||
+		    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+			sc->may_deactivate |= DEACTIVATE_ANON;
+		else
+			sc->may_deactivate &= ~DEACTIVATE_ANON;
+
+		/*
+		 * When refaults are being observed, it means a new
+		 * workingset is being established. Deactivate to get
+		 * rid of any stale active pages quickly.
+		 */
+		refaults = lruvec_page_state(target_lruvec,
+				WORKINGSET_ACTIVATE_FILE);
+		if (refaults != target_lruvec->refaults[1] ||
+		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
+			sc->may_deactivate |= DEACTIVATE_FILE;
+		else
+			sc->may_deactivate &= ~DEACTIVATE_FILE;
+	} else
+		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
+
+	/*
+	 * If we have plenty of inactive file pages that aren't
+	 * thrashing, try to reclaim those first before touching
+	 * anonymous pages.
+	 */
+	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
+	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+		sc->cache_trim_mode = 1;
+	else
+		sc->cache_trim_mode = 0;
+
+	/*
+	 * Prevent the reclaimer from falling into the cache trap: as
+	 * cache pages start out inactive, every cache fault will tip
+	 * the scan balance towards the file LRU. And as the file LRU
+	 * shrinks, so does the window for rotation from references.
+	 * This means we have a runaway feedback loop where a tiny
+	 * thrashing file LRU becomes infinitely more attractive than
+	 * anon pages. Try to detect this based on file LRU size.
+	 */
+	if (!cgroup_reclaim(sc)) {
+		unsigned long total_high_wmark = 0;
+		unsigned long free, anon;
+		int z;
+
+		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+			   node_page_state(pgdat, NR_INACTIVE_FILE);
+
+		for (z = 0; z < MAX_NR_ZONES; z++) {
+			struct zone *zone = &pgdat->node_zones[z];
+
+			if (!managed_zone(zone))
+				continue;
+
+			total_high_wmark += high_wmark_pages(zone);
+		}
+
+		/*
+		 * Consider anon: if that's low too, this isn't a
+		 * runaway file reclaim problem, but rather just
+		 * extreme pressure. Reclaim as per usual then.
+		 */
+		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
+
+		sc->file_is_tiny =
+			file + free <= total_high_wmark &&
+			!(sc->may_deactivate & DEACTIVATE_ANON) &&
+			anon >> sc->priority;
+	}
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned. The relative value of each set of LRU lists is determined
@@ -2965,109 +3068,16 @@ static void shrink_node(pg_data_t *pgdat
 	unsigned long nr_reclaimed, nr_scanned;
 	struct lruvec *target_lruvec;
 	bool reclaimable = false;
-	unsigned long file;
 
 	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
 
 again:
-	/*
-	 * Flush the memory cgroup stats, so that we read accurate per-memcg
-	 * lruvec stats for heuristics.
-	 */
-	mem_cgroup_flush_stats();
-
 	memset(&sc->nr, 0, sizeof(sc->nr));
 
 	nr_reclaimed = sc->nr_reclaimed;
 	nr_scanned = sc->nr_scanned;
 
-	/*
-	 * Determine the scan balance between anon and file LRUs.
-	 */
-	spin_lock_irq(&target_lruvec->lru_lock);
-	sc->anon_cost = target_lruvec->anon_cost;
-	sc->file_cost = target_lruvec->file_cost;
-	spin_unlock_irq(&target_lruvec->lru_lock);
-
-	/*
-	 * Target desirable inactive:active list ratios for the anon
-	 * and file LRU lists.
-	 */
-	if (!sc->force_deactivate) {
-		unsigned long refaults;
-
-		refaults = lruvec_page_state(target_lruvec,
-				WORKINGSET_ACTIVATE_ANON);
-		if (refaults != target_lruvec->refaults[0] ||
-		    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
-			sc->may_deactivate |= DEACTIVATE_ANON;
-		else
-			sc->may_deactivate &= ~DEACTIVATE_ANON;
-
-		/*
-		 * When refaults are being observed, it means a new
-		 * workingset is being established. Deactivate to get
-		 * rid of any stale active pages quickly.
-		 */
-		refaults = lruvec_page_state(target_lruvec,
-				WORKINGSET_ACTIVATE_FILE);
-		if (refaults != target_lruvec->refaults[1] ||
-		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
-			sc->may_deactivate |= DEACTIVATE_FILE;
-		else
-			sc->may_deactivate &= ~DEACTIVATE_FILE;
-	} else
-		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
-
-	/*
-	 * If we have plenty of inactive file pages that aren't
-	 * thrashing, try to reclaim those first before touching
-	 * anonymous pages.
-	 */
-	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
-	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
-		sc->cache_trim_mode = 1;
-	else
-		sc->cache_trim_mode = 0;
-
-	/*
-	 * Prevent the reclaimer from falling into the cache trap: as
-	 * cache pages start out inactive, every cache fault will tip
-	 * the scan balance towards the file LRU. And as the file LRU
-	 * shrinks, so does the window for rotation from references.
-	 * This means we have a runaway feedback loop where a tiny
-	 * thrashing file LRU becomes infinitely more attractive than
-	 * anon pages. Try to detect this based on file LRU size.
-	 */
-	if (!cgroup_reclaim(sc)) {
-		unsigned long total_high_wmark = 0;
-		unsigned long free, anon;
-		int z;
-
-		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
-		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
-			   node_page_state(pgdat, NR_INACTIVE_FILE);
-
-		for (z = 0; z < MAX_NR_ZONES; z++) {
-			struct zone *zone = &pgdat->node_zones[z];
-			if (!managed_zone(zone))
-				continue;
-
-			total_high_wmark += high_wmark_pages(zone);
-		}
-
-		/*
-		 * Consider anon: if that's low too, this isn't a
-		 * runaway file reclaim problem, but rather just
-		 * extreme pressure. Reclaim as per usual then.
-		 */
-		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
-
-		sc->file_is_tiny =
-			file + free <= total_high_wmark &&
-			!(sc->may_deactivate & DEACTIVATE_ANON) &&
-			anon >> sc->priority;
-	}
+	prepare_scan_count(pgdat, sc);
 
 	shrink_node_memcgs(pgdat, sc);
 