020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch

From 9e17efd11450d3d2069adaa3c58db9ac8ebd1c66 Mon Sep 17 00:00:00 2001
From: Yu Zhao <[email protected]>
Date: Sun, 18 Sep 2022 02:00:00 -0600
Subject: [PATCH 03/29] mm/vmscan.c: refactor shrink_node()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch refactors shrink_node() to improve readability for the upcoming
changes to mm/vmscan.c.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yu Zhao <[email protected]>
Reviewed-by: Barry Song <[email protected]>
Reviewed-by: Miaohe Lin <[email protected]>
Acked-by: Brian Geffon <[email protected]>
Acked-by: Jan Alexander Steffens (heftig) <[email protected]>
Acked-by: Oleksandr Natalenko <[email protected]>
Acked-by: Steven Barrett <[email protected]>
Acked-by: Suleiman Souhlal <[email protected]>
Tested-by: Daniel Byrne <[email protected]>
Tested-by: Donald Carr <[email protected]>
Tested-by: Holger Hoffstätte <[email protected]>
Tested-by: Konstantin Kharlamov <[email protected]>
Tested-by: Shuang Zhai <[email protected]>
Tested-by: Sofia Trinh <[email protected]>
Tested-by: Vaibhav Jain <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michael Larabel <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Qi Zheng <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---
 mm/vmscan.c | 198 +++++++++++++++++++++++++++-------------------------
 1 file changed, 104 insertions(+), 94 deletions(-)
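
For orientation, a condensed sketch of shrink_node()'s retry loop once this
patch is applied; all identifiers are taken from the hunks below, and the
declarations plus the remainder of the function are elided:

	again:
		memset(&sc->nr, 0, sizeof(sc->nr));

		nr_reclaimed = sc->nr_reclaimed;
		nr_scanned = sc->nr_scanned;

		/* anon/file scan-balance heuristics, previously inline here */
		prepare_scan_count(pgdat, sc);

		shrink_node_memcgs(pgdat, sc);
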
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2497,6 +2497,109 @@ enum scan_balance {
 	SCAN_FILE,
 };
 
+static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
+{
+	unsigned long file;
+	struct lruvec *target_lruvec;
+
+	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+
+	/*
+	 * Flush the memory cgroup stats, so that we read accurate per-memcg
+	 * lruvec stats for heuristics.
+	 */
+	mem_cgroup_flush_stats();
+
+	/*
+	 * Determine the scan balance between anon and file LRUs.
+	 */
+	spin_lock_irq(&target_lruvec->lru_lock);
+	sc->anon_cost = target_lruvec->anon_cost;
+	sc->file_cost = target_lruvec->file_cost;
+	spin_unlock_irq(&target_lruvec->lru_lock);
+
+	/*
+	 * Target desirable inactive:active list ratios for the anon
+	 * and file LRU lists.
+	 */
+	if (!sc->force_deactivate) {
+		unsigned long refaults;
+
+		refaults = lruvec_page_state(target_lruvec,
+				WORKINGSET_ACTIVATE_ANON);
+		if (refaults != target_lruvec->refaults[0] ||
+		    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+			sc->may_deactivate |= DEACTIVATE_ANON;
+		else
+			sc->may_deactivate &= ~DEACTIVATE_ANON;
+
+		/*
+		 * When refaults are being observed, it means a new
+		 * workingset is being established. Deactivate to get
+		 * rid of any stale active pages quickly.
+		 */
+		refaults = lruvec_page_state(target_lruvec,
+				WORKINGSET_ACTIVATE_FILE);
+		if (refaults != target_lruvec->refaults[1] ||
+		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
+			sc->may_deactivate |= DEACTIVATE_FILE;
+		else
+			sc->may_deactivate &= ~DEACTIVATE_FILE;
+	} else
+		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
+
+	/*
+	 * If we have plenty of inactive file pages that aren't
+	 * thrashing, try to reclaim those first before touching
+	 * anonymous pages.
+	 */
+	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
+	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+		sc->cache_trim_mode = 1;
+	else
+		sc->cache_trim_mode = 0;
+
+	/*
+	 * Prevent the reclaimer from falling into the cache trap: as
+	 * cache pages start out inactive, every cache fault will tip
+	 * the scan balance towards the file LRU. And as the file LRU
+	 * shrinks, so does the window for rotation from references.
+	 * This means we have a runaway feedback loop where a tiny
+	 * thrashing file LRU becomes infinitely more attractive than
+	 * anon pages. Try to detect this based on file LRU size.
+	 */
+	if (!cgroup_reclaim(sc)) {
+		unsigned long total_high_wmark = 0;
+		unsigned long free, anon;
+		int z;
+
+		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+			node_page_state(pgdat, NR_INACTIVE_FILE);
+
+		for (z = 0; z < MAX_NR_ZONES; z++) {
+			struct zone *zone = &pgdat->node_zones[z];
+
+			if (!managed_zone(zone))
+				continue;
+
+			total_high_wmark += high_wmark_pages(zone);
+		}
+
+		/*
+		 * Consider anon: if that's low too, this isn't a
+		 * runaway file reclaim problem, but rather just
+		 * extreme pressure. Reclaim as per usual then.
+		 */
+		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
+
+		sc->file_is_tiny =
+			file + free <= total_high_wmark &&
+			!(sc->may_deactivate & DEACTIVATE_ANON) &&
+			anon >> sc->priority;
+	}
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned. The relative value of each set of LRU lists is determined
@@ -2965,109 +3068,16 @@ static void shrink_node(pg_data_t *pgdat
 	unsigned long nr_reclaimed, nr_scanned;
 	struct lruvec *target_lruvec;
 	bool reclaimable = false;
-	unsigned long file;
 
 	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
 again:
-	/*
-	 * Flush the memory cgroup stats, so that we read accurate per-memcg
-	 * lruvec stats for heuristics.
-	 */
-	mem_cgroup_flush_stats();
-
 	memset(&sc->nr, 0, sizeof(sc->nr));
 
 	nr_reclaimed = sc->nr_reclaimed;
 	nr_scanned = sc->nr_scanned;
 
-	/*
-	 * Determine the scan balance between anon and file LRUs.
-	 */
-	spin_lock_irq(&target_lruvec->lru_lock);
-	sc->anon_cost = target_lruvec->anon_cost;
-	sc->file_cost = target_lruvec->file_cost;
-	spin_unlock_irq(&target_lruvec->lru_lock);
-
-	/*
-	 * Target desirable inactive:active list ratios for the anon
-	 * and file LRU lists.
-	 */
-	if (!sc->force_deactivate) {
-		unsigned long refaults;
-
-		refaults = lruvec_page_state(target_lruvec,
-				WORKINGSET_ACTIVATE_ANON);
-		if (refaults != target_lruvec->refaults[0] ||
-		    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
-			sc->may_deactivate |= DEACTIVATE_ANON;
-		else
-			sc->may_deactivate &= ~DEACTIVATE_ANON;
-
-		/*
-		 * When refaults are being observed, it means a new
-		 * workingset is being established. Deactivate to get
-		 * rid of any stale active pages quickly.
-		 */
-		refaults = lruvec_page_state(target_lruvec,
-				WORKINGSET_ACTIVATE_FILE);
-		if (refaults != target_lruvec->refaults[1] ||
-		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
-			sc->may_deactivate |= DEACTIVATE_FILE;
-		else
-			sc->may_deactivate &= ~DEACTIVATE_FILE;
-	} else
-		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
-
-	/*
-	 * If we have plenty of inactive file pages that aren't
-	 * thrashing, try to reclaim those first before touching
-	 * anonymous pages.
-	 */
-	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
-	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
-		sc->cache_trim_mode = 1;
-	else
-		sc->cache_trim_mode = 0;
-
-	/*
-	 * Prevent the reclaimer from falling into the cache trap: as
-	 * cache pages start out inactive, every cache fault will tip
-	 * the scan balance towards the file LRU. And as the file LRU
-	 * shrinks, so does the window for rotation from references.
-	 * This means we have a runaway feedback loop where a tiny
-	 * thrashing file LRU becomes infinitely more attractive than
-	 * anon pages. Try to detect this based on file LRU size.
-	 */
-	if (!cgroup_reclaim(sc)) {
-		unsigned long total_high_wmark = 0;
-		unsigned long free, anon;
-		int z;
-
-		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
-		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
-			node_page_state(pgdat, NR_INACTIVE_FILE);
-
-		for (z = 0; z < MAX_NR_ZONES; z++) {
-			struct zone *zone = &pgdat->node_zones[z];
-			if (!managed_zone(zone))
-				continue;
-
-			total_high_wmark += high_wmark_pages(zone);
-		}
-
-		/*
-		 * Consider anon: if that's low too, this isn't a
-		 * runaway file reclaim problem, but rather just
-		 * extreme pressure. Reclaim as per usual then.
-		 */
-		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
-
-		sc->file_is_tiny =
-			file + free <= total_high_wmark &&
-			!(sc->may_deactivate & DEACTIVATE_ANON) &&
-			anon >> sc->priority;
-	}
+	prepare_scan_count(pgdat, sc);
 
 	shrink_node_memcgs(pgdat, sc);