020-v6.3-24-mm-multi-gen-LRU-remove-aging-fairness-safeguard.patch 9.0 KB

  1. From e20b7386fccc18c791796eb1dc1a91eee3ccf801 Mon Sep 17 00:00:00 2001
  2. From: Yu Zhao <[email protected]>
  3. Date: Wed, 21 Dec 2022 21:19:02 -0700
  4. Subject: [PATCH 24/29] mm: multi-gen LRU: remove aging fairness safeguard
  5. Recall that the aging produces the youngest generation: first it scans
  6. for accessed pages and updates their gen counters; then it increments
  7. lrugen->max_seq.
  8. The current aging fairness safeguard for kswapd uses two passes to
  9. ensure the fairness to multiple eligible memcgs. On the first pass,
  10. which is shared with the eviction, it checks whether all eligible
  11. memcgs are low on cold pages. If so, it requires a second pass, on
  12. which it ages all those memcgs at the same time.
  13. With memcg LRU, the aging, while ensuring eventual fairness, will run
  14. when necessary. Therefore the current aging fairness safeguard for
  15. kswapd will not be needed.
  16. Note that memcg LRU only applies to global reclaim. For memcg reclaim,
  17. the aging can be unfair to different memcgs, i.e., their
  18. lrugen->max_seq can be incremented at different paces.
  19. Link: https://lkml.kernel.org/r/[email protected]
  20. Signed-off-by: Yu Zhao <[email protected]>
  21. Cc: Johannes Weiner <[email protected]>
  22. Cc: Jonathan Corbet <[email protected]>
  23. Cc: Michael Larabel <[email protected]>
  24. Cc: Michal Hocko <[email protected]>
  25. Cc: Mike Rapoport <[email protected]>
  26. Cc: Roman Gushchin <[email protected]>
  27. Cc: Suren Baghdasaryan <[email protected]>
  28. Signed-off-by: Andrew Morton <[email protected]>
  29. ---
  30. mm/vmscan.c | 126 ++++++++++++++++++++++++----------------------------
  31. 1 file changed, 59 insertions(+), 67 deletions(-)
  32. --- a/mm/vmscan.c
  33. +++ b/mm/vmscan.c
  34. @@ -131,7 +131,6 @@ struct scan_control {
  35. #ifdef CONFIG_LRU_GEN
  36. /* help kswapd make better choices among multiple memcgs */
  37. - unsigned int memcgs_need_aging:1;
  38. unsigned long last_reclaimed;
  39. #endif
  40. @@ -4184,7 +4183,7 @@ done:
  41. return true;
  42. }
  43. -static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
  44. +static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
  45. struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
  46. {
  47. int gen, type, zone;
  48. @@ -4193,6 +4192,13 @@ static bool should_run_aging(struct lruv
  49. unsigned long total = 0;
  50. struct lru_gen_page *lrugen = &lruvec->lrugen;
  51. struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  52. + DEFINE_MIN_SEQ(lruvec);
  53. +
  54. + /* whether this lruvec is completely out of cold pages */
  55. + if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
  56. + *nr_to_scan = 0;
  57. + return true;
  58. + }
  59. for (type = !can_swap; type < ANON_AND_FILE; type++) {
  60. unsigned long seq;
  61. @@ -4221,8 +4227,6 @@ static bool should_run_aging(struct lruv
  62. * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
  63. * ideal number of generations is MIN_NR_GENS+1.
  64. */
  65. - if (min_seq[!can_swap] + MIN_NR_GENS > max_seq)
  66. - return true;
  67. if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
  68. return false;
  69. @@ -4241,40 +4245,54 @@ static bool should_run_aging(struct lruv
  70. return false;
  71. }
  72. -static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl)
  73. +static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
  74. {
  75. - bool need_aging;
  76. - unsigned long nr_to_scan;
  77. - int swappiness = get_swappiness(lruvec, sc);
  78. + int gen, type, zone;
  79. + unsigned long total = 0;
  80. + bool can_swap = get_swappiness(lruvec, sc);
  81. + struct lru_gen_page *lrugen = &lruvec->lrugen;
  82. struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  83. DEFINE_MAX_SEQ(lruvec);
  84. DEFINE_MIN_SEQ(lruvec);
  85. - VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
  86. + for (type = !can_swap; type < ANON_AND_FILE; type++) {
  87. + unsigned long seq;
  88. - mem_cgroup_calculate_protection(NULL, memcg);
  89. + for (seq = min_seq[type]; seq <= max_seq; seq++) {
  90. + gen = lru_gen_from_seq(seq);
  91. - if (mem_cgroup_below_min(memcg))
  92. - return false;
  93. + for (zone = 0; zone < MAX_NR_ZONES; zone++)
  94. + total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
  95. + }
  96. + }
  97. - need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
  98. + /* whether the size is big enough to be helpful */
  99. + return mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
  100. +}
  101. - if (min_ttl) {
  102. - int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
  103. - unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
  104. +static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc,
  105. + unsigned long min_ttl)
  106. +{
  107. + int gen;
  108. + unsigned long birth;
  109. + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  110. + DEFINE_MIN_SEQ(lruvec);
  111. - if (time_is_after_jiffies(birth + min_ttl))
  112. - return false;
  113. + VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
  114. - /* the size is likely too small to be helpful */
  115. - if (!nr_to_scan && sc->priority != DEF_PRIORITY)
  116. - return false;
  117. - }
  118. + /* see the comment on lru_gen_page */
  119. + gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
  120. + birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
  121. - if (need_aging)
  122. - try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);
  123. + if (time_is_after_jiffies(birth + min_ttl))
  124. + return false;
  125. - return true;
  126. + if (!lruvec_is_sizable(lruvec, sc))
  127. + return false;
  128. +
  129. + mem_cgroup_calculate_protection(NULL, memcg);
  130. +
  131. + return !mem_cgroup_below_min(memcg);
  132. }
  133. /* to protect the working set of the last N jiffies */
  134. @@ -4283,46 +4301,32 @@ static unsigned long lru_gen_min_ttl __r
  135. static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
  136. {
  137. struct mem_cgroup *memcg;
  138. - bool success = false;
  139. unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
  140. VM_WARN_ON_ONCE(!current_is_kswapd());
  141. sc->last_reclaimed = sc->nr_reclaimed;
  142. - /*
  143. - * To reduce the chance of going into the aging path, which can be
  144. - * costly, optimistically skip it if the flag below was cleared in the
  145. - * eviction path. This improves the overall performance when multiple
  146. - * memcgs are available.
  147. - */
  148. - if (!sc->memcgs_need_aging) {
  149. - sc->memcgs_need_aging = true;
  150. + /* check the order to exclude compaction-induced reclaim */
  151. + if (!min_ttl || sc->order || sc->priority == DEF_PRIORITY)
  152. return;
  153. - }
  154. -
  155. - set_mm_walk(pgdat);
  156. memcg = mem_cgroup_iter(NULL, NULL, NULL);
  157. do {
  158. struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
  159. - if (age_lruvec(lruvec, sc, min_ttl))
  160. - success = true;
  161. + if (lruvec_is_reclaimable(lruvec, sc, min_ttl)) {
  162. + mem_cgroup_iter_break(NULL, memcg);
  163. + return;
  164. + }
  165. cond_resched();
  166. } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
  167. - clear_mm_walk();
  168. -
  169. - /* check the order to exclude compaction-induced reclaim */
  170. - if (success || !min_ttl || sc->order)
  171. - return;
  172. -
  173. /*
  174. * The main goal is to OOM kill if every generation from all memcgs is
  175. * younger than min_ttl. However, another possibility is all memcgs are
  176. - * either below min or empty.
  177. + * either too small or below min.
  178. */
  179. if (mutex_trylock(&oom_lock)) {
  180. struct oom_control oc = {
  181. @@ -4830,33 +4834,27 @@ retry:
  182. * reclaim.
  183. */
  184. static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
  185. - bool can_swap, bool *need_aging)
  186. + bool can_swap)
  187. {
  188. unsigned long nr_to_scan;
  189. struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  190. DEFINE_MAX_SEQ(lruvec);
  191. - DEFINE_MIN_SEQ(lruvec);
  192. if (mem_cgroup_below_min(memcg) ||
  193. (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
  194. return 0;
  195. - *need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan);
  196. - if (!*need_aging)
  197. + if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan))
  198. return nr_to_scan;
  199. /* skip the aging path at the default priority */
  200. if (sc->priority == DEF_PRIORITY)
  201. - goto done;
  202. + return nr_to_scan;
  203. - /* leave the work to lru_gen_age_node() */
  204. - if (current_is_kswapd())
  205. - return 0;
  206. + try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false);
  207. - if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
  208. - return nr_to_scan;
  209. -done:
  210. - return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
  211. + /* skip this lruvec as it's low on cold pages */
  212. + return 0;
  213. }
  214. static unsigned long get_nr_to_reclaim(struct scan_control *sc)
  215. @@ -4875,9 +4873,7 @@ static unsigned long get_nr_to_reclaim(s
  216. static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
  217. {
  218. struct blk_plug plug;
  219. - bool need_aging = false;
  220. unsigned long scanned = 0;
  221. - unsigned long reclaimed = sc->nr_reclaimed;
  222. unsigned long nr_to_reclaim = get_nr_to_reclaim(sc);
  223. lru_add_drain();
  224. @@ -4898,13 +4894,13 @@ static void lru_gen_shrink_lruvec(struct
  225. else
  226. swappiness = 0;
  227. - nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging);
  228. + nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
  229. if (!nr_to_scan)
  230. - goto done;
  231. + break;
  232. delta = evict_pages(lruvec, sc, swappiness);
  233. if (!delta)
  234. - goto done;
  235. + break;
  236. scanned += delta;
  237. if (scanned >= nr_to_scan)
  238. @@ -4916,10 +4912,6 @@ static void lru_gen_shrink_lruvec(struct
  239. cond_resched();
  240. }
  241. - /* see the comment in lru_gen_age_node() */
  242. - if (sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH && !need_aging)
  243. - sc->memcgs_need_aging = false;
  244. -done:
  245. clear_mm_walk();
  246. blk_finish_plug(&plug);