020-v6.3-21-mm-multi-gen-LRU-rename-lru_gen_struct-to-lru_gen_pa.patch

From 348fdbada9fb3f0bf1a53651be46319105af187f Mon Sep 17 00:00:00 2001
From: Yu Zhao <[email protected]>
Date: Wed, 21 Dec 2022 21:18:59 -0700
Subject: [PATCH 21/29] mm: multi-gen LRU: rename lru_gen_struct to
 lru_gen_page

Patch series "mm: multi-gen LRU: memcg LRU", v3.

Overview
========

An memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
since each node and memcg combination has an LRU of pages (see
mem_cgroup_lruvec()).
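
For orientation, a minimal sketch of how a memcg and node pair
resolves to the lruvec that holds its LRU of pages (kernel context;
lruvec_of() is a hypothetical wrapper, not a helper from this series):

	/* each (memcg, node) combination owns one lruvec */
	static struct lruvec *lruvec_of(struct mem_cgroup *memcg, int nid)
	{
		struct pglist_data *pgdat = NODE_DATA(nid);

		return mem_cgroup_lruvec(memcg, pgdat);
	}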

Its goal is to improve the scalability of global reclaim, which is
critical to system-wide memory overcommit in data centers. Note that
memcg reclaim is currently out of scope.

Its memory overhead is one pointer in each lruvec and a negligible
amount in each pglist_data. In terms of traversing memcgs during
global reclaim, it improves the best-case complexity from O(n) to O(1)
and does not affect the worst-case complexity O(n). Therefore, on
average, it has a sublinear complexity in contrast to the current
linear complexity.

The basic structure of an memcg LRU can be understood by an analogy to
the active/inactive LRU (of pages); see the sketch after this list:
1. It has the young and the old (generations), i.e., the counterparts
   to the active and the inactive;
2. The increment of max_seq triggers promotion, i.e., the counterpart
   to activation;
3. Other events trigger similar operations, e.g., offlining an memcg
   triggers demotion, i.e., the counterpart to deactivation.
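
A minimal sketch of that analogy, with illustrative names only (the
concrete struct lru_gen_memcg arrives later in this series, so nothing
below is the actual layout):

	#define NR_MEMCG_GENS	2	/* the young and the old */

	/* hypothetical per-node LRU of memcgs, for exposition only */
	struct memcg_lru_sketch {
		/* incrementing seq promotes, i.e., the counterpart to activation */
		unsigned long seq;
		/* one list of memcgs per generation */
		struct list_head lists[NR_MEMCG_GENS];
		/* serializes promotion and demotion */
		spinlock_t lock;
	};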

In terms of global reclaim, it has two distinct features (a sketch of
the first follows this list):
1. Sharding, which allows each thread to start at a random memcg (in
   the old generation) and improves parallelism;
2. Eventual fairness, which allows direct reclaim to bail out at will
   and reduces latency without affecting fairness over some time.
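
As a sketch of sharding (again hypothetical code, not from this
series): a reclaiming thread could step a random distance into the old
generation before walking, so concurrent walkers spread out instead of
contending on the same memcg:

	/* pick a random starting point among the old-generation memcgs */
	static struct list_head *pick_old_start(struct memcg_lru_sketch *lru)
	{
		/* with two generations, the old one is seq + 1 (mod 2) */
		int old = (lru->seq + 1) % NR_MEMCG_GENS;
		struct list_head *pos = lru->lists[old].next;
		int skip = get_random_u32_below(8);	/* arbitrary small bound */

		while (skip-- && !list_is_last(pos, &lru->lists[old]))
			pos = pos->next;

		return pos;
	}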

The commit message in patch 6 details the workflow:
https://lore.kernel.org/r/[email protected]/

The following is a simple test to quickly verify its effectiveness.

Test design:
1. Create multiple memcgs.
2. Each memcg contains a job (fio).
3. All jobs access the same amount of memory randomly.
4. The system does not experience global memory pressure.
5. Periodically write to the root memory.reclaim.

Desired outcome:
1. All memcgs have similar pgsteal counts, i.e., stddev(pgsteal)
   over mean(pgsteal) is close to 0%.
2. The total pgsteal is close to the total requested through
   memory.reclaim, i.e., sum(pgsteal) over sum(requested) is close
   to 100%.

Actual outcome [1]:

                                  MGLRU off    MGLRU on
  stddev(pgsteal) / mean(pgsteal)       75%         20%
  sum(pgsteal) / sum(requested)        425%         95%

####################################################################
    MEMCGS=128

    for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
        mkdir /sys/fs/cgroup/memcg$memcg
    done

    start() {
        echo $BASHPID > /sys/fs/cgroup/memcg$memcg/cgroup.procs

        fio -name=memcg$memcg --numjobs=1 --ioengine=mmap \
            --filename=/dev/zero --size=1920M --rw=randrw \
            --rate=64m,64m --random_distribution=random \
            --fadvise_hint=0 --time_based --runtime=10h \
            --group_reporting --minimal
    }

    for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
        start &
    done

    sleep 600

    for ((i = 0; i < 600; i++)); do
        echo 256m >/sys/fs/cgroup/memory.reclaim
        sleep 6
    done

    for ((memcg = 0; memcg < $MEMCGS; memcg++)); do
        grep "pgsteal " /sys/fs/cgroup/memcg$memcg/memory.stat
    done
####################################################################

[1] This was obtained from running the above script (which touches
    less than 256GB of memory) on an EPYC 7B13 with 512GB DRAM for
    over an hour.
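
The two ratios in the table above can be recomputed from the script's
grep output with a small userspace helper like the sketch below (not
part of the patch; it assumes one "pgsteal <count>" line per memcg on
stdin, and the sum must still be compared against the requested total
by hand):

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		double x, sum = 0, sq = 0;
		int n = 0;

		/* each input line looks like "pgsteal 123456" */
		while (scanf("%*s %lf", &x) == 1) {
			sum += x;
			sq += x * x;
			n++;
		}
		if (!n)
			return 1;

		double mean = sum / n;
		double stddev = sqrt(sq / n - mean * mean);

		printf("stddev(pgsteal) / mean(pgsteal): %.0f%%\n",
		       100 * stddev / mean);
		printf("sum(pgsteal): %.0f pages\n", sum);
		return 0;
	}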

This patch (of 8):

The new name lru_gen_page will be more distinct from the coming
lru_gen_memcg.

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yu Zhao <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Michael Larabel <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---
 include/linux/mm_inline.h |  4 ++--
 include/linux/mmzone.h    |  6 +++---
 mm/vmscan.c               | 34 +++++++++++++++++-----------------
 mm/workingset.c           |  4 ++--
 4 files changed, 24 insertions(+), 24 deletions(-)

--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -168,7 +168,7 @@ static inline void lru_gen_update_size(s
 	int zone = page_zonenum(page);
 	int delta = thp_nr_pages(page);
 	enum lru_list lru = type * LRU_INACTIVE_FILE;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
 	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
@@ -214,7 +214,7 @@ static inline bool lru_gen_add_page(stru
 	int gen = page_lru_gen(page);
 	int type = page_is_file_lru(page);
 	int zone = page_zonenum(page);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	VM_WARN_ON_ONCE_PAGE(gen != -1, page);
 
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -394,7 +394,7 @@ enum {
  * The number of pages in each generation is eventually consistent and therefore
  * can be transiently negative when reset_batch_size() is pending.
  */
-struct lru_gen_struct {
+struct lru_gen_page {
 	/* the aging increments the youngest generation number */
 	unsigned long max_seq;
 	/* the eviction increments the oldest generation numbers */
@@ -451,7 +451,7 @@ struct lru_gen_mm_state {
 struct lru_gen_mm_walk {
 	/* the lruvec under reclaim */
 	struct lruvec *lruvec;
-	/* unstable max_seq from lru_gen_struct */
+	/* unstable max_seq from lru_gen_page */
 	unsigned long max_seq;
 	/* the next address within an mm to scan */
 	unsigned long next_addr;
@@ -514,7 +514,7 @@ struct lruvec {
 	unsigned long			flags;
 #ifdef CONFIG_LRU_GEN
 	/* evictable pages divided into generations */
-	struct lru_gen_struct		lrugen;
+	struct lru_gen_page		lrugen;
 	/* to concurrently iterate lru_gen_mm_list */
 	struct lru_gen_mm_state		mm_state;
 #endif
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2910,7 +2910,7 @@ static int get_nr_gens(struct lruvec *lr
 
 static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
 {
-	/* see the comment on lru_gen_struct */
+	/* see the comment on lru_gen_page */
 	return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
 	       get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
 	       get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
@@ -3316,7 +3316,7 @@ struct ctrl_pos {
 static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
 			  struct ctrl_pos *pos)
 {
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 	int hist = lru_hist_from_seq(lrugen->min_seq[type]);
 
 	pos->refaulted = lrugen->avg_refaulted[type][tier] +
@@ -3331,7 +3331,7 @@ static void read_ctrl_pos(struct lruvec
 static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
 {
 	int hist, tier;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 	bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
 	unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
 
@@ -3408,7 +3408,7 @@ static int page_update_gen(struct page *
 static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
 {
 	int type = page_is_file_lru(page);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
 	unsigned long new_flags, old_flags = READ_ONCE(page->flags);
 
@@ -3453,7 +3453,7 @@ static void update_batch_size(struct lru
 static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
 {
 	int gen, type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	walk->batched = 0;
 
@@ -3979,7 +3979,7 @@ static bool inc_min_seq(struct lruvec *l
 {
 	int zone;
 	int remaining = MAX_LRU_BATCH;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
 
 	if (type == LRU_GEN_ANON && !can_swap)
@@ -4015,7 +4015,7 @@ static bool try_to_inc_min_seq(struct lr
 {
 	int gen, type, zone;
 	bool success = false;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 	DEFINE_MIN_SEQ(lruvec);
 
 	VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
@@ -4036,7 +4036,7 @@ next:
 		;
 	}
 
-	/* see the comment on lru_gen_struct */
+	/* see the comment on lru_gen_page */
 	if (can_swap) {
 		min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
 		min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
@@ -4058,7 +4058,7 @@ static void inc_max_seq(struct lruvec *l
 {
 	int prev, next;
 	int type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	spin_lock_irq(&lruvec->lru_lock);
 
@@ -4116,7 +4116,7 @@ static bool try_to_inc_max_seq(struct lr
 	bool success;
 	struct lru_gen_mm_walk *walk;
 	struct mm_struct *mm = NULL;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
 
@@ -4181,7 +4181,7 @@ static bool should_run_aging(struct lruv
 	unsigned long old = 0;
 	unsigned long young = 0;
 	unsigned long total = 0;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 
 	for (type = !can_swap; type < ANON_AND_FILE; type++) {
@@ -4466,7 +4466,7 @@ static bool sort_page(struct lruvec *lru
 	int delta = thp_nr_pages(page);
 	int refs = page_lru_refs(page);
 	int tier = lru_tier_from_refs(refs);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
 
@@ -4566,7 +4566,7 @@ static int scan_pages(struct lruvec *lru
 	int scanned = 0;
 	int isolated = 0;
 	int remaining = MAX_LRU_BATCH;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 
 	VM_WARN_ON_ONCE(!list_empty(list));
@@ -4967,7 +4967,7 @@ done:
 
 static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
 {
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	if (lrugen->enabled) {
 		enum lru_list lru;
@@ -5247,7 +5247,7 @@ static void lru_gen_seq_show_full(struct
 	int i;
 	int type, tier;
 	int hist = lru_hist_from_seq(seq);
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
 		seq_printf(m, " %10d", tier);
@@ -5296,7 +5296,7 @@ static int lru_gen_seq_show(struct seq_f
 	unsigned long seq;
 	bool full = !debugfs_real_fops(m->file)->write;
 	struct lruvec *lruvec = v;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 	int nid = lruvec_pgdat(lruvec)->node_id;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
 	DEFINE_MAX_SEQ(lruvec);
@@ -5549,7 +5549,7 @@ void lru_gen_init_lruvec(struct lruvec *
 {
 	int i;
 	int gen, type, zone;
-	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	struct lru_gen_page *lrugen = &lruvec->lrugen;
 
 	lrugen->max_seq = MIN_NR_GENS + 1;
 	lrugen->enabled = lru_gen_enabled();
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -223,7 +223,7 @@ static void *lru_gen_eviction(struct pag
 	unsigned long token;
 	unsigned long min_seq;
 	struct lruvec *lruvec;
-	struct lru_gen_struct *lrugen;
+	struct lru_gen_page *lrugen;
 	int type = page_is_file_lru(page);
 	int delta = thp_nr_pages(page);
 	int refs = page_lru_refs(page);
@@ -252,7 +252,7 @@ static void lru_gen_refault(struct page
 	unsigned long token;
 	unsigned long min_seq;
 	struct lruvec *lruvec;
-	struct lru_gen_struct *lrugen;
+	struct lru_gen_page *lrugen;
 	struct mem_cgroup *memcg;
 	struct pglist_data *pgdat;
 	int type = page_is_file_lru(page);