020-v6.1-06-mm-multi-gen-LRU-minimal-implementation.patch 43 KB

From b564b9471cd60ef1ee3961a224898ce4a9620d84 Mon Sep 17 00:00:00 2001
From: Yu Zhao <[email protected]>
Date: Sun, 18 Sep 2022 02:00:03 -0600
Subject: [PATCH 06/29] mm: multi-gen LRU: minimal implementation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

To avoid confusion, the terms "promotion" and "demotion" will be applied
to the multi-gen LRU, as a new convention; the terms "activation" and
"deactivation" will be applied to the active/inactive LRU, as usual.

The aging produces young generations. Given an lruvec, it increments
max_seq when max_seq-min_seq+1 approaches MIN_NR_GENS. The aging promotes
hot pages to the youngest generation when it finds them accessed through
page tables; the demotion of cold pages happens consequently when it
increments max_seq. Promotion in the aging path does not involve any LRU
list operations, only the updates of the gen counter and
lrugen->nr_pages[]; demotion, unless as the result of the increment of
max_seq, requires LRU list operations, e.g., lru_deactivate_fn(). The
aging has the complexity O(nr_hot_pages), since it is only interested in
hot pages.
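
As a rough illustration of the seq arithmetic above (not part of this
patch), the standalone sketch below shows how generation numbers wrap
around the sequence counters and roughly when the aging would increment
max_seq; gen_from_seq() is a userspace stand-in for the lru_gen_from_seq()
helper, and MIN_NR_GENS/MAX_NR_GENS mirror the values defined later in
this series:

  #include <stdio.h>

  #define MIN_NR_GENS 2
  #define MAX_NR_GENS 4

  /* a generation is a ring index derived from a monotonic sequence counter */
  static int gen_from_seq(unsigned long seq)
  {
          return seq % MAX_NR_GENS;
  }

  int main(void)
  {
          unsigned long min_seq = 7, max_seq = 8;

          /* the aging kicks in when the number of generations falls to MIN_NR_GENS */
          if (max_seq - min_seq + 1 <= MIN_NR_GENS)
                  max_seq++;      /* demotes the formerly youngest generation */

          printf("oldest gen %d, youngest gen %d\n",
                 gen_from_seq(min_seq), gen_from_seq(max_seq));
          return 0;
  }
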
The eviction consumes old generations. Given an lruvec, it increments
min_seq when lrugen->lists[] indexed by min_seq%MAX_NR_GENS becomes empty.
A feedback loop modeled after the PID controller monitors refaults over
anon and file types and decides which type to evict when both types are
available from the same generation.
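
The core of that feedback loop is a cross-multiplied comparison of
refaulted/(evicted+protected) ratios. The simplified userspace sketch
below mirrors the idea behind the read_ctrl_pos()/positive_ctrl_err()
pair added to mm/vmscan.c further down, but omits the gain factors and
the MIN_LRU_BATCH floor; should_protect() is an illustrative name, not a
kernel API:

  #include <stdio.h>

  struct pos {
          unsigned long refaulted;        /* refaults seen from this tier */
          unsigned long total;            /* evicted + protected from this tier */
  };

  /* protect the PV tier when it refaults proportionally more than the SP (first) tier */
  static int should_protect(struct pos sp, struct pos pv)
  {
          /* compare pv.refaulted/pv.total against sp.refaulted/sp.total without dividing */
          return pv.refaulted * (sp.total + 1) > (sp.refaulted + 1) * pv.total;
  }

  int main(void)
  {
          struct pos first_tier = { .refaulted = 10, .total = 1000 };     /* SP: 1% */
          struct pos upper_tier = { .refaulted = 40, .total = 800 };      /* PV: 5% */

          printf("protect upper tier: %d\n", should_protect(first_tier, upper_tier));
          return 0;
  }
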
The protection of pages accessed multiple times through file descriptors
takes place in the eviction path. Each generation is divided into
multiple tiers. A page accessed N times through file descriptors is in
tier order_base_2(N). Tiers do not have dedicated lrugen->lists[], only
bits in page->flags. The aforementioned feedback loop also monitors
refaults over all tiers and decides when to protect pages in which tiers
(N>1), using the first tier (N=0,1) as a baseline. The first tier
contains single-use unmapped clean pages, which are most likely the best
choices. In contrast to promotion in the aging path, the protection of a
page in the eviction path is achieved by moving this page to the next
generation, i.e., min_seq+1, if the feedback loop decides so. This
approach has the following advantages:

1. It removes the cost of activation in the buffered access path by
   inferring whether pages accessed multiple times through file
   descriptors are statistically hot and thus worth protecting in the
   eviction path.
2. It takes pages accessed through page tables into account and avoids
   overprotecting pages accessed multiple times through file
   descriptors. (Pages accessed through page tables are in the first
   tier, since N=0.)
3. More tiers provide better protection for pages accessed more than
   twice through file descriptors, when under heavy buffered I/O
   workloads.
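
For concreteness, the tier mapping described above (tier = order_base_2(N),
with N=0,1 collapsing into the first tier) can be tabulated with a small
standalone program; the order_base_2() below is a userspace stand-in for
the kernel helper, and the cap at MAX_NR_TIERS-1 reflects the 4-tier limit
introduced later in this patch:

  #include <stdio.h>

  #define MAX_NR_TIERS 4

  /* stand-in for the kernel's order_base_2(): ceil(log2(n)), 0 for n <= 1 */
  static unsigned int order_base_2(unsigned long n)
  {
          unsigned int order = 0;

          while ((1UL << order) < n)
                  order++;
          return order;
  }

  int main(void)
  {
          unsigned long n;

          /* N accesses through file descriptors -> tier, capped at MAX_NR_TIERS - 1 */
          for (n = 0; n <= 8; n++) {
                  unsigned int tier = order_base_2(n);

                  if (tier > MAX_NR_TIERS - 1)
                          tier = MAX_NR_TIERS - 1;
                  printf("N=%lu -> tier %u\n", n, tier);
          }
          return 0;
  }
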
Server benchmark results:
  Single workload:
    fio (buffered I/O): +[30, 32]%
                IOPS         BW
      5.19-rc1: 2673k        10.2GiB/s
      patch1-6: 3491k        13.3GiB/s

  Single workload:
    memcached (anon): -[4, 6]%
                Ops/sec      KB/sec
      5.19-rc1: 1161501.04   45177.25
      patch1-6: 1106168.46   43025.04

  Configurations:
    CPU: two Xeon 6154
    Mem: total 256G

    Node 1 was only used as a ram disk to reduce the variance in the
    results.

    patch drivers/block/brd.c <<EOF
    99,100c99,100
    <   gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
    <   page = alloc_page(gfp_flags);
    ---
    >   gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM | __GFP_THISNODE;
    >   page = alloc_pages_node(1, gfp_flags, 0);
    EOF

    cat >>/etc/systemd/system.conf <<EOF
    CPUAffinity=numa
    NUMAPolicy=bind
    NUMAMask=0
    EOF

    cat >>/etc/memcached.conf <<EOF
    -m 184320
    -s /var/run/memcached/memcached.sock
    -a 0766
    -t 36
    -B binary
    EOF

    cat fio.sh
    modprobe brd rd_nr=1 rd_size=113246208
    swapoff -a
    mkfs.ext4 /dev/ram0
    mount -t ext4 /dev/ram0 /mnt
    mkdir /sys/fs/cgroup/user.slice/test
    echo 38654705664 >/sys/fs/cgroup/user.slice/test/memory.max
    echo $$ >/sys/fs/cgroup/user.slice/test/cgroup.procs
    fio -name=mglru --numjobs=72 --directory=/mnt --size=1408m \
      --buffered=1 --ioengine=io_uring --iodepth=128 \
      --iodepth_batch_submit=32 --iodepth_batch_complete=32 \
      --rw=randread --random_distribution=random --norandommap \
      --time_based --ramp_time=10m --runtime=5m --group_reporting

    cat memcached.sh
    modprobe brd rd_nr=1 rd_size=113246208
    swapoff -a
    mkswap /dev/ram0
    swapon /dev/ram0
    memtier_benchmark -S /var/run/memcached/memcached.sock \
      -P memcache_binary -n allkeys --key-minimum=1 \
      --key-maximum=65000000 --key-pattern=P:P -c 1 -t 36 \
      --ratio 1:0 --pipeline 8 -d 2000
    memtier_benchmark -S /var/run/memcached/memcached.sock \
      -P memcache_binary -n allkeys --key-minimum=1 \
      --key-maximum=65000000 --key-pattern=R:R -c 1 -t 36 \
      --ratio 0:1 --pipeline 8 --randomize --distinct-client-seed

Client benchmark results:
  kswapd profiles:
    5.19-rc1
      40.33%  page_vma_mapped_walk (overhead)
      21.80%  lzo1x_1_do_compress (real work)
       7.53%  do_raw_spin_lock
       3.95%  _raw_spin_unlock_irq
       2.52%  vma_interval_tree_iter_next
       2.37%  page_referenced_one
       2.28%  vma_interval_tree_subtree_search
       1.97%  anon_vma_interval_tree_iter_first
       1.60%  ptep_clear_flush
       1.06%  __zram_bvec_write

    patch1-6
      39.03%  lzo1x_1_do_compress (real work)
      18.47%  page_vma_mapped_walk (overhead)
       6.74%  _raw_spin_unlock_irq
       3.97%  do_raw_spin_lock
       2.49%  ptep_clear_flush
       2.48%  anon_vma_interval_tree_iter_first
       1.92%  page_referenced_one
       1.88%  __zram_bvec_write
       1.48%  memmove
       1.31%  vma_interval_tree_iter_next

  Configurations:
    CPU: single Snapdragon 7c
    Mem: total 4G

    ChromeOS MemoryPressure [1]

[1] https://chromium.googlesource.com/chromiumos/platform/tast-tests/

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yu Zhao <[email protected]>
Acked-by: Brian Geffon <[email protected]>
Acked-by: Jan Alexander Steffens (heftig) <[email protected]>
Acked-by: Oleksandr Natalenko <[email protected]>
Acked-by: Steven Barrett <[email protected]>
Acked-by: Suleiman Souhlal <[email protected]>
Tested-by: Daniel Byrne <[email protected]>
Tested-by: Donald Carr <[email protected]>
Tested-by: Holger Hoffstätte <[email protected]>
Tested-by: Konstantin Kharlamov <[email protected]>
Tested-by: Shuang Zhai <[email protected]>
Tested-by: Sofia Trinh <[email protected]>
Tested-by: Vaibhav Jain <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Barry Song <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Michael Larabel <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Qi Zheng <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---
 include/linux/mm_inline.h         |  36 ++
 include/linux/mmzone.h            |  41 ++
 include/linux/page-flags-layout.h |   5 +-
 kernel/bounds.c                   |   2 +
 mm/Kconfig                        |  11 +
 mm/swap.c                         |  39 ++
 mm/vmscan.c                       | 792 +++++++++++++++++++++++++++++-
 mm/workingset.c                   | 110 ++++-
 8 files changed, 1025 insertions(+), 11 deletions(-)
  187. --- a/include/linux/mm_inline.h
  188. +++ b/include/linux/mm_inline.h
  189. @@ -106,6 +106,33 @@ static inline int lru_gen_from_seq(unsig
  190. return seq % MAX_NR_GENS;
  191. }
  192. +static inline int lru_hist_from_seq(unsigned long seq)
  193. +{
  194. + return seq % NR_HIST_GENS;
  195. +}
  196. +
  197. +static inline int lru_tier_from_refs(int refs)
  198. +{
  199. + VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
  200. +
  201. + /* see the comment in page_lru_refs() */
  202. + return order_base_2(refs + 1);
  203. +}
  204. +
  205. +static inline int page_lru_refs(struct page *page)
  206. +{
  207. + unsigned long flags = READ_ONCE(page->flags);
  208. + bool workingset = flags & BIT(PG_workingset);
  209. +
  210. + /*
  211. + * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
  212. + * total number of accesses is N>1, since N=0,1 both map to the first
  213. + * tier. lru_tier_from_refs() will account for this off-by-one. Also see
  214. + * the comment on MAX_NR_TIERS.
  215. + */
  216. + return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
  217. +}
  218. +
  219. static inline int page_lru_gen(struct page *page)
  220. {
  221. unsigned long flags = READ_ONCE(page->flags);
  222. @@ -158,6 +185,15 @@ static inline void lru_gen_update_size(s
  223. __update_lru_size(lruvec, lru, zone, -delta);
  224. return;
  225. }
  226. +
  227. + /* promotion */
  228. + if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
  229. + __update_lru_size(lruvec, lru, zone, -delta);
  230. + __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
  231. + }
  232. +
  233. + /* demotion requires isolation, e.g., lru_deactivate_fn() */
  234. + VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
  235. }
  236. static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming)
  237. --- a/include/linux/mmzone.h
  238. +++ b/include/linux/mmzone.h
  239. @@ -327,6 +327,28 @@ enum lruvec_flags {
  240. #define MIN_NR_GENS 2U
  241. #define MAX_NR_GENS 4U
  242. +/*
  243. + * Each generation is divided into multiple tiers. A page accessed N times
  244. + * through file descriptors is in tier order_base_2(N). A page in the first tier
  245. + * (N=0,1) is marked by PG_referenced unless it was faulted in through page
  246. + * tables or read ahead. A page in any other tier (N>1) is marked by
  247. + * PG_referenced and PG_workingset. This implies a minimum of two tiers is
  248. + * supported without using additional bits in page->flags.
  249. + *
  250. + * In contrast to moving across generations which requires the LRU lock, moving
  251. + * across tiers only involves atomic operations on page->flags and therefore
  252. + * has a negligible cost in the buffered access path. In the eviction path,
  253. + * comparisons of refaulted/(evicted+protected) from the first tier and the
  254. + * rest infer whether pages accessed multiple times through file descriptors
  255. + * are statistically hot and thus worth protecting.
  256. + *
  257. + * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
  258. + * number of categories of the active/inactive LRU when keeping track of
  259. + * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
  260. + * page->flags.
  261. + */
  262. +#define MAX_NR_TIERS 4U
  263. +
  264. #ifndef __GENERATING_BOUNDS_H
  265. struct lruvec;
  266. @@ -341,6 +363,16 @@ enum {
  267. LRU_GEN_FILE,
  268. };
  269. +#define MIN_LRU_BATCH BITS_PER_LONG
  270. +#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
  271. +
  272. +/* whether to keep historical stats from evicted generations */
  273. +#ifdef CONFIG_LRU_GEN_STATS
  274. +#define NR_HIST_GENS MAX_NR_GENS
  275. +#else
  276. +#define NR_HIST_GENS 1U
  277. +#endif
  278. +
  279. /*
  280. * The youngest generation number is stored in max_seq for both anon and file
  281. * types as they are aged on an equal footing. The oldest generation numbers are
  282. @@ -363,6 +395,15 @@ struct lru_gen_struct {
  283. struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
  284. /* the multi-gen LRU sizes, eventually consistent */
  285. long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
  286. + /* the exponential moving average of refaulted */
  287. + unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
  288. + /* the exponential moving average of evicted+protected */
  289. + unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
  290. + /* the first tier doesn't need protection, hence the minus one */
  291. + unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
  292. + /* can be modified without holding the LRU lock */
  293. + atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
  294. + atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
  295. };
  296. void lru_gen_init_lruvec(struct lruvec *lruvec);
  297. --- a/include/linux/page-flags-layout.h
  298. +++ b/include/linux/page-flags-layout.h
  299. @@ -106,7 +106,10 @@
  300. #error "Not enough bits in page flags"
  301. #endif
  302. -#define LRU_REFS_WIDTH 0
  303. +/* see the comment on MAX_NR_TIERS */
  304. +#define LRU_REFS_WIDTH min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \
  305. + ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
  306. + NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
  307. #endif
  308. #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
  309. --- a/kernel/bounds.c
  310. +++ b/kernel/bounds.c
  311. @@ -24,8 +24,10 @@ int main(void)
  312. DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
  313. #ifdef CONFIG_LRU_GEN
  314. DEFINE(LRU_GEN_WIDTH, order_base_2(MAX_NR_GENS + 1));
  315. + DEFINE(__LRU_REFS_WIDTH, MAX_NR_TIERS - 2);
  316. #else
  317. DEFINE(LRU_GEN_WIDTH, 0);
  318. + DEFINE(__LRU_REFS_WIDTH, 0);
  319. #endif
  320. /* End of constants */
  321. --- a/mm/Kconfig
  322. +++ b/mm/Kconfig
  323. @@ -897,6 +897,7 @@ config IO_MAPPING
  324. config SECRETMEM
  325. def_bool ARCH_HAS_SET_DIRECT_MAP && !EMBEDDED
  326. +# multi-gen LRU {
  327. config LRU_GEN
  328. bool "Multi-Gen LRU"
  329. depends on MMU
  330. @@ -905,6 +906,16 @@ config LRU_GEN
  331. help
  332. A high performance LRU implementation to overcommit memory.
  333. +config LRU_GEN_STATS
  334. + bool "Full stats for debugging"
  335. + depends on LRU_GEN
  336. + help
  337. + Do not enable this option unless you plan to look at historical stats
  338. + from evicted generations for debugging purpose.
  339. +
  340. + This option has a per-memcg and per-node memory overhead.
  341. +# }
  342. +
  343. source "mm/damon/Kconfig"
  344. endmenu
  345. --- a/mm/swap.c
  346. +++ b/mm/swap.c
  347. @@ -389,6 +389,40 @@ static void __lru_cache_activate_page(st
  348. local_unlock(&lru_pvecs.lock);
  349. }
  350. +#ifdef CONFIG_LRU_GEN
  351. +static void page_inc_refs(struct page *page)
  352. +{
  353. + unsigned long new_flags, old_flags = READ_ONCE(page->flags);
  354. +
  355. + if (PageUnevictable(page))
  356. + return;
  357. +
  358. + if (!PageReferenced(page)) {
  359. + SetPageReferenced(page);
  360. + return;
  361. + }
  362. +
  363. + if (!PageWorkingset(page)) {
  364. + SetPageWorkingset(page);
  365. + return;
  366. + }
  367. +
  368. + /* see the comment on MAX_NR_TIERS */
  369. + do {
  370. + new_flags = old_flags & LRU_REFS_MASK;
  371. + if (new_flags == LRU_REFS_MASK)
  372. + break;
  373. +
  374. + new_flags += BIT(LRU_REFS_PGOFF);
  375. + new_flags |= old_flags & ~LRU_REFS_MASK;
  376. + } while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
  377. +}
  378. +#else
  379. +static void page_inc_refs(struct page *page)
  380. +{
  381. +}
  382. +#endif /* CONFIG_LRU_GEN */
  383. +
  384. /*
  385. * Mark a page as having seen activity.
  386. *
  387. @@ -403,6 +437,11 @@ void mark_page_accessed(struct page *pag
  388. {
  389. page = compound_head(page);
  390. + if (lru_gen_enabled()) {
  391. + page_inc_refs(page);
  392. + return;
  393. + }
  394. +
  395. if (!PageReferenced(page)) {
  396. SetPageReferenced(page);
  397. } else if (PageUnevictable(page)) {
  398. --- a/mm/vmscan.c
  399. +++ b/mm/vmscan.c
  400. @@ -1142,9 +1142,11 @@ static int __remove_mapping(struct addre
  401. if (PageSwapCache(page)) {
  402. swp_entry_t swap = { .val = page_private(page) };
  403. - mem_cgroup_swapout(page, swap);
  404. +
  405. + /* get a shadow entry before mem_cgroup_swapout() clears page_memcg() */
  406. if (reclaimed && !mapping_exiting(mapping))
  407. shadow = workingset_eviction(page, target_memcg);
  408. + mem_cgroup_swapout(page, swap);
  409. __delete_from_swap_cache(page, swap, shadow);
  410. xa_unlock_irq(&mapping->i_pages);
  411. put_swap_page(page, swap);
  412. @@ -2502,6 +2504,9 @@ static void prepare_scan_count(pg_data_t
  413. unsigned long file;
  414. struct lruvec *target_lruvec;
  415. + if (lru_gen_enabled())
  416. + return;
  417. +
  418. target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
  419. /*
  420. @@ -2827,6 +2832,17 @@ static bool can_age_anon_pages(struct pg
  421. * shorthand helpers
  422. ******************************************************************************/
  423. +#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset))
  424. +
  425. +#define DEFINE_MAX_SEQ(lruvec) \
  426. + unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
  427. +
  428. +#define DEFINE_MIN_SEQ(lruvec) \
  429. + unsigned long min_seq[ANON_AND_FILE] = { \
  430. + READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
  431. + READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
  432. + }
  433. +
  434. #define for_each_gen_type_zone(gen, type, zone) \
  435. for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
  436. for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
  437. @@ -2852,6 +2868,745 @@ static struct lruvec __maybe_unused *get
  438. return pgdat ? &pgdat->__lruvec : NULL;
  439. }
  440. +static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
  441. +{
  442. + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  443. + struct pglist_data *pgdat = lruvec_pgdat(lruvec);
  444. +
  445. + if (!can_demote(pgdat->node_id, sc) &&
  446. + mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
  447. + return 0;
  448. +
  449. + return mem_cgroup_swappiness(memcg);
  450. +}
  451. +
  452. +static int get_nr_gens(struct lruvec *lruvec, int type)
  453. +{
  454. + return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
  455. +}
  456. +
  457. +static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
  458. +{
  459. + /* see the comment on lru_gen_struct */
  460. + return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
  461. + get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
  462. + get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
  463. +}
  464. +
  465. +/******************************************************************************
  466. + * refault feedback loop
  467. + ******************************************************************************/
  468. +
  469. +/*
  470. + * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
  471. + *
  472. + * The P term is refaulted/(evicted+protected) from a tier in the generation
  473. + * currently being evicted; the I term is the exponential moving average of the
  474. + * P term over the generations previously evicted, using the smoothing factor
  475. + * 1/2; the D term isn't supported.
  476. + *
  477. + * The setpoint (SP) is always the first tier of one type; the process variable
  478. + * (PV) is either any tier of the other type or any other tier of the same
  479. + * type.
  480. + *
  481. + * The error is the difference between the SP and the PV; the correction is to
  482. + * turn off protection when SP>PV or turn on protection when SP<PV.
  483. + *
  484. + * For future optimizations:
  485. + * 1. The D term may discount the other two terms over time so that long-lived
  486. + * generations can resist stale information.
  487. + */
  488. +struct ctrl_pos {
  489. + unsigned long refaulted;
  490. + unsigned long total;
  491. + int gain;
  492. +};
  493. +
  494. +static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
  495. + struct ctrl_pos *pos)
  496. +{
  497. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  498. + int hist = lru_hist_from_seq(lrugen->min_seq[type]);
  499. +
  500. + pos->refaulted = lrugen->avg_refaulted[type][tier] +
  501. + atomic_long_read(&lrugen->refaulted[hist][type][tier]);
  502. + pos->total = lrugen->avg_total[type][tier] +
  503. + atomic_long_read(&lrugen->evicted[hist][type][tier]);
  504. + if (tier)
  505. + pos->total += lrugen->protected[hist][type][tier - 1];
  506. + pos->gain = gain;
  507. +}
  508. +
  509. +static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
  510. +{
  511. + int hist, tier;
  512. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  513. + bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
  514. + unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
  515. +
  516. + lockdep_assert_held(&lruvec->lru_lock);
  517. +
  518. + if (!carryover && !clear)
  519. + return;
  520. +
  521. + hist = lru_hist_from_seq(seq);
  522. +
  523. + for (tier = 0; tier < MAX_NR_TIERS; tier++) {
  524. + if (carryover) {
  525. + unsigned long sum;
  526. +
  527. + sum = lrugen->avg_refaulted[type][tier] +
  528. + atomic_long_read(&lrugen->refaulted[hist][type][tier]);
  529. + WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
  530. +
  531. + sum = lrugen->avg_total[type][tier] +
  532. + atomic_long_read(&lrugen->evicted[hist][type][tier]);
  533. + if (tier)
  534. + sum += lrugen->protected[hist][type][tier - 1];
  535. + WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
  536. + }
  537. +
  538. + if (clear) {
  539. + atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
  540. + atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
  541. + if (tier)
  542. + WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
  543. + }
  544. + }
  545. +}
  546. +
  547. +static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
  548. +{
  549. + /*
  550. + * Return true if the PV has a limited number of refaults or a lower
  551. + * refaulted/total than the SP.
  552. + */
  553. + return pv->refaulted < MIN_LRU_BATCH ||
  554. + pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
  555. + (sp->refaulted + 1) * pv->total * pv->gain;
  556. +}
  557. +
  558. +/******************************************************************************
  559. + * the aging
  560. + ******************************************************************************/
  561. +
  562. +/* protect pages accessed multiple times through file descriptors */
  563. +static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
  564. +{
  565. + int type = page_is_file_lru(page);
  566. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  567. + int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
  568. + unsigned long new_flags, old_flags = READ_ONCE(page->flags);
  569. +
  570. + VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page);
  571. +
  572. + do {
  573. + new_gen = (old_gen + 1) % MAX_NR_GENS;
  574. +
  575. + new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
  576. + new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
  577. + /* for end_page_writeback() */
  578. + if (reclaiming)
  579. + new_flags |= BIT(PG_reclaim);
  580. + } while (!try_cmpxchg(&page->flags, &old_flags, new_flags));
  581. +
  582. + lru_gen_update_size(lruvec, page, old_gen, new_gen);
  583. +
  584. + return new_gen;
  585. +}
  586. +
  587. +static void inc_min_seq(struct lruvec *lruvec, int type)
  588. +{
  589. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  590. +
  591. + reset_ctrl_pos(lruvec, type, true);
  592. + WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
  593. +}
  594. +
  595. +static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
  596. +{
  597. + int gen, type, zone;
  598. + bool success = false;
  599. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  600. + DEFINE_MIN_SEQ(lruvec);
  601. +
  602. + VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
  603. +
  604. + /* find the oldest populated generation */
  605. + for (type = !can_swap; type < ANON_AND_FILE; type++) {
  606. + while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
  607. + gen = lru_gen_from_seq(min_seq[type]);
  608. +
  609. + for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  610. + if (!list_empty(&lrugen->lists[gen][type][zone]))
  611. + goto next;
  612. + }
  613. +
  614. + min_seq[type]++;
  615. + }
  616. +next:
  617. + ;
  618. + }
  619. +
  620. + /* see the comment on lru_gen_struct */
  621. + if (can_swap) {
  622. + min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
  623. + min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
  624. + }
  625. +
  626. + for (type = !can_swap; type < ANON_AND_FILE; type++) {
  627. + if (min_seq[type] == lrugen->min_seq[type])
  628. + continue;
  629. +
  630. + reset_ctrl_pos(lruvec, type, true);
  631. + WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
  632. + success = true;
  633. + }
  634. +
  635. + return success;
  636. +}
  637. +
  638. +static void inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, bool can_swap)
  639. +{
  640. + int prev, next;
  641. + int type, zone;
  642. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  643. +
  644. + spin_lock_irq(&lruvec->lru_lock);
  645. +
  646. + VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
  647. +
  648. + if (max_seq != lrugen->max_seq)
  649. + goto unlock;
  650. +
  651. + for (type = ANON_AND_FILE - 1; type >= 0; type--) {
  652. + if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
  653. + continue;
  654. +
  655. + VM_WARN_ON_ONCE(type == LRU_GEN_FILE || can_swap);
  656. +
  657. + inc_min_seq(lruvec, type);
  658. + }
  659. +
  660. + /*
  661. + * Update the active/inactive LRU sizes for compatibility. Both sides of
  662. + * the current max_seq need to be covered, since max_seq+1 can overlap
  663. + * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do
  664. + * overlap, cold/hot inversion happens.
  665. + */
  666. + prev = lru_gen_from_seq(lrugen->max_seq - 1);
  667. + next = lru_gen_from_seq(lrugen->max_seq + 1);
  668. +
  669. + for (type = 0; type < ANON_AND_FILE; type++) {
  670. + for (zone = 0; zone < MAX_NR_ZONES; zone++) {
  671. + enum lru_list lru = type * LRU_INACTIVE_FILE;
  672. + long delta = lrugen->nr_pages[prev][type][zone] -
  673. + lrugen->nr_pages[next][type][zone];
  674. +
  675. + if (!delta)
  676. + continue;
  677. +
  678. + __update_lru_size(lruvec, lru, zone, delta);
  679. + __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
  680. + }
  681. + }
  682. +
  683. + for (type = 0; type < ANON_AND_FILE; type++)
  684. + reset_ctrl_pos(lruvec, type, false);
  685. +
  686. + /* make sure preceding modifications appear */
  687. + smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
  688. +unlock:
  689. + spin_unlock_irq(&lruvec->lru_lock);
  690. +}
  691. +
  692. +static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
  693. + struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
  694. +{
  695. + int gen, type, zone;
  696. + unsigned long old = 0;
  697. + unsigned long young = 0;
  698. + unsigned long total = 0;
  699. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  700. + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  701. +
  702. + for (type = !can_swap; type < ANON_AND_FILE; type++) {
  703. + unsigned long seq;
  704. +
  705. + for (seq = min_seq[type]; seq <= max_seq; seq++) {
  706. + unsigned long size = 0;
  707. +
  708. + gen = lru_gen_from_seq(seq);
  709. +
  710. + for (zone = 0; zone < MAX_NR_ZONES; zone++)
  711. + size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
  712. +
  713. + total += size;
  714. + if (seq == max_seq)
  715. + young += size;
  716. + else if (seq + MIN_NR_GENS == max_seq)
  717. + old += size;
  718. + }
  719. + }
  720. +
  721. + /* try to scrape all its memory if this memcg was deleted */
  722. + *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
  723. +
  724. + /*
  725. + * The aging tries to be lazy to reduce the overhead, while the eviction
  726. + * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
  727. + * ideal number of generations is MIN_NR_GENS+1.
  728. + */
  729. + if (min_seq[!can_swap] + MIN_NR_GENS > max_seq)
  730. + return true;
  731. + if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
  732. + return false;
  733. +
  734. + /*
  735. + * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
  736. + * of the total number of pages for each generation. A reasonable range
  737. + * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
  738. + * aging cares about the upper bound of hot pages, while the eviction
  739. + * cares about the lower bound of cold pages.
  740. + */
  741. + if (young * MIN_NR_GENS > total)
  742. + return true;
  743. + if (old * (MIN_NR_GENS + 2) < total)
  744. + return true;
  745. +
  746. + return false;
  747. +}
  748. +
  749. +static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc)
  750. +{
  751. + bool need_aging;
  752. + unsigned long nr_to_scan;
  753. + int swappiness = get_swappiness(lruvec, sc);
  754. + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  755. + DEFINE_MAX_SEQ(lruvec);
  756. + DEFINE_MIN_SEQ(lruvec);
  757. +
  758. + VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
  759. +
  760. + mem_cgroup_calculate_protection(NULL, memcg);
  761. +
  762. + if (mem_cgroup_below_min(memcg))
  763. + return;
  764. +
  765. + need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
  766. + if (need_aging)
  767. + inc_max_seq(lruvec, max_seq, swappiness);
  768. +}
  769. +
  770. +static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
  771. +{
  772. + struct mem_cgroup *memcg;
  773. +
  774. + VM_WARN_ON_ONCE(!current_is_kswapd());
  775. +
  776. + memcg = mem_cgroup_iter(NULL, NULL, NULL);
  777. + do {
  778. + struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
  779. +
  780. + age_lruvec(lruvec, sc);
  781. +
  782. + cond_resched();
  783. + } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
  784. +}
  785. +
  786. +/******************************************************************************
  787. + * the eviction
  788. + ******************************************************************************/
  789. +
  790. +static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
  791. +{
  792. + bool success;
  793. + int gen = page_lru_gen(page);
  794. + int type = page_is_file_lru(page);
  795. + int zone = page_zonenum(page);
  796. + int delta = thp_nr_pages(page);
  797. + int refs = page_lru_refs(page);
  798. + int tier = lru_tier_from_refs(refs);
  799. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  800. +
  801. + VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page);
  802. +
  803. + /* unevictable */
  804. + if (!page_evictable(page)) {
  805. + success = lru_gen_del_page(lruvec, page, true);
  806. + VM_WARN_ON_ONCE_PAGE(!success, page);
  807. + SetPageUnevictable(page);
  808. + add_page_to_lru_list(page, lruvec);
  809. + __count_vm_events(UNEVICTABLE_PGCULLED, delta);
  810. + return true;
  811. + }
  812. +
  813. + /* dirty lazyfree */
  814. + if (type == LRU_GEN_FILE && PageAnon(page) && PageDirty(page)) {
  815. + success = lru_gen_del_page(lruvec, page, true);
  816. + VM_WARN_ON_ONCE_PAGE(!success, page);
  817. + SetPageSwapBacked(page);
  818. + add_page_to_lru_list_tail(page, lruvec);
  819. + return true;
  820. + }
  821. +
  822. + /* protected */
  823. + if (tier > tier_idx) {
  824. + int hist = lru_hist_from_seq(lrugen->min_seq[type]);
  825. +
  826. + gen = page_inc_gen(lruvec, page, false);
  827. + list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
  828. +
  829. + WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
  830. + lrugen->protected[hist][type][tier - 1] + delta);
  831. + __mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);
  832. + return true;
  833. + }
  834. +
  835. + /* waiting for writeback */
  836. + if (PageLocked(page) || PageWriteback(page) ||
  837. + (type == LRU_GEN_FILE && PageDirty(page))) {
  838. + gen = page_inc_gen(lruvec, page, true);
  839. + list_move(&page->lru, &lrugen->lists[gen][type][zone]);
  840. + return true;
  841. + }
  842. +
  843. + return false;
  844. +}
  845. +
  846. +static bool isolate_page(struct lruvec *lruvec, struct page *page, struct scan_control *sc)
  847. +{
  848. + bool success;
  849. +
  850. + /* unmapping inhibited */
  851. + if (!sc->may_unmap && page_mapped(page))
  852. + return false;
  853. +
  854. + /* swapping inhibited */
  855. + if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
  856. + (PageDirty(page) ||
  857. + (PageAnon(page) && !PageSwapCache(page))))
  858. + return false;
  859. +
  860. + /* raced with release_pages() */
  861. + if (!get_page_unless_zero(page))
  862. + return false;
  863. +
  864. + /* raced with another isolation */
  865. + if (!TestClearPageLRU(page)) {
  866. + put_page(page);
  867. + return false;
  868. + }
  869. +
  870. + /* see the comment on MAX_NR_TIERS */
  871. + if (!PageReferenced(page))
  872. + set_mask_bits(&page->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
  873. +
  874. + /* for shrink_page_list() */
  875. + ClearPageReclaim(page);
  876. + ClearPageReferenced(page);
  877. +
  878. + success = lru_gen_del_page(lruvec, page, true);
  879. + VM_WARN_ON_ONCE_PAGE(!success, page);
  880. +
  881. + return true;
  882. +}
  883. +
  884. +static int scan_pages(struct lruvec *lruvec, struct scan_control *sc,
  885. + int type, int tier, struct list_head *list)
  886. +{
  887. + int gen, zone;
  888. + enum vm_event_item item;
  889. + int sorted = 0;
  890. + int scanned = 0;
  891. + int isolated = 0;
  892. + int remaining = MAX_LRU_BATCH;
  893. + struct lru_gen_struct *lrugen = &lruvec->lrugen;
  894. + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  895. +
  896. + VM_WARN_ON_ONCE(!list_empty(list));
  897. +
  898. + if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
  899. + return 0;
  900. +
  901. + gen = lru_gen_from_seq(lrugen->min_seq[type]);
  902. +
  903. + for (zone = sc->reclaim_idx; zone >= 0; zone--) {
  904. + LIST_HEAD(moved);
  905. + int skipped = 0;
  906. + struct list_head *head = &lrugen->lists[gen][type][zone];
  907. +
  908. + while (!list_empty(head)) {
  909. + struct page *page = lru_to_page(head);
  910. + int delta = thp_nr_pages(page);
  911. +
  912. + VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
  913. + VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
  914. + VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
  915. + VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
  916. +
  917. + scanned += delta;
  918. +
  919. + if (sort_page(lruvec, page, tier))
  920. + sorted += delta;
  921. + else if (isolate_page(lruvec, page, sc)) {
  922. + list_add(&page->lru, list);
  923. + isolated += delta;
  924. + } else {
  925. + list_move(&page->lru, &moved);
  926. + skipped += delta;
  927. + }
  928. +
  929. + if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH)
  930. + break;
  931. + }
  932. +
  933. + if (skipped) {
  934. + list_splice(&moved, head);
  935. + __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
  936. + }
  937. +
  938. + if (!remaining || isolated >= MIN_LRU_BATCH)
  939. + break;
  940. + }
  941. +
  942. + item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
  943. + if (!cgroup_reclaim(sc)) {
  944. + __count_vm_events(item, isolated);
  945. + __count_vm_events(PGREFILL, sorted);
  946. + }
  947. + __count_memcg_events(memcg, item, isolated);
  948. + __count_memcg_events(memcg, PGREFILL, sorted);
  949. + __count_vm_events(PGSCAN_ANON + type, isolated);
  950. +
  951. + /*
  952. + * There might not be eligible pages due to reclaim_idx, may_unmap and
  953. + * may_writepage. Check the remaining to prevent livelock if it's not
  954. + * making progress.
  955. + */
  956. + return isolated || !remaining ? scanned : 0;
  957. +}
  958. +
  959. +static int get_tier_idx(struct lruvec *lruvec, int type)
  960. +{
  961. + int tier;
  962. + struct ctrl_pos sp, pv;
  963. +
  964. + /*
  965. + * To leave a margin for fluctuations, use a larger gain factor (1:2).
  966. + * This value is chosen because any other tier would have at least twice
  967. + * as many refaults as the first tier.
  968. + */
  969. + read_ctrl_pos(lruvec, type, 0, 1, &sp);
  970. + for (tier = 1; tier < MAX_NR_TIERS; tier++) {
  971. + read_ctrl_pos(lruvec, type, tier, 2, &pv);
  972. + if (!positive_ctrl_err(&sp, &pv))
  973. + break;
  974. + }
  975. +
  976. + return tier - 1;
  977. +}
  978. +
  979. +static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
  980. +{
  981. + int type, tier;
  982. + struct ctrl_pos sp, pv;
  983. + int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
  984. +
  985. + /*
  986. + * Compare the first tier of anon with that of file to determine which
  987. + * type to scan. Also need to compare other tiers of the selected type
  988. + * with the first tier of the other type to determine the last tier (of
  989. + * the selected type) to evict.
  990. + */
  991. + read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
  992. + read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
  993. + type = positive_ctrl_err(&sp, &pv);
  994. +
  995. + read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
  996. + for (tier = 1; tier < MAX_NR_TIERS; tier++) {
  997. + read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
  998. + if (!positive_ctrl_err(&sp, &pv))
  999. + break;
  1000. + }
  1001. +
  1002. + *tier_idx = tier - 1;
  1003. +
  1004. + return type;
  1005. +}
  1006. +
  1007. +static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
  1008. + int *type_scanned, struct list_head *list)
  1009. +{
  1010. + int i;
  1011. + int type;
  1012. + int scanned;
  1013. + int tier = -1;
  1014. + DEFINE_MIN_SEQ(lruvec);
  1015. +
  1016. + /*
  1017. + * Try to make the obvious choice first. When anon and file are both
  1018. + * available from the same generation, interpret swappiness 1 as file
  1019. + * first and 200 as anon first.
  1020. + */
  1021. + if (!swappiness)
  1022. + type = LRU_GEN_FILE;
  1023. + else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
  1024. + type = LRU_GEN_ANON;
  1025. + else if (swappiness == 1)
  1026. + type = LRU_GEN_FILE;
  1027. + else if (swappiness == 200)
  1028. + type = LRU_GEN_ANON;
  1029. + else
  1030. + type = get_type_to_scan(lruvec, swappiness, &tier);
  1031. +
  1032. + for (i = !swappiness; i < ANON_AND_FILE; i++) {
  1033. + if (tier < 0)
  1034. + tier = get_tier_idx(lruvec, type);
  1035. +
  1036. + scanned = scan_pages(lruvec, sc, type, tier, list);
  1037. + if (scanned)
  1038. + break;
  1039. +
  1040. + type = !type;
  1041. + tier = -1;
  1042. + }
  1043. +
  1044. + *type_scanned = type;
  1045. +
  1046. + return scanned;
  1047. +}
  1048. +
  1049. +static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swappiness)
  1050. +{
  1051. + int type;
  1052. + int scanned;
  1053. + int reclaimed;
  1054. + LIST_HEAD(list);
  1055. + struct page *page;
  1056. + enum vm_event_item item;
  1057. + struct reclaim_stat stat;
  1058. + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  1059. + struct pglist_data *pgdat = lruvec_pgdat(lruvec);
  1060. +
  1061. + spin_lock_irq(&lruvec->lru_lock);
  1062. +
  1063. + scanned = isolate_pages(lruvec, sc, swappiness, &type, &list);
  1064. +
  1065. + scanned += try_to_inc_min_seq(lruvec, swappiness);
  1066. +
  1067. + if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
  1068. + scanned = 0;
  1069. +
  1070. + spin_unlock_irq(&lruvec->lru_lock);
  1071. +
  1072. + if (list_empty(&list))
  1073. + return scanned;
  1074. +
  1075. + reclaimed = shrink_page_list(&list, pgdat, sc, &stat, false);
  1076. +
  1077. + list_for_each_entry(page, &list, lru) {
  1078. + /* restore LRU_REFS_FLAGS cleared by isolate_page() */
  1079. + if (PageWorkingset(page))
  1080. + SetPageReferenced(page);
  1081. +
  1082. + /* don't add rejected pages to the oldest generation */
  1083. + if (PageReclaim(page) &&
  1084. + (PageDirty(page) || PageWriteback(page)))
  1085. + ClearPageActive(page);
  1086. + else
  1087. + SetPageActive(page);
  1088. + }
  1089. +
  1090. + spin_lock_irq(&lruvec->lru_lock);
  1091. +
  1092. + move_pages_to_lru(lruvec, &list);
  1093. +
  1094. + item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
  1095. + if (!cgroup_reclaim(sc))
  1096. + __count_vm_events(item, reclaimed);
  1097. + __count_memcg_events(memcg, item, reclaimed);
  1098. + __count_vm_events(PGSTEAL_ANON + type, reclaimed);
  1099. +
  1100. + spin_unlock_irq(&lruvec->lru_lock);
  1101. +
  1102. + mem_cgroup_uncharge_list(&list);
  1103. + free_unref_page_list(&list);
  1104. +
  1105. + sc->nr_reclaimed += reclaimed;
  1106. +
  1107. + return scanned;
  1108. +}
  1109. +
  1110. +static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
  1111. + bool can_swap)
  1112. +{
  1113. + bool need_aging;
  1114. + unsigned long nr_to_scan;
  1115. + struct mem_cgroup *memcg = lruvec_memcg(lruvec);
  1116. + DEFINE_MAX_SEQ(lruvec);
  1117. + DEFINE_MIN_SEQ(lruvec);
  1118. +
  1119. + if (mem_cgroup_below_min(memcg) ||
  1120. + (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
  1121. + return 0;
  1122. +
  1123. + need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan);
  1124. + if (!need_aging)
  1125. + return nr_to_scan;
  1126. +
  1127. + /* skip the aging path at the default priority */
  1128. + if (sc->priority == DEF_PRIORITY)
  1129. + goto done;
  1130. +
  1131. + /* leave the work to lru_gen_age_node() */
  1132. + if (current_is_kswapd())
  1133. + return 0;
  1134. +
  1135. + inc_max_seq(lruvec, max_seq, can_swap);
  1136. +done:
  1137. + return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
  1138. +}
  1139. +
  1140. +static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
  1141. +{
  1142. + struct blk_plug plug;
  1143. + unsigned long scanned = 0;
  1144. +
  1145. + lru_add_drain();
  1146. +
  1147. + blk_start_plug(&plug);
  1148. +
  1149. + while (true) {
  1150. + int delta;
  1151. + int swappiness;
  1152. + unsigned long nr_to_scan;
  1153. +
  1154. + if (sc->may_swap)
  1155. + swappiness = get_swappiness(lruvec, sc);
  1156. + else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc))
  1157. + swappiness = 1;
  1158. + else
  1159. + swappiness = 0;
  1160. +
  1161. + nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness);
  1162. + if (!nr_to_scan)
  1163. + break;
  1164. +
  1165. + delta = evict_pages(lruvec, sc, swappiness);
  1166. + if (!delta)
  1167. + break;
  1168. +
  1169. + scanned += delta;
  1170. + if (scanned >= nr_to_scan)
  1171. + break;
  1172. +
  1173. + cond_resched();
  1174. + }
  1175. +
  1176. + blk_finish_plug(&plug);
  1177. +}
  1178. +
  1179. /******************************************************************************
  1180. * initialization
  1181. ******************************************************************************/
  1182. @@ -2894,6 +3649,16 @@ static int __init init_lru_gen(void)
  1183. };
  1184. late_initcall(init_lru_gen);
  1185. +#else /* !CONFIG_LRU_GEN */
  1186. +
  1187. +static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
  1188. +{
  1189. +}
  1190. +
  1191. +static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
  1192. +{
  1193. +}
  1194. +
  1195. #endif /* CONFIG_LRU_GEN */
  1196. static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
  1197. @@ -2907,6 +3672,11 @@ static void shrink_lruvec(struct lruvec
  1198. bool proportional_reclaim;
  1199. struct blk_plug plug;
  1200. + if (lru_gen_enabled()) {
  1201. + lru_gen_shrink_lruvec(lruvec, sc);
  1202. + return;
  1203. + }
  1204. +
  1205. get_scan_count(lruvec, sc, nr);
  1206. /* Record the original scan target for proportional adjustments later */
  1207. @@ -3375,6 +4145,9 @@ static void snapshot_refaults(struct mem
  1208. struct lruvec *target_lruvec;
  1209. unsigned long refaults;
  1210. + if (lru_gen_enabled())
  1211. + return;
  1212. +
  1213. target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
  1214. refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
  1215. target_lruvec->refaults[0] = refaults;
  1216. @@ -3739,12 +4512,16 @@ unsigned long try_to_free_mem_cgroup_pag
  1217. }
  1218. #endif
  1219. -static void age_active_anon(struct pglist_data *pgdat,
  1220. - struct scan_control *sc)
  1221. +static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
  1222. {
  1223. struct mem_cgroup *memcg;
  1224. struct lruvec *lruvec;
  1225. + if (lru_gen_enabled()) {
  1226. + lru_gen_age_node(pgdat, sc);
  1227. + return;
  1228. + }
  1229. +
  1230. if (!can_age_anon_pages(pgdat, sc))
  1231. return;
  1232. @@ -4061,12 +4838,11 @@ restart:
  1233. sc.may_swap = !nr_boost_reclaim;
  1234. /*
  1235. - * Do some background aging of the anon list, to give
  1236. - * pages a chance to be referenced before reclaiming. All
  1237. - * pages are rotated regardless of classzone as this is
  1238. - * about consistent aging.
  1239. + * Do some background aging, to give pages a chance to be
  1240. + * referenced before reclaiming. All pages are rotated
  1241. + * regardless of classzone as this is about consistent aging.
  1242. */
  1243. - age_active_anon(pgdat, &sc);
  1244. + kswapd_age_node(pgdat, &sc);
  1245. /*
  1246. * If we're getting trouble reclaiming, start doing writepage
  1247. --- a/mm/workingset.c
  1248. +++ b/mm/workingset.c
  1249. @@ -187,7 +187,6 @@ static unsigned int bucket_order __read_
  1250. static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
  1251. bool workingset)
  1252. {
  1253. - eviction >>= bucket_order;
  1254. eviction &= EVICTION_MASK;
  1255. eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
  1256. eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
  1257. @@ -212,10 +211,107 @@ static void unpack_shadow(void *shadow,
  1258. *memcgidp = memcgid;
  1259. *pgdat = NODE_DATA(nid);
  1260. - *evictionp = entry << bucket_order;
  1261. + *evictionp = entry;
  1262. *workingsetp = workingset;
  1263. }
  1264. +#ifdef CONFIG_LRU_GEN
  1265. +
  1266. +static void *lru_gen_eviction(struct page *page)
  1267. +{
  1268. + int hist;
  1269. + unsigned long token;
  1270. + unsigned long min_seq;
  1271. + struct lruvec *lruvec;
  1272. + struct lru_gen_struct *lrugen;
  1273. + int type = page_is_file_lru(page);
  1274. + int delta = thp_nr_pages(page);
  1275. + int refs = page_lru_refs(page);
  1276. + int tier = lru_tier_from_refs(refs);
  1277. + struct mem_cgroup *memcg = page_memcg(page);
  1278. + struct pglist_data *pgdat = page_pgdat(page);
  1279. +
  1280. + BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);
  1281. +
  1282. + lruvec = mem_cgroup_lruvec(memcg, pgdat);
  1283. + lrugen = &lruvec->lrugen;
  1284. + min_seq = READ_ONCE(lrugen->min_seq[type]);
  1285. + token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);
  1286. +
  1287. + hist = lru_hist_from_seq(min_seq);
  1288. + atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);
  1289. +
  1290. + return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
  1291. +}
  1292. +
  1293. +static void lru_gen_refault(struct page *page, void *shadow)
  1294. +{
  1295. + int hist, tier, refs;
  1296. + int memcg_id;
  1297. + bool workingset;
  1298. + unsigned long token;
  1299. + unsigned long min_seq;
  1300. + struct lruvec *lruvec;
  1301. + struct lru_gen_struct *lrugen;
  1302. + struct mem_cgroup *memcg;
  1303. + struct pglist_data *pgdat;
  1304. + int type = page_is_file_lru(page);
  1305. + int delta = thp_nr_pages(page);
  1306. +
  1307. + unpack_shadow(shadow, &memcg_id, &pgdat, &token, &workingset);
  1308. +
  1309. + if (pgdat != page_pgdat(page))
  1310. + return;
  1311. +
  1312. + rcu_read_lock();
  1313. +
  1314. + memcg = page_memcg_rcu(page);
  1315. + if (memcg_id != mem_cgroup_id(memcg))
  1316. + goto unlock;
  1317. +
  1318. + lruvec = mem_cgroup_lruvec(memcg, pgdat);
  1319. + lrugen = &lruvec->lrugen;
  1320. +
  1321. + min_seq = READ_ONCE(lrugen->min_seq[type]);
  1322. + if ((token >> LRU_REFS_WIDTH) != (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)))
  1323. + goto unlock;
  1324. +
  1325. + hist = lru_hist_from_seq(min_seq);
  1326. + /* see the comment in page_lru_refs() */
  1327. + refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
  1328. + tier = lru_tier_from_refs(refs);
  1329. +
  1330. + atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);
  1331. + mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);
  1332. +
  1333. + /*
  1334. + * Count the following two cases as stalls:
  1335. + * 1. For pages accessed through page tables, hotter pages pushed out
  1336. + * hot pages which refaulted immediately.
  1337. + * 2. For pages accessed multiple times through file descriptors,
  1338. + * numbers of accesses might have been out of the range.
  1339. + */
  1340. + if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) {
  1341. + SetPageWorkingset(page);
  1342. + mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
  1343. + }
  1344. +unlock:
  1345. + rcu_read_unlock();
  1346. +}
  1347. +
  1348. +#else /* !CONFIG_LRU_GEN */
  1349. +
  1350. +static void *lru_gen_eviction(struct page *page)
  1351. +{
  1352. + return NULL;
  1353. +}
  1354. +
  1355. +static void lru_gen_refault(struct page *page, void *shadow)
  1356. +{
  1357. +}
  1358. +
  1359. +#endif /* CONFIG_LRU_GEN */
  1360. +
  1361. /**
  1362. * workingset_age_nonresident - age non-resident entries as LRU ages
  1363. * @lruvec: the lruvec that was aged
  1364. @@ -264,10 +360,14 @@ void *workingset_eviction(struct page *p
  1365. VM_BUG_ON_PAGE(page_count(page), page);
  1366. VM_BUG_ON_PAGE(!PageLocked(page), page);
  1367. + if (lru_gen_enabled())
  1368. + return lru_gen_eviction(page);
  1369. +
  1370. lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
  1371. /* XXX: target_memcg can be NULL, go through lruvec */
  1372. memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
  1373. eviction = atomic_long_read(&lruvec->nonresident_age);
  1374. + eviction >>= bucket_order;
  1375. workingset_age_nonresident(lruvec, thp_nr_pages(page));
  1376. return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
  1377. }
  1378. @@ -296,7 +396,13 @@ void workingset_refault(struct page *pag
  1379. bool workingset;
  1380. int memcgid;
  1381. + if (lru_gen_enabled()) {
  1382. + lru_gen_refault(page, shadow);
  1383. + return;
  1384. + }
  1385. +
  1386. unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
  1387. + eviction <<= bucket_order;
  1388. rcu_read_lock();
  1389. /*