020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch

From 530716d008ca26315f246cd70dc1cefc636beaa4 Mon Sep 17 00:00:00 2001
From: Yu Zhao <[email protected]>
Date: Sun, 18 Sep 2022 02:00:09 -0600
Subject: [PATCH 12/29] mm: multi-gen LRU: debugfs interface
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add /sys/kernel/debug/lru_gen for working set estimation and proactive
reclaim. These techniques are commonly used to optimize job scheduling
(bin packing) in data centers [1][2].

Compared with the page table-based approach and the PFN-based
approach, this lruvec-based approach has the following advantages:
1. It offers better choices because it is aware of memcgs, NUMA nodes,
   shared mappings and unmapped page cache.
2. It is more scalable because it is O(nr_hot_pages), whereas the
   PFN-based approach is O(nr_total_pages).

Add /sys/kernel/debug/lru_gen_full for debugging.
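
Writes to /sys/kernel/debug/lru_gen trigger aging ('+') or eviction
('-'). As parsed by lru_gen_seq_write() below, commands are separated
by commas, semicolons or newlines, and take the form (bracketed fields
are optional):

  + memcg_id node_id max_gen_nr [can_swap [force_scan]]
  - memcg_id node_id min_gen_nr [swappiness [nr_to_reclaim]]

Reading either file lists each memcg and NUMA node, with one line per
generation giving its sequence number, its age in milliseconds and its
anon and file page counts.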
[1] https://dl.acm.org/doi/10.1145/3297858.3304053
[2] https://dl.acm.org/doi/10.1145/3503222.3507731

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yu Zhao <[email protected]>
Reviewed-by: Qi Zheng <[email protected]>
Acked-by: Brian Geffon <[email protected]>
Acked-by: Jan Alexander Steffens (heftig) <[email protected]>
Acked-by: Oleksandr Natalenko <[email protected]>
Acked-by: Steven Barrett <[email protected]>
Acked-by: Suleiman Souhlal <[email protected]>
Tested-by: Daniel Byrne <[email protected]>
Tested-by: Donald Carr <[email protected]>
Tested-by: Holger Hoffstätte <[email protected]>
Tested-by: Konstantin Kharlamov <[email protected]>
Tested-by: Shuang Zhai <[email protected]>
Tested-by: Sofia Trinh <[email protected]>
Tested-by: Vaibhav Jain <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Barry Song <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Michael Larabel <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---
 include/linux/nodemask.h |   1 +
 mm/vmscan.c              | 411 ++++++++++++++++++++++++++++++++++++++-
 2 files changed, 402 insertions(+), 10 deletions(-)

--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -485,6 +485,7 @@ static inline int num_node_state(enum no
 #define first_online_node	0
 #define first_memory_node	0
 #define next_online_node(nid)	(MAX_NUMNODES)
+#define next_memory_node(nid)	(MAX_NUMNODES)
 #define nr_node_ids		1U
 #define nr_online_nodes		1U

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,6 +53,7 @@
 #include <linux/pagewalk.h>
 #include <linux/shmem_fs.h>
 #include <linux/ctype.h>
+#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -3968,12 +3969,40 @@ static void clear_mm_walk(void)
 	kfree(walk);
 }
 
-static void inc_min_seq(struct lruvec *lruvec, int type)
+static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
 {
+	int zone;
+	int remaining = MAX_LRU_BATCH;
 	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
+
+	if (type == LRU_GEN_ANON && !can_swap)
+		goto done;
+
+	/* prevent cold/hot inversion if force_scan is true */
+	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+		struct list_head *head = &lrugen->lists[old_gen][type][zone];
+
+		while (!list_empty(head)) {
+			struct page *page = lru_to_page(head);
+
+			VM_WARN_ON_ONCE_PAGE(PageUnevictable(page), page);
+			VM_WARN_ON_ONCE_PAGE(PageActive(page), page);
+			VM_WARN_ON_ONCE_PAGE(page_is_file_lru(page) != type, page);
+			VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
+
+			new_gen = page_inc_gen(lruvec, page, false);
+			list_move_tail(&page->lru, &lrugen->lists[new_gen][type][zone]);
+
+			if (!--remaining)
+				return false;
+		}
+	}
+done:
 	reset_ctrl_pos(lruvec, type, true);
 	WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
+
+	return true;
 }
 
 static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
@@ -4019,7 +4048,7 @@ next:
 	return success;
 }
 
-static void inc_max_seq(struct lruvec *lruvec, bool can_swap)
+static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
 {
 	int prev, next;
 	int type, zone;
@@ -4033,9 +4062,13 @@ static void inc_max_seq(struct lruvec *l
 		if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
 			continue;
 
-		VM_WARN_ON_ONCE(type == LRU_GEN_FILE || can_swap);
+		VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));
 
-		inc_min_seq(lruvec, type);
+		while (!inc_min_seq(lruvec, type, can_swap)) {
+			spin_unlock_irq(&lruvec->lru_lock);
+			cond_resched();
+			spin_lock_irq(&lruvec->lru_lock);
+		}
 	}
 
 	/*
@@ -4072,7 +4105,7 @@ static void inc_max_seq(struct lruvec *l
 }
 
 static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
-			       struct scan_control *sc, bool can_swap)
+			       struct scan_control *sc, bool can_swap, bool force_scan)
 {
 	bool success;
 	struct lru_gen_mm_walk *walk;
@@ -4093,7 +4126,7 @@ static bool try_to_inc_max_seq(struct lr
 	 * handful of PTEs. Spreading the work out over a period of time usually
 	 * is less efficient, but it avoids bursty page faults.
 	 */
-	if (!(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
+	if (!force_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
 		success = iterate_mm_list_nowalk(lruvec, max_seq);
 		goto done;
 	}
@@ -4107,7 +4140,7 @@ static bool try_to_inc_max_seq(struct lr
 	walk->lruvec = lruvec;
 	walk->max_seq = max_seq;
 	walk->can_swap = can_swap;
-	walk->force_scan = false;
+	walk->force_scan = force_scan;
 
 	do {
 		success = iterate_mm_list(lruvec, walk, &mm);
@@ -4127,7 +4160,7 @@ done:
 	VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
 
-	inc_max_seq(lruvec, can_swap);
+	inc_max_seq(lruvec, can_swap, force_scan);
 	/* either this sees any waiters or they will see updated max_seq */
 	if (wq_has_sleeper(&lruvec->mm_state.wait))
 		wake_up_all(&lruvec->mm_state.wait);
@@ -4225,7 +4258,7 @@ static bool age_lruvec(struct lruvec *lr
 	}
 
 	if (need_aging)
-		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness);
+		try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);
 
 	return true;
 }
@@ -4784,7 +4817,7 @@ static unsigned long get_nr_to_scan(stru
 	if (current_is_kswapd())
 		return 0;
 
-	if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap))
+	if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
 		return nr_to_scan;
 done:
 	return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
@@ -5124,6 +5157,361 @@ static struct attribute_group lru_gen_at
 };
 
 /******************************************************************************
+ * debugfs interface
+ ******************************************************************************/
+
+static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
+{
+	struct mem_cgroup *memcg;
+	loff_t nr_to_skip = *pos;
+
+	m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
+	if (!m->private)
+		return ERR_PTR(-ENOMEM);
+
+	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	do {
+		int nid;
+
+		for_each_node_state(nid, N_MEMORY) {
+			if (!nr_to_skip--)
+				return get_lruvec(memcg, nid);
+		}
+	} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+	return NULL;
+}
+
+static void lru_gen_seq_stop(struct seq_file *m, void *v)
+{
+	if (!IS_ERR_OR_NULL(v))
+		mem_cgroup_iter_break(NULL, lruvec_memcg(v));
+
+	kvfree(m->private);
+	m->private = NULL;
+}
+
+static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	int nid = lruvec_pgdat(v)->node_id;
+	struct mem_cgroup *memcg = lruvec_memcg(v);
+
+	++*pos;
+
+	nid = next_memory_node(nid);
+	if (nid == MAX_NUMNODES) {
+		memcg = mem_cgroup_iter(NULL, memcg, NULL);
+		if (!memcg)
+			return NULL;
+
+		nid = first_memory_node;
+	}
+
+	return get_lruvec(memcg, nid);
+}
+
+static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
+				  unsigned long max_seq, unsigned long *min_seq,
+				  unsigned long seq)
+{
+	int i;
+	int type, tier;
+	int hist = lru_hist_from_seq(seq);
+	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+	for (tier = 0; tier < MAX_NR_TIERS; tier++) {
+		seq_printf(m, "            %10d", tier);
+		for (type = 0; type < ANON_AND_FILE; type++) {
+			const char *s = "   ";
+			unsigned long n[3] = {};
+
+			if (seq == max_seq) {
+				s = "RT ";
+				n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
+				n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
+			} else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
+				s = "rep";
+				n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
+				n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
+				if (tier)
+					n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
+			}
+
+			for (i = 0; i < 3; i++)
+				seq_printf(m, " %10lu%c", n[i], s[i]);
+		}
+		seq_putc(m, '\n');
+	}
+
+	seq_puts(m, "                      ");
+	for (i = 0; i < NR_MM_STATS; i++) {
+		const char *s = "      ";
+		unsigned long n = 0;
+
+		if (seq == max_seq && NR_HIST_GENS == 1) {
+			s = "LOYNFA";
+			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+		} else if (seq != max_seq && NR_HIST_GENS > 1) {
+			s = "loynfa";
+			n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+		}
+
+		seq_printf(m, " %10lu%c", n, s[i]);
+	}
+	seq_putc(m, '\n');
+}
+
+static int lru_gen_seq_show(struct seq_file *m, void *v)
+{
+	unsigned long seq;
+	bool full = !debugfs_real_fops(m->file)->write;
+	struct lruvec *lruvec = v;
+	struct lru_gen_struct *lrugen = &lruvec->lrugen;
+	int nid = lruvec_pgdat(lruvec)->node_id;
+	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+	DEFINE_MAX_SEQ(lruvec);
+	DEFINE_MIN_SEQ(lruvec);
+
+	if (nid == first_memory_node) {
+		const char *path = memcg ? m->private : "";
+
+#ifdef CONFIG_MEMCG
+		if (memcg)
+			cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
+#endif
+		seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
+	}
+
+	seq_printf(m, " node %5d\n", nid);
+
+	if (!full)
+		seq = min_seq[LRU_GEN_ANON];
+	else if (max_seq >= MAX_NR_GENS)
+		seq = max_seq - MAX_NR_GENS + 1;
+	else
+		seq = 0;
+
+	for (; seq <= max_seq; seq++) {
+		int type, zone;
+		int gen = lru_gen_from_seq(seq);
+		unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+		seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
+
+		for (type = 0; type < ANON_AND_FILE; type++) {
+			unsigned long size = 0;
+			char mark = full && seq < min_seq[type] ? 'x' : ' ';
+
+			for (zone = 0; zone < MAX_NR_ZONES; zone++)
+				size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+			seq_printf(m, " %10lu%c", size, mark);
+		}
+
+		seq_putc(m, '\n');
+
+		if (full)
+			lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
+	}
+
+	return 0;
+}
+
+static const struct seq_operations lru_gen_seq_ops = {
+	.start = lru_gen_seq_start,
+	.stop = lru_gen_seq_stop,
+	.next = lru_gen_seq_next,
+	.show = lru_gen_seq_show,
+};
+
+static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
+		     bool can_swap, bool force_scan)
+{
+	DEFINE_MAX_SEQ(lruvec);
+	DEFINE_MIN_SEQ(lruvec);
+
+	if (seq < max_seq)
+		return 0;
+
+	if (seq > max_seq)
+		return -EINVAL;
+
+	if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
+		return -ERANGE;
+
+	try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan);
+
+	return 0;
+}
+
+static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
+			int swappiness, unsigned long nr_to_reclaim)
+{
+	DEFINE_MAX_SEQ(lruvec);
+
+	if (seq + MIN_NR_GENS > max_seq)
+		return -EINVAL;
+
+	sc->nr_reclaimed = 0;
+
+	while (!signal_pending(current)) {
+		DEFINE_MIN_SEQ(lruvec);
+
+		if (seq < min_seq[!swappiness])
+			return 0;
+
+		if (sc->nr_reclaimed >= nr_to_reclaim)
+			return 0;
+
+		if (!evict_pages(lruvec, sc, swappiness, NULL))
+			return 0;
+
+		cond_resched();
+	}
+
+	return -EINTR;
+}
+
+static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
+		   struct scan_control *sc, int swappiness, unsigned long opt)
+{
+	struct lruvec *lruvec;
+	int err = -EINVAL;
+	struct mem_cgroup *memcg = NULL;
+
+	if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
+		return -EINVAL;
+
+	if (!mem_cgroup_disabled()) {
+		rcu_read_lock();
+		memcg = mem_cgroup_from_id(memcg_id);
+#ifdef CONFIG_MEMCG
+		if (memcg && !css_tryget(&memcg->css))
+			memcg = NULL;
+#endif
+		rcu_read_unlock();
+
+		if (!memcg)
+			return -EINVAL;
+	}
+
+	if (memcg_id != mem_cgroup_id(memcg))
+		goto done;
+
+	lruvec = get_lruvec(memcg, nid);
+
+	if (swappiness < 0)
+		swappiness = get_swappiness(lruvec, sc);
+	else if (swappiness > 200)
+		goto done;
+
+	switch (cmd) {
+	case '+':
+		err = run_aging(lruvec, seq, sc, swappiness, opt);
+		break;
+	case '-':
+		err = run_eviction(lruvec, seq, sc, swappiness, opt);
+		break;
+	}
+done:
+	mem_cgroup_put(memcg);
+
+	return err;
+}
+
+static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
+				 size_t len, loff_t *pos)
+{
+	void *buf;
+	char *cur, *next;
+	unsigned int flags;
+	struct blk_plug plug;
+	int err = -EINVAL;
+	struct scan_control sc = {
+		.may_writepage = true,
+		.may_unmap = true,
+		.may_swap = true,
+		.reclaim_idx = MAX_NR_ZONES - 1,
+		.gfp_mask = GFP_KERNEL,
+	};
+
+	buf = kvmalloc(len + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, src, len)) {
+		kvfree(buf);
+		return -EFAULT;
+	}
+
+	set_task_reclaim_state(current, &sc.reclaim_state);
+	flags = memalloc_noreclaim_save();
+	blk_start_plug(&plug);
+	if (!set_mm_walk(NULL)) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	next = buf;
+	next[len] = '\0';
+
+	while ((cur = strsep(&next, ",;\n"))) {
+		int n;
+		int end;
+		char cmd;
+		unsigned int memcg_id;
+		unsigned int nid;
+		unsigned long seq;
+		unsigned int swappiness = -1;
+		unsigned long opt = -1;
+
+		cur = skip_spaces(cur);
+		if (!*cur)
+			continue;
+
+		n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
+			   &seq, &end, &swappiness, &end, &opt, &end);
+		if (n < 4 || cur[end]) {
+			err = -EINVAL;
+			break;
+		}
+
+		err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
+		if (err)
+			break;
+	}
+done:
+	clear_mm_walk();
+	blk_finish_plug(&plug);
+	memalloc_noreclaim_restore(flags);
+	set_task_reclaim_state(current, NULL);
+
+	kvfree(buf);
+
+	return err ? : len;
+}
+
+static int lru_gen_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &lru_gen_seq_ops);
+}
+
+static const struct file_operations lru_gen_rw_fops = {
+	.open = lru_gen_seq_open,
+	.read = seq_read,
+	.write = lru_gen_seq_write,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+static const struct file_operations lru_gen_ro_fops = {
+	.open = lru_gen_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+/******************************************************************************
  * initialization
  ******************************************************************************/
@@ -5180,6 +5568,9 @@ static int __init init_lru_gen(void)
 	if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
 		pr_err("lru_gen: failed to create sysfs group\n");
 
+	debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
+	debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
+
 	return 0;
 };
 
 late_initcall(init_lru_gen);
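
Usage note (not part of the patch): the debugfs files added above can be
driven from userspace with plain open(2)/write(2)/read(2). Below is a
minimal sketch, assuming debugfs is mounted at /sys/kernel/debug; the
memcg ID, node ID and generation number in "+ 0 0 10" are placeholders,
not values from a real system.

/* lru_gen_cmd.c: build with `cc -o lru_gen_cmd lru_gen_cmd.c`, run as root. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Grammar parsed by lru_gen_seq_write():
	 *   + memcg_id node_id max_gen_nr [can_swap [force_scan]]
	 *   - memcg_id node_id min_gen_nr [swappiness [nr_to_reclaim]]
	 * run_aging() treats a max_gen_nr below the current max_seq as a
	 * no-op and one above it as an error, so only the current max_seq
	 * actually creates a new generation.
	 */
	const char cmd[] = "+ 0 0 10\n";
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/lru_gen", O_RDWR);

	if (fd < 0) {
		perror("open /sys/kernel/debug/lru_gen");
		return 1;
	}

	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");	/* e.g. EINVAL if max_gen_nr is ahead of max_seq */

	/* Read back the per-memcg, per-node summary from lru_gen_seq_show(). */
	if (lseek(fd, 0, SEEK_SET) == 0) {
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);
	}

	close(fd);
	return 0;
}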