0116-RISC-V-Add-arch-functions-to-support-hibernation-sus.patch 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673
  1. From 06f1d699e923c3f09869439cdb603e36302c2611 Mon Sep 17 00:00:00 2001
  2. From: Sia Jee Heng <[email protected]>
  3. Date: Thu, 30 Mar 2023 14:43:21 +0800
  4. Subject: [PATCH 116/122] RISC-V: Add arch functions to support
  5. hibernation/suspend-to-disk
  6. Low level Arch functions were created to support hibernation.
  7. swsusp_arch_suspend() relies on code from __cpu_suspend_enter() to write
  8. the cpu state onto the stack, then calls swsusp_save() to save the memory
  9. image.
  10. Arch specific hibernation header is implemented and is utilized by the
  11. arch_hibernation_header_restore() and arch_hibernation_header_save()
  12. functions. The arch specific hibernation header consists of satp, hartid,
  13. and the cpu_resume address. The kernel build version also needs to be
  14. saved into the hibernation image header to make sure only the same
  15. kernel is restored when resuming.
  16. swsusp_arch_resume() creates a temporary page table that covers only
  17. the linear map. It copies the restore code to a 'safe' page, then starts
  18. to restore the memory image. Once completed, it restores the original
  19. kernel's page table. It then calls into __hibernate_cpu_resume()
  20. to restore the CPU context. Finally, it follows the normal hibernation
  21. path back to the hibernation core.
  22. To enable hibernation/suspend to disk on RISC-V, the configs below
  23. need to be enabled:
  24. - CONFIG_HIBERNATION
  25. - CONFIG_ARCH_HIBERNATION_HEADER
  26. - CONFIG_ARCH_HIBERNATION_POSSIBLE
  27. Signed-off-by: Sia Jee Heng <[email protected]>
  28. Reviewed-by: Ley Foon Tan <[email protected]>
  29. Reviewed-by: Mason Huo <[email protected]>
  30. Reviewed-by: Conor Dooley <[email protected]>
  31. Reviewed-by: Andrew Jones <[email protected]>
  32. ---
  33. arch/riscv/Kconfig | 8 +-
  34. arch/riscv/include/asm/assembler.h | 20 ++
  35. arch/riscv/include/asm/suspend.h | 19 ++
  36. arch/riscv/kernel/Makefile | 1 +
  37. arch/riscv/kernel/asm-offsets.c | 5 +
  38. arch/riscv/kernel/hibernate-asm.S | 77 ++++++
  39. arch/riscv/kernel/hibernate.c | 427 +++++++++++++++++++++++++++++
  40. 7 files changed, 556 insertions(+), 1 deletion(-)
  41. create mode 100644 arch/riscv/kernel/hibernate-asm.S
  42. create mode 100644 arch/riscv/kernel/hibernate.c
  43. --- a/arch/riscv/Kconfig
  44. +++ b/arch/riscv/Kconfig
  45. @@ -52,7 +52,7 @@ config RISCV
  46. select CLONE_BACKWARDS
  47. select CLINT_TIMER if !MMU
  48. select COMMON_CLK
  49. - select CPU_PM if CPU_IDLE
  50. + select CPU_PM if CPU_IDLE || HIBERNATION
  51. select EDAC_SUPPORT
  52. select GENERIC_ARCH_TOPOLOGY
  53. select GENERIC_ATOMIC64 if !64BIT
  54. @@ -715,6 +715,12 @@ menu "Power management options"
  55. source "kernel/power/Kconfig"
  56. +config ARCH_HIBERNATION_POSSIBLE
  57. + def_bool y
  58. +
  59. +config ARCH_HIBERNATION_HEADER
  60. + def_bool HIBERNATION
  61. +
  62. endmenu # "Power management options"
  63. menu "CPU Power Management"
  64. --- a/arch/riscv/include/asm/assembler.h
  65. +++ b/arch/riscv/include/asm/assembler.h
  66. @@ -59,4 +59,24 @@
  67. REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
  68. .endm
  69. +/*
  70. + * copy_page - copy 1 page (4KB) of data from source to destination
  71. + * @a0 - destination
  72. + * @a1 - source
  73. + */
  74. + .macro copy_page a0, a1
  75. + lui a2, 0x1
  76. + add a2, a2, a0
  77. +1 :
  78. + REG_L t0, 0(a1)
  79. + REG_L t1, SZREG(a1)
  80. +
  81. + REG_S t0, 0(a0)
  82. + REG_S t1, SZREG(a0)
  83. +
  84. + addi a0, a0, 2 * SZREG
  85. + addi a1, a1, 2 * SZREG
  86. + bne a2, a0, 1b
  87. + .endm
  88. +
  89. #endif /* __ASM_ASSEMBLER_H */
  90. --- a/arch/riscv/include/asm/suspend.h
  91. +++ b/arch/riscv/include/asm/suspend.h
  92. @@ -21,6 +21,11 @@ struct suspend_context {
  93. #endif
  94. };
  95. +/*
  96. + * Used by hibernation core and cleared during resume sequence
  97. + */
  98. +extern int in_suspend;
  99. +
  100. /* Low-level CPU suspend entry function */
  101. int __cpu_suspend_enter(struct suspend_context *context);
  102. @@ -36,4 +41,18 @@ int __cpu_resume_enter(unsigned long har
  103. /* Used to save and restore the CSRs */
  104. void suspend_save_csrs(struct suspend_context *context);
  105. void suspend_restore_csrs(struct suspend_context *context);
  106. +
  107. +/* Low-level API to support hibernation */
  108. +int swsusp_arch_suspend(void);
  109. +int swsusp_arch_resume(void);
  110. +int arch_hibernation_header_save(void *addr, unsigned int max_size);
  111. +int arch_hibernation_header_restore(void *addr);
  112. +int __hibernate_cpu_resume(void);
  113. +
  114. +/* Used to resume on the CPU we hibernated on */
  115. +int hibernate_resume_nonboot_cpu_disable(void);
  116. +
  117. +asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
  118. + unsigned long cpu_resume);
  119. +asmlinkage int hibernate_core_restore_code(void);
  120. #endif
  121. --- a/arch/riscv/kernel/Makefile
  122. +++ b/arch/riscv/kernel/Makefile
  123. @@ -67,6 +67,7 @@ obj-$(CONFIG_MODULES) += module.o
  124. obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
  125. obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o
  126. +obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
  127. obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
  128. obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
  129. --- a/arch/riscv/kernel/asm-offsets.c
  130. +++ b/arch/riscv/kernel/asm-offsets.c
  131. @@ -9,6 +9,7 @@
  132. #include <linux/kbuild.h>
  133. #include <linux/mm.h>
  134. #include <linux/sched.h>
  135. +#include <linux/suspend.h>
  136. #include <asm/kvm_host.h>
  137. #include <asm/thread_info.h>
  138. #include <asm/ptrace.h>
  139. @@ -116,6 +117,10 @@ void asm_offsets(void)
  140. OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
  141. + OFFSET(HIBERN_PBE_ADDR, pbe, address);
  142. + OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
  143. + OFFSET(HIBERN_PBE_NEXT, pbe, next);
  144. +
  145. OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
  146. OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
  147. OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
  148. --- /dev/null
  149. +++ b/arch/riscv/kernel/hibernate-asm.S
  150. @@ -0,0 +1,77 @@
  151. +/* SPDX-License-Identifier: GPL-2.0-only */
  152. +/*
  153. + * Hibernation low level support for RISCV.
  154. + *
  155. + * Copyright (C) 2023 StarFive Technology Co., Ltd.
  156. + *
  157. + * Author: Jee Heng Sia <[email protected]>
  158. + */
  159. +
  160. +#include <asm/asm.h>
  161. +#include <asm/asm-offsets.h>
  162. +#include <asm/assembler.h>
  163. +#include <asm/csr.h>
  164. +
  165. +#include <linux/linkage.h>
  166. +
  167. +/*
  168. + * int __hibernate_cpu_resume(void)
  169. + * Switch back to the hibernated image's page table prior to restoring the CPU
  170. + * context.
  171. + *
  172. + * Always returns 0
  173. + */
  174. +ENTRY(__hibernate_cpu_resume)
  175. + /* switch to hibernated image's page table. */
  176. + csrw CSR_SATP, s0
  177. + sfence.vma
  178. +
  179. + REG_L a0, hibernate_cpu_context
  180. +
  181. + suspend_restore_csrs
  182. + suspend_restore_regs
  183. +
  184. + /* Return zero value. */
  185. + mv a0, zero
  186. +
  187. + ret
  188. +END(__hibernate_cpu_resume)
  189. +
  190. +/*
  191. + * Prepare to restore the image.
  192. + * a0: satp of saved page tables.
  193. + * a1: satp of temporary page tables.
  194. + * a2: cpu_resume.
  195. + */
  196. +ENTRY(hibernate_restore_image)
  197. + mv s0, a0
  198. + mv s1, a1
  199. + mv s2, a2
  200. + REG_L s4, restore_pblist
  201. + REG_L a1, relocated_restore_code
  202. +
  203. + jalr a1
  204. +END(hibernate_restore_image)
  205. +
  206. +/*
  207. + * The below code will be executed from a 'safe' page.
  208. + * It first switches to the temporary page table, then starts to copy the pages
  209. + * back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
  210. + * to restore the CPU context.
  211. + */
  212. +ENTRY(hibernate_core_restore_code)
  213. + /* switch to temp page table. */
  214. + csrw satp, s1
  215. + sfence.vma
  216. +.Lcopy:
  217. + /* The below code will restore the hibernated image. */
  218. + REG_L a1, HIBERN_PBE_ADDR(s4)
  219. + REG_L a0, HIBERN_PBE_ORIG(s4)
  220. +
  221. + copy_page a0, a1
  222. +
  223. + REG_L s4, HIBERN_PBE_NEXT(s4)
  224. + bnez s4, .Lcopy
  225. +
  226. + jalr s2
  227. +END(hibernate_core_restore_code)
  228. --- /dev/null
  229. +++ b/arch/riscv/kernel/hibernate.c
  230. @@ -0,0 +1,427 @@
  231. +// SPDX-License-Identifier: GPL-2.0-only
  232. +/*
  233. + * Hibernation support for RISCV
  234. + *
  235. + * Copyright (C) 2023 StarFive Technology Co., Ltd.
  236. + *
  237. + * Author: Jee Heng Sia <[email protected]>
  238. + */
  239. +
  240. +#include <asm/barrier.h>
  241. +#include <asm/cacheflush.h>
  242. +#include <asm/mmu_context.h>
  243. +#include <asm/page.h>
  244. +#include <asm/pgalloc.h>
  245. +#include <asm/pgtable.h>
  246. +#include <asm/sections.h>
  247. +#include <asm/set_memory.h>
  248. +#include <asm/smp.h>
  249. +#include <asm/suspend.h>
  250. +
  251. +#include <linux/cpu.h>
  252. +#include <linux/memblock.h>
  253. +#include <linux/pm.h>
  254. +#include <linux/sched.h>
  255. +#include <linux/suspend.h>
  256. +#include <linux/utsname.h>
  257. +
  258. +/* The logical cpu number we should resume on, initialised to a non-cpu number. */
  259. +static int sleep_cpu = -EINVAL;
  260. +
  261. +/* Pointer to the temporary resume page table. */
  262. +static pgd_t *resume_pg_dir;
  263. +
  264. +/* CPU context to be saved. */
  265. +struct suspend_context *hibernate_cpu_context;
  266. +EXPORT_SYMBOL_GPL(hibernate_cpu_context);
  267. +
  268. +unsigned long relocated_restore_code;
  269. +EXPORT_SYMBOL_GPL(relocated_restore_code);
  270. +
  271. +/**
  272. + * struct arch_hibernate_hdr_invariants - container to store kernel build version.
  273. + * @uts_version: to save the build number and date so that we do not resume with
  274. + * a different kernel.
  275. + */
  276. +struct arch_hibernate_hdr_invariants {
  277. + char uts_version[__NEW_UTS_LEN + 1];
  278. +};
  279. +
  280. +/**
  281. + * struct arch_hibernate_hdr - helper parameters that help us to restore the image.
  282. + * @invariants: container to store kernel build version.
  283. + * @hartid: to make sure same boot_cpu executes the hibernate/restore code.
  284. + * @saved_satp: original page table used by the hibernated image.
  285. + * @restore_cpu_addr: the kernel's image address to restore the CPU context.
  286. + */
  287. +static struct arch_hibernate_hdr {
  288. + struct arch_hibernate_hdr_invariants invariants;
  289. + unsigned long hartid;
  290. + unsigned long saved_satp;
  291. + unsigned long restore_cpu_addr;
  292. +} resume_hdr;
  293. +
  294. +static void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
  295. +{
  296. + memset(i, 0, sizeof(*i));
  297. + memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
  298. +}
  299. +
  300. +/*
  301. + * Check if the given pfn is in the 'nosave' section.
  302. + */
  303. +int pfn_is_nosave(unsigned long pfn)
  304. +{
  305. + unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
  306. + unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
  307. +
  308. + return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn));
  309. +}
  310. +
  311. +void notrace save_processor_state(void)
  312. +{
  313. + WARN_ON(num_online_cpus() != 1);
  314. +}
  315. +
  316. +void notrace restore_processor_state(void)
  317. +{
  318. +}
  319. +
  320. +/*
  321. + * Helper parameters need to be saved to the hibernation image header.
  322. + */
  323. +int arch_hibernation_header_save(void *addr, unsigned int max_size)
  324. +{
  325. + struct arch_hibernate_hdr *hdr = addr;
  326. +
  327. + if (max_size < sizeof(*hdr))
  328. + return -EOVERFLOW;
  329. +
  330. + arch_hdr_invariants(&hdr->invariants);
  331. +
  332. + hdr->hartid = cpuid_to_hartid_map(sleep_cpu);
  333. + hdr->saved_satp = csr_read(CSR_SATP);
  334. + hdr->restore_cpu_addr = (unsigned long)__hibernate_cpu_resume;
  335. +
  336. + return 0;
  337. +}
  338. +EXPORT_SYMBOL_GPL(arch_hibernation_header_save);
  339. +
  340. +/*
  341. + * Retrieve the helper parameters from the hibernation image header.
  342. + */
  343. +int arch_hibernation_header_restore(void *addr)
  344. +{
  345. + struct arch_hibernate_hdr_invariants invariants;
  346. + struct arch_hibernate_hdr *hdr = addr;
  347. + int ret = 0;
  348. +
  349. + arch_hdr_invariants(&invariants);
  350. +
  351. + if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
  352. + pr_crit("Hibernate image not generated by this kernel!\n");
  353. + return -EINVAL;
  354. + }
  355. +
  356. + sleep_cpu = riscv_hartid_to_cpuid(hdr->hartid);
  357. + if (sleep_cpu < 0) {
  358. + pr_crit("Hibernated on a CPU not known to this kernel!\n");
  359. + sleep_cpu = -EINVAL;
  360. + return -EINVAL;
  361. + }
  362. +
  363. +#ifdef CONFIG_SMP
  364. + ret = bringup_hibernate_cpu(sleep_cpu);
  365. + if (ret) {
  366. + sleep_cpu = -EINVAL;
  367. + return ret;
  368. + }
  369. +#endif
  370. + resume_hdr = *hdr;
  371. +
  372. + return ret;
  373. +}
  374. +EXPORT_SYMBOL_GPL(arch_hibernation_header_restore);
  375. +
  376. +int swsusp_arch_suspend(void)
  377. +{
  378. + int ret = 0;
  379. +
  380. + if (__cpu_suspend_enter(hibernate_cpu_context)) {
  381. + sleep_cpu = smp_processor_id();
  382. + suspend_save_csrs(hibernate_cpu_context);
  383. + ret = swsusp_save();
  384. + } else {
  385. + suspend_restore_csrs(hibernate_cpu_context);
  386. + flush_tlb_all();
  387. + flush_icache_all();
  388. +
  389. + /*
  390. + * Tell the hibernation core that we've just restored the memory.
  391. + */
  392. + in_suspend = 0;
  393. + sleep_cpu = -EINVAL;
  394. + }
  395. +
  396. + return ret;
  397. +}
  398. +
  399. +static int temp_pgtable_map_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
  400. + unsigned long end, pgprot_t prot)
  401. +{
  402. + pte_t *src_ptep;
  403. + pte_t *dst_ptep;
  404. +
  405. + if (pmd_none(READ_ONCE(*dst_pmdp))) {
  406. + dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
  407. + if (!dst_ptep)
  408. + return -ENOMEM;
  409. +
  410. + pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
  411. + }
  412. +
  413. + dst_ptep = pte_offset_kernel(dst_pmdp, start);
  414. + src_ptep = pte_offset_kernel(src_pmdp, start);
  415. +
  416. + do {
  417. + pte_t pte = READ_ONCE(*src_ptep);
  418. +
  419. + if (pte_present(pte))
  420. + set_pte(dst_ptep, __pte(pte_val(pte) | pgprot_val(prot)));
  421. + } while (dst_ptep++, src_ptep++, start += PAGE_SIZE, start < end);
  422. +
  423. + return 0;
  424. +}
  425. +
  426. +static int temp_pgtable_map_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
  427. + unsigned long end, pgprot_t prot)
  428. +{
  429. + unsigned long next;
  430. + unsigned long ret;
  431. + pmd_t *src_pmdp;
  432. + pmd_t *dst_pmdp;
  433. +
  434. + if (pud_none(READ_ONCE(*dst_pudp))) {
  435. + dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
  436. + if (!dst_pmdp)
  437. + return -ENOMEM;
  438. +
  439. + pud_populate(NULL, dst_pudp, dst_pmdp);
  440. + }
  441. +
  442. + dst_pmdp = pmd_offset(dst_pudp, start);
  443. + src_pmdp = pmd_offset(src_pudp, start);
  444. +
  445. + do {
  446. + pmd_t pmd = READ_ONCE(*src_pmdp);
  447. +
  448. + next = pmd_addr_end(start, end);
  449. +
  450. + if (pmd_none(pmd))
  451. + continue;
  452. +
  453. + if (pmd_leaf(pmd)) {
  454. + set_pmd(dst_pmdp, __pmd(pmd_val(pmd) | pgprot_val(prot)));
  455. + } else {
  456. + ret = temp_pgtable_map_pte(dst_pmdp, src_pmdp, start, next, prot);
  457. + if (ret)
  458. + return -ENOMEM;
  459. + }
  460. + } while (dst_pmdp++, src_pmdp++, start = next, start != end);
  461. +
  462. + return 0;
  463. +}
  464. +
  465. +static int temp_pgtable_map_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
  466. + unsigned long end, pgprot_t prot)
  467. +{
  468. + unsigned long next;
  469. + unsigned long ret;
  470. + pud_t *dst_pudp;
  471. + pud_t *src_pudp;
  472. +
  473. + if (p4d_none(READ_ONCE(*dst_p4dp))) {
  474. + dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
  475. + if (!dst_pudp)
  476. + return -ENOMEM;
  477. +
  478. + p4d_populate(NULL, dst_p4dp, dst_pudp);
  479. + }
  480. +
  481. + dst_pudp = pud_offset(dst_p4dp, start);
  482. + src_pudp = pud_offset(src_p4dp, start);
  483. +
  484. + do {
  485. + pud_t pud = READ_ONCE(*src_pudp);
  486. +
  487. + next = pud_addr_end(start, end);
  488. +
  489. + if (pud_none(pud))
  490. + continue;
  491. +
  492. + if (pud_leaf(pud)) {
  493. + set_pud(dst_pudp, __pud(pud_val(pud) | pgprot_val(prot)));
  494. + } else {
  495. + ret = temp_pgtable_map_pmd(dst_pudp, src_pudp, start, next, prot);
  496. + if (ret)
  497. + return -ENOMEM;
  498. + }
  499. + } while (dst_pudp++, src_pudp++, start = next, start != end);
  500. +
  501. + return 0;
  502. +}
  503. +
  504. +static int temp_pgtable_map_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
  505. + unsigned long end, pgprot_t prot)
  506. +{
  507. + unsigned long next;
  508. + unsigned long ret;
  509. + p4d_t *dst_p4dp;
  510. + p4d_t *src_p4dp;
  511. +
  512. + if (pgd_none(READ_ONCE(*dst_pgdp))) {
  513. + dst_p4dp = (p4d_t *)get_safe_page(GFP_ATOMIC);
  514. + if (!dst_p4dp)
  515. + return -ENOMEM;
  516. +
  517. + pgd_populate(NULL, dst_pgdp, dst_p4dp);
  518. + }
  519. +
  520. + dst_p4dp = p4d_offset(dst_pgdp, start);
  521. + src_p4dp = p4d_offset(src_pgdp, start);
  522. +
  523. + do {
  524. + p4d_t p4d = READ_ONCE(*src_p4dp);
  525. +
  526. + next = p4d_addr_end(start, end);
  527. +
  528. + if (p4d_none(p4d))
  529. + continue;
  530. +
  531. + if (p4d_leaf(p4d)) {
  532. + set_p4d(dst_p4dp, __p4d(p4d_val(p4d) | pgprot_val(prot)));
  533. + } else {
  534. + ret = temp_pgtable_map_pud(dst_p4dp, src_p4dp, start, next, prot);
  535. + if (ret)
  536. + return -ENOMEM;
  537. + }
  538. + } while (dst_p4dp++, src_p4dp++, start = next, start != end);
  539. +
  540. + return 0;
  541. +}
  542. +
  543. +static int temp_pgtable_mapping(pgd_t *pgdp, unsigned long start, unsigned long end, pgprot_t prot)
  544. +{
  545. + pgd_t *dst_pgdp = pgd_offset_pgd(pgdp, start);
  546. + pgd_t *src_pgdp = pgd_offset_k(start);
  547. + unsigned long next;
  548. + unsigned long ret;
  549. +
  550. + do {
  551. + pgd_t pgd = READ_ONCE(*src_pgdp);
  552. +
  553. + next = pgd_addr_end(start, end);
  554. +
  555. + if (pgd_none(pgd))
  556. + continue;
  557. +
  558. + if (pgd_leaf(pgd)) {
  559. + set_pgd(dst_pgdp, __pgd(pgd_val(pgd) | pgprot_val(prot)));
  560. + } else {
  561. + ret = temp_pgtable_map_p4d(dst_pgdp, src_pgdp, start, next, prot);
  562. + if (ret)
  563. + return -ENOMEM;
  564. + }
  565. + } while (dst_pgdp++, src_pgdp++, start = next, start != end);
  566. +
  567. + return 0;
  568. +}
  569. +
  570. +static unsigned long relocate_restore_code(void)
  571. +{
  572. + void *page = (void *)get_safe_page(GFP_ATOMIC);
  573. +
  574. + if (!page)
  575. + return -ENOMEM;
  576. +
  577. + copy_page(page, hibernate_core_restore_code);
  578. +
  579. + /* Make the page containing the relocated code executable. */
  580. + set_memory_x((unsigned long)page, 1);
  581. +
  582. + return (unsigned long)page;
  583. +}
  584. +
  585. +int swsusp_arch_resume(void)
  586. +{
  587. + unsigned long end = (unsigned long)pfn_to_virt(max_low_pfn);
  588. + unsigned long start = PAGE_OFFSET;
  589. + int ret;
  590. +
  591. + /*
  592. + * Memory allocated by get_safe_page() will be dealt with by the hibernation core,
  593. + * we don't need to free it here.
  594. + */
  595. + resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
  596. + if (!resume_pg_dir)
  597. + return -ENOMEM;
  598. +
  599. + /*
  600. + * Create a temporary page table and map the whole linear region as executable and
  601. + * writable.
  602. + */
  603. + ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE | _PAGE_EXEC));
  604. + if (ret)
  605. + return ret;
  606. +
  607. + /* Move the restore code to a new page so that it doesn't get overwritten by itself. */
  608. + relocated_restore_code = relocate_restore_code();
  609. + if (relocated_restore_code == -ENOMEM)
  610. + return -ENOMEM;
  611. +
  612. + /*
  613. + * Map the __hibernate_cpu_resume() address to the temporary page table so that the
  614. + * restore code can jumps to it after finished restore the image. The next execution
  615. + * code doesn't find itself in a different address space after switching over to the
  616. + * original page table used by the hibernated image.
  617. + * The __hibernate_cpu_resume() mapping is unnecessary for RV32 since the kernel and
  618. + * linear addresses are identical, but different for RV64. To ensure consistency, we
  619. + * map it for both RV32 and RV64 kernels.
  620. + * Additionally, we should ensure that the page is writable before restoring the image.
  621. + */
  622. + start = (unsigned long)resume_hdr.restore_cpu_addr;
  623. + end = start + PAGE_SIZE;
  624. +
  625. + ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE));
  626. + if (ret)
  627. + return ret;
  628. +
  629. + hibernate_restore_image(resume_hdr.saved_satp, (PFN_DOWN(__pa(resume_pg_dir)) | satp_mode),
  630. + resume_hdr.restore_cpu_addr);
  631. +
  632. + return 0;
  633. +}
  634. +
  635. +#ifdef CONFIG_PM_SLEEP_SMP
  636. +int hibernate_resume_nonboot_cpu_disable(void)
  637. +{
  638. + if (sleep_cpu < 0) {
  639. + pr_err("Failing to resume from hibernate on an unknown CPU\n");
  640. + return -ENODEV;
  641. + }
  642. +
  643. + return freeze_secondary_cpus(sleep_cpu);
  644. +}
  645. +#endif
  646. +
  647. +static int __init riscv_hibernate_init(void)
  648. +{
  649. + hibernate_cpu_context = kzalloc(sizeof(*hibernate_cpu_context), GFP_KERNEL);
  650. +
  651. + if (WARN_ON(!hibernate_cpu_context))
  652. + return -ENOMEM;
  653. +
  654. + return 0;
  655. +}
  656. +
  657. +early_initcall(riscv_hibernate_init);