820-kvm-support-layerscape.patch

From fe22151c95c02c6bb145ea6c3685941e8fb09d60 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <[email protected]>
Date: Thu, 5 Jul 2018 17:43:16 +0800
Subject: [PATCH 32/32] kvm: support layerscape

This is an integrated patch for layerscape kvm support.

Signed-off-by: Laurentiu Tudor <[email protected]>
Signed-off-by: Bharat Bhushan <[email protected]>
Signed-off-by: Ioana Ciornei <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
---
 arch/arm/include/asm/kvm_mmu.h   |  3 +-
 arch/arm/kvm/mmu.c               | 56 ++++++++++++++++++++++++++++++--
 arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
 virt/kvm/arm/vgic/vgic-its.c     | 24 +++++++++++---
 virt/kvm/arm/vgic/vgic-v2.c      |  3 +-
 5 files changed, 88 insertions(+), 12 deletions(-)
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1020,9 +1020,11 @@ static int stage2_pmdp_test_and_clear_yo
  * @guest_ipa:	The IPA at which to insert the mapping
  * @pa:		The physical address of the device
  * @size:	The size of the mapping
+ * @prot:	S2 page translation bits
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable)
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot)
 {
 	phys_addr_t addr, end;
 	int ret = 0;
@@ -1033,7 +1035,7 @@ int kvm_phys_addr_ioremap(struct kvm *kv
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		pte_t pte = pfn_pte(pfn, prot);
 
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
@@ -1057,6 +1059,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+	case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+	case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+	case PTE_ATTRINDX(MT_DEVICE_GRE):
+		return PAGE_S2_DEVICE;
+	case PTE_ATTRINDX(MT_NORMAL_NC):
+	case PTE_ATTRINDX(MT_NORMAL):
+		return (pgprot_val(prot) & PTE_SHARED)
+			? PAGE_S2
+			: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1308,6 +1334,19 @@ static int user_mem_abort(struct kvm_vcp
 		hugetlb = true;
 		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
 	} else {
+		if (!is_vm_hugetlb_page(vma)) {
+			pte_t *pte;
+			spinlock_t *ptl;
+			pgprot_t prot;
+
+			pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+			if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+				mem_type = PAGE_S2_NS;
+#endif
+		}
 		/*
 		 * Pages belonging to memslots that don't have the same
 		 * alignment for userspace and IPA cannot be mapped using
@@ -1345,6 +1384,11 @@ static int user_mem_abort(struct kvm_vcp
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -1882,6 +1926,9 @@ int kvm_arch_prepare_memory_region(struc
 		gpa_t gpa = mem->guest_phys_addr +
 			    (vm_start - mem->userspace_addr);
 		phys_addr_t pa;
+		pgprot_t prot;
+		pte_t *pte;
+		spinlock_t *ptl;
 
 		pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 		pa += vm_start - vma->vm_start;
@@ -1891,10 +1938,13 @@ int kvm_arch_prepare_memory_region(struc
 			ret = -EINVAL;
 			goto out;
 		}
 
+		pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+		prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+		pte_unmap_unlock(pte, ptl);
 		ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 					    vm_end - vm_start,
-					    writable);
+					    writable, prot);
 		if (ret)
 			break;
 	}
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
@@ -274,8 +275,15 @@ static inline void __coherent_cache_gues
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	if (pfn_valid(pte_pfn(pte))) {
+		struct page *page = pte_page(pte);
+		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	} else {
+		void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		iounmap(va);
+	}
 }
 
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -176,6 +176,8 @@ static struct its_itte *find_itte(struct
 #define GIC_LPI_OFFSET 8192
 
+#define VITS_TYPER_DEVBITS 17
+
 /*
  * Finds and returns a collection in the ITS collection table.
  * Must be called with the its_lock mutex held.
@@ -375,7 +377,7 @@ static unsigned long vgic_mmio_read_its_
 	 * To avoid memory waste in the guest, we keep the number of IDBits and
 	 * DevBits low - as least for the time being.
 	 */
-	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
+	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
 	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;
 
 	return extract_bytes(reg, addr & 7, len);
@@ -601,16 +603,30 @@ static int vgic_its_cmd_handle_movi(stru
  * Check whether an ID can be stored into the corresponding guest table.
  * For a direct table this is pretty easy, but gets a bit nasty for
  * indirect tables. We check whether the resulting guest physical address
- * is actually valid (covered by a memslot and guest accessbible).
+ * is actually valid (covered by a memslot and guest accessible).
  * For this we have to read the respective first level entry.
  */
-static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
+static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id)
 {
 	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
+	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
 	int index;
-	u64 indirect_ptr;
 	gfn_t gfn;
 
+	switch (type) {
+	case GITS_BASER_TYPE_DEVICE:
+		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
+			return false;
+		break;
+	case GITS_BASER_TYPE_COLLECTION:
+		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
+		if (id >= BIT_ULL(16))
+			return false;
+		break;
+	default:
+		return false;
+	}
+
 	if (!(baser & GITS_BASER_INDIRECT)) {
 		phys_addr_t addr;
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -290,7 +290,8 @@ int vgic_v2_map_resources(struct kvm *kv
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
-					    KVM_VGIC_V2_CPU_SIZE, true);
+					    KVM_VGIC_V2_CPU_SIZE, true,
+					    PAGE_S2_DEVICE);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
 			goto out;