0010-x86-kvm-Cache-gfn-to-pfn-translation.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Boris Ostrovsky <[email protected]>
Date: Fri, 31 Jan 2020 08:06:42 -0300
Subject: [PATCH] x86/kvm: Cache gfn to pfn translation

CVE-2019-3016
CVE-2020-3016

__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
* relatively expensive
* in certain cases (such as when done from atomic context) cannot be called

Stashing gfn-to-pfn mapping should help with both cases.

This is part of CVE-2019-3016.

Signed-off-by: Boris Ostrovsky <[email protected]>
Reviewed-by: Joao Martins <[email protected]>
Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]>
Signed-off-by: Thomas Lamprecht <[email protected]>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/x86.c              | 10 ++++
 include/linux/kvm_host.h        |  7 ++-
 include/linux/kvm_types.h       |  9 ++-
 virt/kvm/kvm_main.c             | 98 ++++++++++++++++++++++++++-------
 5 files changed, 103 insertions(+), 22 deletions(-)
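
Note (backport commentary, not part of the commit message above): the
gfn_to_pfn_cache introduced below stashes the last translated gfn, its pfn
and the memslot generation, so a repeated map of the same guest page can
skip gfn_to_pfn_memslot() entirely and can be done from atomic context,
where a cache miss now fails with -EAGAIN instead of sleeping. The sketch
below shows how a caller might use the new kvm_map_gfn() signature for the
steal-time page; the helper name map_steal_time_page() is hypothetical
(the real caller is converted in a separate patch), while st.msr_val and
st.cache are the fields this series actually touches.

	/* Illustrative only -- assumes the usual KVM x86 kernel context. */
	static int map_steal_time_page(struct kvm_vcpu *vcpu,
				       struct kvm_host_map *map, bool atomic)
	{
		/*
		 * kvm_map_gfn() now takes the per-vCPU gfn_to_pfn_cache and an
		 * 'atomic' flag: a cache hit reuses the stashed pfn, a miss in
		 * atomic context returns -EAGAIN rather than calling the
		 * (possibly sleeping) gfn_to_pfn_memslot().
		 */
		return kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, map,
				   &vcpu->arch.st.cache, atomic);
	}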
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f68e174f452f..7c06343614a4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -678,6 +678,7 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		struct gfn_to_hva_cache stime;
 		struct kvm_steal_time steal;
+		struct gfn_to_pfn_cache cache;
 	} st;
 
 	u64 tsc_offset;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 92d8e4ebba16..41fee3d359ab 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8936,6 +8936,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
 
 	kvmclock_reset(vcpu);
 
@@ -9602,11 +9605,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+	struct kvm_vcpu *vcpu;
+	int i;
+
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+	/* Force re-initialization of steal_time cache */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index df4cc0ead363..abfc2fbde957 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 void kvm_get_pfn(kvm_pfn_t pfn);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
 
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -758,10 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index bde5374ae021..2382cb58969d 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,7 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 /*
  * Address types:
@@ -49,4 +49,11 @@ struct gfn_to_hva_cache {
 	struct kvm_memory_slot *memslot;
 };
 
+struct gfn_to_pfn_cache {
+	u64 generation;
+	gfn_t gfn;
+	kvm_pfn_t pfn;
+	bool dirty;
+};
+
 #endif /* __KVM_TYPES_H__ */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6614e030ae75..f05e5b5c30e8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1792,27 +1792,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+{
+	if (pfn == 0)
+		return;
+
+	if (cache)
+		cache->pfn = cache->gfn = 0;
+
+	if (dirty)
+		kvm_release_pfn_dirty(pfn);
+	else
+		kvm_release_pfn_clean(pfn);
+}
+
+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
+				 struct gfn_to_pfn_cache *cache, u64 gen)
+{
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
+	cache->gfn = gfn;
+	cache->dirty = false;
+	cache->generation = gen;
+}
+
 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-			 struct kvm_host_map *map)
+			 struct kvm_host_map *map,
+			 struct gfn_to_pfn_cache *cache,
+			 bool atomic)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
 	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+	u64 gen = slots->generation;
 
 	if (!map)
 		return -EINVAL;
 
-	pfn = gfn_to_pfn_memslot(slot, gfn);
+	if (cache) {
+		if (!cache->pfn || cache->gfn != gfn ||
+			cache->generation != gen) {
+			if (atomic)
+				return -EAGAIN;
+			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+		}
+		pfn = cache->pfn;
+	} else {
+		if (atomic)
+			return -EAGAIN;
+		pfn = gfn_to_pfn_memslot(slot, gfn);
+	}
 	if (is_error_noslot_pfn(pfn))
 		return -EINVAL;
 
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		hva = kmap(page);
+		if (atomic)
+			hva = kmap_atomic(page);
+		else
+			hva = kmap(page);
 #ifdef CONFIG_HAS_IOMEM
-	} else {
+	} else if (!atomic) {
 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
+	} else {
+		return -EINVAL;
 #endif
 	}
 
@@ -1827,20 +1872,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 	return 0;
 }
 
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+		struct gfn_to_pfn_cache *cache, bool atomic)
 {
-	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
+			cache, atomic);
 }
 EXPORT_SYMBOL_GPL(kvm_map_gfn);
 
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
+			NULL, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
 static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
-			struct kvm_host_map *map, bool dirty)
+			struct kvm_host_map *map,
+			struct gfn_to_pfn_cache *cache,
+			bool dirty, bool atomic)
 {
 	if (!map)
 		return;
@@ -1848,34 +1898,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
 	if (!map->hva)
 		return;
 
-	if (map->page != KVM_UNMAPPED_PAGE)
-		kunmap(map->page);
+	if (map->page != KVM_UNMAPPED_PAGE) {
+		if (atomic)
+			kunmap_atomic(map->hva);
+		else
+			kunmap(map->page);
+	}
 #ifdef CONFIG_HAS_IOMEM
-	else
+	else if (!atomic)
 		memunmap(map->hva);
+	else
+		WARN_ONCE(1, "Unexpected unmapping in atomic context");
 #endif
 
-	if (dirty) {
+	if (dirty)
 		mark_page_dirty_in_slot(memslot, map->gfn);
-		kvm_release_pfn_dirty(map->pfn);
-	} else {
-		kvm_release_pfn_clean(map->pfn);
-	}
+
+	if (cache)
+		cache->dirty |= dirty;
+	else
+		kvm_release_pfn(map->pfn, dirty, NULL);
 
 	map->hva = NULL;
 	map->page = NULL;
 }
 
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
 {
-	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+			cache, dirty, atomic);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
 
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
-	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
+			dirty, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);