author     Kirill A. Shutemov <kirill@shutemov.name>	2020-05-22 15:52:07 +0300
committer  Marc Zyngier <maz@kernel.org>	2020-09-15 19:24:22 +0100
commit     842821613f3ec8a3357ddb6b4b45740bccc38aeb (patch)
tree       521333cd5b9ac9621972f3fb49ca7d9446464af4
parent     96f446072c8cc0457388d8614170a3293dac45eb (diff)
KVM: Protected memory extension
Add infrastructure that handles protected memory extension.

Arch-specific code has to provide hypercalls and define non-zero
VM_KVM_PROTECTED.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
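As a rough sketch of the arch-side contract this implies (everything below is
illustrative and not part of this patch: the choice of VM_HIGH_ARCH_4, the
hypercall number, and the helper name are assumptions):

	/* Hypothetical arch definition: claim a spare high VMA flag bit. */
	#define VM_KVM_PROTECTED	VM_HIGH_ARCH_4

	/* Hypothetical guest-side helper: ask the host to protect npages of
	 * guest memory starting at gfn. kvm_hypercall2() is the existing x86
	 * guest helper; KVM_HC_MEM_PROTECT is a made-up hypercall number. */
	static long kvm_mem_protect(unsigned long gfn, unsigned long npages)
	{
		return kvm_hypercall2(KVM_HC_MEM_PROTECT, gfn, npages);
	}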
 include/linux/kvm_host.h |   4 +
 mm/mprotect.c            |   1 +
 virt/kvm/kvm_main.c      | 145 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 150 insertions(+), 0 deletions(-)
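On the host side, an arch hypercall handler could funnel into the new API
along these lines (again a hypothetical sketch; only kvm_protect_memory() and
kvm_protect_all_memory() come from this patch):

	/* Hypothetical host-side dispatch for the assumed hypercall. */
	static long handle_mem_protect_hcall(struct kvm *kvm, unsigned long gfn,
					     unsigned long npages)
	{
		/* Assumed convention: npages == 0 requests protecting all memory. */
		if (!npages)
			return kvm_protect_all_memory(kvm);
		return kvm_protect_memory(kvm, gfn, npages, true);
	}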
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 97388bac63078..7db56cd119c71 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -701,6 +701,10 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot);
 
+int kvm_protect_all_memory(struct kvm *kvm);
+int kvm_protect_memory(struct kvm *kvm,
+		       unsigned long gfn, unsigned long npages, bool protect);
+
 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 			    struct page **pages, int nr_pages);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ce8b8a5eacbb6..26bd1a1da8750 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -505,6 +505,7 @@ fail:
 	vm_unacct_memory(charged);
 	return error;
 }
+EXPORT_SYMBOL_GPL(mprotect_fixup);
 
 /*
  * pkey==-1 when doing a legacy mprotect()
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4b6324c7463f4..deccd9cff98ce 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -154,6 +154,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+static int protect_memory(unsigned long start, unsigned long end, bool protect);
+
 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 						   unsigned long start, unsigned long end)
 {
@@ -1371,6 +1373,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (r)
 		goto out_bitmap;
 
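+	/* A slot added to a protected VM must itself be protected */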
+	if (mem->memory_size && kvm->mem_protected) {
+		r = protect_memory(new.userspace_addr,
+				   new.userspace_addr + new.npages * PAGE_SIZE,
+				   true);
+		if (r)
+			goto out_bitmap;
+	}
+
 	if (old.dirty_bitmap && !new.dirty_bitmap)
 		kvm_destroy_dirty_bitmap(&old);
 	return 0;
@@ -2726,6 +2736,140 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
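+/*
+ * Set or clear VM_KVM_PROTECTED on every VMA backing [start, end).
+ * The range must be fully covered by VMAs; a hole yields -ENOMEM.
+ */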
+static int protect_memory(unsigned long start, unsigned long end, bool protect)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma, *prev;
+	int ret;
+
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
+	ret = -ENOMEM;
+	vma = find_vma(mm, start);
+	if (!vma)
+		goto out;
+
+	ret = -EINVAL;
+	if (vma->vm_start > start)
+		goto out;
+
+	if (start > vma->vm_start)
+		prev = vma;
+	else
+		prev = vma->vm_prev;
+
+	ret = 0;
+	while (true) {
+		unsigned long newflags, tmp;
+
+		tmp = vma->vm_end;
+		if (tmp > end)
+			tmp = end;
+
+		newflags = vma->vm_flags;
+		if (protect)
+			newflags |= VM_KVM_PROTECTED;
+		else
+			newflags &= ~VM_KVM_PROTECTED;
+
+		/* The VMA has already been handled as part of another memslot */
+		if (newflags == vma->vm_flags) {
+			prev = vma;
+			goto next;
+		}
+
+		ret = mprotect_fixup(vma, &prev, start, tmp, newflags);
+		if (ret)
+			goto out;
+
+next:
+		start = tmp;
+		if (start < prev->vm_end)
+			start = prev->vm_end;
+
+		if (start >= end)
+			goto out;
+
+		vma = prev->vm_next;
+		if (!vma || vma->vm_start != start) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+out:
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
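+/*
+ * Protect or unprotect the host memory backing npages of guest memory
+ * starting at gfn. The range must not cross a memslot boundary.
+ */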
+int kvm_protect_memory(struct kvm *kvm,
+		       unsigned long gfn, unsigned long npages, bool protect)
+{
+	struct kvm_memory_slot *memslot;
+	unsigned long start, end;
+	gfn_t numpages;
+
+	if (!VM_KVM_PROTECTED)
+		return -KVM_ENOSYS;
+
+	if (!npages)
+		return 0;
+
+	memslot = gfn_to_memslot(kvm, gfn);
+	/* Not backed by memory. It's okay. */
+	if (!memslot)
+		return 0;
+
+	start = gfn_to_hva_many(memslot, gfn, &numpages);
+	end = start + npages * PAGE_SIZE;
+
+	/* XXX: support ranges that span multiple memory slots? */
+	if (WARN_ON(numpages < npages))
+		return -EINVAL;
+
+	return protect_memory(start, end, protect);
+}
+EXPORT_SYMBOL_GPL(kvm_protect_memory);
+
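+/*
+ * Mark the VM protected and protect the memory of every existing memslot.
+ */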
+int kvm_protect_all_memory(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	unsigned long start, end;
+	int i, ret = 0;
+
+	if (!VM_KVM_PROTECTED)
+		return -KVM_ENOSYS;
+
+	mutex_lock(&kvm->slots_lock);
+	kvm->mem_protected = true;
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+		kvm_for_each_memslot(memslot, slots) {
+			start = memslot->userspace_addr;
+			end = start + memslot->npages * PAGE_SIZE;
+			ret = protect_memory(start, end, true);
+			if (ret)
+				goto out;
+		}
+	}
+out:
+	mutex_unlock(&kvm->slots_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_protect_all_memory);
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
 {
 	if (!vcpu->sigset_active)