From e217a195d0cf9fb80f6fc072d4bf6b1f612c004b Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Thu, 15 Apr 2021 15:53:14 +0000 Subject: [PATCH 01/35] KVM: SVM: Add KVM_SEV SEND_START command mainline inclusion from mainline-v5.14 commit 4cfdd47d6d95aca4fb8d6cfbe73392472d353f82 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=4cfdd47d6d95aca4fb8d6cfbe73392472d353f82 --------------------------- The command is used to create an outgoing SEV guest encryption context. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Reviewed-by: Venu Busireddy Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Message-Id: <2f1686d0164e0f1b3d6a41d620408393e0a48376.1618498113.git.ashish.kalra@amd.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- .../virt/kvm/amd-memory-encryption.rst | 30 ++++ arch/x86/kvm/svm/sev.c | 128 ++++++++++++++++++ include/linux/psp-sev.h | 8 +- include/uapi/linux/kvm.h | 12 ++ 4 files changed, 174 insertions(+), 4 deletions(-) diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index 09a8f2a34e39..c8b409657378 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -263,6 +263,36 @@ Returns: 0 on success, -negative on error __u32 trans_len; }; +11. KVM_SEV_SEND_START +---------------------- + +The KVM_SEV_SEND_START command can be used by the hypervisor to create an +outgoing guest encryption context. + +If session_len is zero on entry, the length of the guest session information is +written to session_len and all other fields are not used. 
+ +Parameters (in): struct kvm_sev_send_start + +Returns: 0 on success, -negative on error + +:: + struct kvm_sev_send_start { + __u32 policy; /* guest policy */ + + __u64 pdh_cert_uaddr; /* platform Diffie-Hellman certificate */ + __u32 pdh_cert_len; + + __u64 plat_certs_uaddr; /* platform certificate chain */ + __u32 plat_certs_len; + + __u64 amd_certs_uaddr; /* AMD certificate */ + __u32 amd_certs_len; + + __u64 session_uaddr; /* Guest session information */ + __u32 session_len; + }; + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index babb900e49d8..1ab1e98c3d42 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1074,6 +1074,131 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query session length. */ +static int +__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_start *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_start *data; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (data == NULL) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, data, &argp->error); + if (ret < 0) + goto out; + + params->session_len = data->session_len; + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, + sizeof(struct kvm_sev_send_start))) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_start *data; + struct kvm_sev_send_start params; + void *amd_certs, *session_data; + void *pdh_cert, *plat_certs; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_start))) + return -EFAULT; + + /* if session_len is zero, userspace wants to query 
the session length */ + if (!params.session_len) + return __sev_send_start_query_session_length(kvm, argp, + ¶ms); + + /* some sanity checks */ + if (!params.pdh_cert_uaddr || !params.pdh_cert_len || + !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE) + return -EINVAL; + + /* allocate the memory to hold the session data blob */ + session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT); + if (!session_data) + return -ENOMEM; + + /* copy the certificate blobs from userspace */ + pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr, + params.pdh_cert_len); + if (IS_ERR(pdh_cert)) { + ret = PTR_ERR(pdh_cert); + goto e_free_session; + } + + plat_certs = psp_copy_user_blob(params.plat_certs_uaddr, + params.plat_certs_len); + if (IS_ERR(plat_certs)) { + ret = PTR_ERR(plat_certs); + goto e_free_pdh; + } + + amd_certs = psp_copy_user_blob(params.amd_certs_uaddr, + params.amd_certs_len); + if (IS_ERR(amd_certs)) { + ret = PTR_ERR(amd_certs); + goto e_free_plat_cert; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (data == NULL) { + ret = -ENOMEM; + goto e_free_amd_cert; + } + + /* populate the FW SEND_START field with system physical address */ + data->pdh_cert_address = __psp_pa(pdh_cert); + data->pdh_cert_len = params.pdh_cert_len; + data->plat_certs_address = __psp_pa(plat_certs); + data->plat_certs_len = params.plat_certs_len; + data->amd_certs_address = __psp_pa(amd_certs); + data->amd_certs_len = params.amd_certs_len; + data->session_address = __psp_pa(session_data); + data->session_len = params.session_len; + data->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, data, &argp->error); + + if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr, + session_data, params.session_len)) { + ret = -EFAULT; + goto e_free; + } + + params.policy = data->policy; + params.session_len = data->session_len; + if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, + sizeof(struct kvm_sev_send_start))) + 
ret = -EFAULT; + +e_free: + kfree(data); +e_free_amd_cert: + kfree(amd_certs); +e_free_plat_cert: + kfree(plat_certs); +e_free_pdh: + kfree(pdh_cert); +e_free_session: + kfree(session_data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1124,6 +1249,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_LAUNCH_SECRET: r = sev_launch_secret(kvm, &sev_cmd); break; + case KVM_SEV_SEND_START: + r = sev_send_start(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index b7fcd83da6a9..8f498fdbba4f 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -318,11 +318,11 @@ struct sev_data_send_start { u64 pdh_cert_address; /* In */ u32 pdh_cert_len; /* In */ u32 reserved1; - u64 plat_cert_address; /* In */ - u32 plat_cert_len; /* In */ + u64 plat_certs_address; /* In */ + u32 plat_certs_len; /* In */ u32 reserved2; - u64 amd_cert_address; /* In */ - u32 amd_cert_len; /* In */ + u64 amd_certs_address; /* In */ + u32 amd_certs_len; /* In */ u32 reserved3; u64 session_address; /* In */ u32 session_len; /* In/Out */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index f27776e44095..434bf73482b5 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1756,6 +1756,18 @@ struct kvm_sev_dbg { __u32 len; }; +struct kvm_sev_send_start { + __u32 policy; + __u64 pdh_cert_uaddr; + __u32 pdh_cert_len; + __u64 plat_certs_uaddr; + __u32 plat_certs_len; + __u64 amd_certs_uaddr; + __u32 amd_certs_len; + __u64 session_uaddr; + __u32 session_len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From 66b2bcbe9810a71e1e0fa189c0eb938f7e40e75e Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Thu, 15 Apr 2021 15:53:55 +0000 Subject: [PATCH 02/35] KVM: SVM: Add KVM_SEND_UPDATE_DATA command mainline inclusion 
from mainline-v5.14 commit d3d1af85e2c75bb57da51535a6e182c7c45eceb0 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d3d1af85e2c75bb57da51535a6e182c7c45eceb0 --------------------------- The command is used for encrypting the guest memory region using the encryption context created with KVM_SEV_SEND_START. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by : Steve Rutherford Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- .../virt/kvm/amd-memory-encryption.rst | 28 ++++ arch/x86/kvm/svm/sev.c | 125 ++++++++++++++++++ include/uapi/linux/kvm.h | 9 ++ 3 files changed, 162 insertions(+) diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index c8b409657378..5b4b8100e789 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -293,6 +293,34 @@ Returns: 0 on success, -negative on error __u32 session_len; }; +12. KVM_SEV_SEND_UPDATE_DATA +---------------------------- + +The KVM_SEV_SEND_UPDATE_DATA command can be used by the hypervisor to encrypt the +outgoing guest memory region with the encryption context creating using +KVM_SEV_SEND_START. + +If hdr_len or trans_len are zero on entry, the length of the packet header and +transport region are written to hdr_len and trans_len respectively, and all +other fields are not used. 
+ +Parameters (in): struct kvm_sev_send_update_data + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_send_update_data { + __u64 hdr_uaddr; /* userspace address containing the packet header */ + __u32 hdr_len; + + __u64 guest_uaddr; /* the source memory region to be encrypted */ + __u32 guest_len; + + __u64 trans_uaddr; /* the destination memory region */ + __u32 trans_len; + }; + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 1ab1e98c3d42..cfbb507a28cd 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -46,6 +46,7 @@ static DECLARE_RWSEM(sev_deactivate_lock); static DEFINE_MUTEX(sev_bitmap_lock); unsigned int max_sev_asid; static unsigned int min_sev_asid; +static unsigned long sev_me_mask; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; @@ -1199,6 +1200,126 @@ static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_update_data *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + int ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (!data) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, data, &argp->error); + if (ret < 0) + goto out; + + params->hdr_len = data->hdr_len; + params->trans_len = data->trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, + sizeof(struct kvm_sev_send_update_data))) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return __sev_send_update_data_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if ((params.guest_len + offset > PAGE_SIZE)) + return -EINVAL; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (!guest_page) + return -EFAULT; + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + goto e_unpin; + + trans_data = 
kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_trans_data; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, data, &argp->error); + + if (ret) + goto e_free; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Copy packet header to userspace. */ + ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len); + +e_free: + kfree(data); +e_free_trans_data: + kfree(trans_data); +e_free_hdr: + kfree(hdr); +e_unpin: + sev_unpin_memory(kvm, guest_page, n); + + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1252,6 +1373,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_START: r = sev_send_start(kvm, &sev_cmd); break; + case KVM_SEV_SEND_UPDATE_DATA: + r = sev_send_update_data(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; @@ -1456,6 +1580,7 @@ void __init sev_hardware_setup(void) /* Minimum ASID value that should be used for SEV guest */ min_sev_asid = edx; + sev_me_mask = 1UL << (ebx & 0x3f); /* Initialize SEV ASID bitmaps */ sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 434bf73482b5..1d43090832ca 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1768,6 +1768,15 @@ struct kvm_sev_send_start { 
__u32 session_len; }; +struct kvm_sev_send_update_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_uaddr; + __u32 guest_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From e782a801e74711ee71b077f0a409c8a1b930ae92 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Thu, 15 Apr 2021 15:54:15 +0000 Subject: [PATCH 03/35] KVM: SVM: Add KVM_SEV_SEND_FINISH command mainline inclusion from mainline-v5.14 commit fddecf6a237ee464db7a1771fad6507d8c180c03 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=fddecf6a237ee464db7a1771fad6507d8c180c03 --------------------------- The command is used to finailize the encryption context created with KVM_SEV_SEND_START command. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Message-Id: <5082bd6a8539d24bc55a1dd63a1b341245bb168f.1618498113.git.ashish.kalra@amd.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- .../virt/kvm/amd-memory-encryption.rst | 8 +++++++ arch/x86/kvm/svm/sev.c | 23 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index 5b4b8100e789..4f8f05789dad 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -321,6 +321,14 @@ Returns: 0 on success, -negative on error __u32 trans_len; }; +13. 
KVM_SEV_SEND_FINISH +------------------------ + +After completion of the migration flow, the KVM_SEV_SEND_FINISH command can be +issued by the hypervisor to delete the encryption context. + +Returns: 0 on success, -negative on error + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index cfbb507a28cd..abce1dc3d2e6 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1320,6 +1320,26 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_finish *data; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, data, &argp->error); + + kfree(data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1376,6 +1396,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_UPDATE_DATA: r = sev_send_update_data(kvm, &sev_cmd); break; + case KVM_SEV_SEND_FINISH: + r = sev_send_finish(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; -- Gitee From 483ffff69c3e60474cbcc9182df803250dcd3724 Mon Sep 17 00:00:00 2001 From: Steve Rutherford Date: Tue, 20 Apr 2021 05:01:20 -0400 Subject: [PATCH 04/35] KVM: SVM: Add support for KVM_SEV_SEND_CANCEL command mainline inclusion from mainline-v5.14 commit 5569e2e7a650dfffd4df7635662b2f92162d6501 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5569e2e7a650dfffd4df7635662b2f92162d6501 --------------------------- After completion of SEND_START, but before SEND_FINISH, the source VMM can issue the SEND_CANCEL command to stop a migration. 
This is necessary so that a cancelled migration can restart with a new target later. Reviewed-by: Nathan Tempelman Reviewed-by: Brijesh Singh Signed-off-by: Steve Rutherford Message-Id: <20210412194408.2458827-1-srutherford@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- .../virt/kvm/amd-memory-encryption.rst | 9 ++++++++ arch/x86/kvm/svm/sev.c | 23 +++++++++++++++++++ drivers/crypto/ccp/sev-dev.c | 1 + include/linux/psp-sev.h | 10 ++++++++ include/uapi/linux/kvm.h | 2 ++ 5 files changed, 45 insertions(+) diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index 4f8f05789dad..dacf695f7b3d 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -329,6 +329,15 @@ issued by the hypervisor to delete the encryption context. Returns: 0 on success, -negative on error +14. KVM_SEV_SEND_CANCEL +------------------------ + +After completion of SEND_START, but before SEND_FINISH, the source VMM can issue the +SEND_CANCEL command to stop a migration. This is necessary so that a cancelled +migration can restart with a new target later. 
+ +Returns: 0 on success, -negative on error + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index abce1dc3d2e6..18448f676173 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1340,6 +1340,26 @@ static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_cancel *data; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, data, &argp->error); + + kfree(data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1399,6 +1419,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_FINISH: r = sev_send_finish(kvm, &sev_cmd); break; + case KVM_SEV_SEND_CANCEL: + r = sev_send_cancel(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index e1908c99000a..3e2b1acf5570 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -152,6 +152,7 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); + case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel); default: return 0; } diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 8f498fdbba4f..854fc935db47 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -65,6 +65,7 @@ enum sev_cmd { SEV_CMD_SEND_UPDATE_DATA = 0x041, SEV_CMD_SEND_UPDATE_VMSA = 0x042, SEV_CMD_SEND_FINISH = 0x043, + SEV_CMD_SEND_CANCEL = 0x044, /* 
Guest migration commands (incoming) */ SEV_CMD_RECEIVE_START = 0x050, @@ -384,6 +385,15 @@ struct sev_data_send_finish { u32 handle; /* In */ } __packed; +/** + * struct sev_data_send_cancel - SEND_CANCEL command parameters + * + * @handle: handle of the VM to process + */ +struct sev_data_send_cancel { + u32 handle; /* In */ +} __packed; + /** * struct sev_data_receive_start - RECEIVE_START command parameters * diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 1d43090832ca..c3e8d1cc647d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1704,6 +1704,8 @@ enum sev_cmd_id { KVM_SEV_DBG_ENCRYPT, /* Guest certificates commands */ KVM_SEV_CERT_EXPORT, + /* Guest Migration Extension */ + KVM_SEV_SEND_CANCEL, KVM_SEV_NR_MAX, }; -- Gitee From 2b5b39eb42310be570dee00851dd1d30535a49f5 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Thu, 15 Apr 2021 15:54:50 +0000 Subject: [PATCH 05/35] KVM: SVM: Add support for KVM_SEV_RECEIVE_START command mainline inclusion from mainline-v5.14 commit af43cbbf954b50ca97d5e7bb56c2edc6ffd209ef category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=af43cbbf954b50ca97d5e7bb56c2edc6ffd209ef --------------------------- The command is used to create the encryption context for an incoming SEV guest. The encryption context can be later used by the hypervisor to import the incoming data into the SEV guest memory space. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- .../virt/kvm/amd-memory-encryption.rst | 29 +++++++ arch/x86/kvm/svm/sev.c | 81 +++++++++++++++++++ include/uapi/linux/kvm.h | 9 +++ 3 files changed, 119 insertions(+) diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index dacf695f7b3d..3dd3e672d917 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -338,6 +338,35 @@ migration can restart with a new target later. Returns: 0 on success, -negative on error +15. KVM_SEV_RECEIVE_START +------------------------ + +The KVM_SEV_RECEIVE_START command is used for creating the memory encryption +context for an incoming SEV guest. To create the encryption context, the user must +provide a guest policy, the platform public Diffie-Hellman (PDH) key and session +information. + +Parameters: struct kvm_sev_receive_start (in/out) + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_receive_start { + __u32 handle; /* if zero then firmware creates a new handle */ + __u32 policy; /* guest's policy */ + + __u64 pdh_uaddr; /* userspace address pointing to the PDH key */ + __u32 pdh_len; + + __u64 session_uaddr; /* userspace address which points to the guest session information */ + __u32 session_len; + }; + +On success, the 'handle' field contains a new handle and on error, a negative value. + +For more details, see SEV spec Section 6.12. 
+ References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 18448f676173..6a6c3e799437 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1360,6 +1360,84 @@ static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_receive_start *start; + struct kvm_sev_receive_start params; + int *error = &argp->error; + void *session_data; + void *pdh_data; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + /* Get parameter from the userspace */ + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_receive_start))) + return -EFAULT; + + /* some sanity checks */ + if (!params.pdh_uaddr || !params.pdh_len || + !params.session_uaddr || !params.session_len) + return -EINVAL; + + pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len); + if (IS_ERR(pdh_data)) + return PTR_ERR(pdh_data); + + session_data = psp_copy_user_blob(params.session_uaddr, + params.session_len); + if (IS_ERR(session_data)) { + ret = PTR_ERR(session_data); + goto e_free_pdh; + } + + ret = -ENOMEM; + start = kzalloc(sizeof(*start), GFP_KERNEL); + if (!start) + goto e_free_session; + + start->handle = params.handle; + start->policy = params.policy; + start->pdh_cert_address = __psp_pa(pdh_data); + start->pdh_cert_len = params.pdh_len; + start->session_address = __psp_pa(session_data); + start->session_len = params.session_len; + + /* create memory encryption context */ + ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, start, + error); + if (ret) + goto e_free; + + /* Bind ASID to this guest */ + ret = sev_bind_asid(kvm, start->handle, error); + if (ret) + goto e_free; + + params.handle = start->handle; + if (copy_to_user((void __user *)(uintptr_t)argp->data, + ¶ms, sizeof(struct kvm_sev_receive_start))) { + ret = -EFAULT; + 
sev_unbind_asid(kvm, start->handle); + goto e_free; + } + + sev->handle = start->handle; + sev->fd = argp->sev_fd; + +e_free: + kfree(start); +e_free_session: + kfree(session_data); +e_free_pdh: + kfree(pdh_data); + + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1422,6 +1500,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_CANCEL: r = sev_send_cancel(kvm, &sev_cmd); break; + case KVM_SEV_RECEIVE_START: + r = sev_receive_start(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index c3e8d1cc647d..5b0d4325e2c6 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1779,6 +1779,15 @@ struct kvm_sev_send_update_data { __u32 trans_len; }; +struct kvm_sev_receive_start { + __u32 handle; + __u32 policy; + __u64 pdh_uaddr; + __u32 pdh_len; + __u64 session_uaddr; + __u32 session_len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From cb1ae5653aa0c80f61e8ef8969194e1fc1b48b2f Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Thu, 15 Apr 2021 15:55:17 +0000 Subject: [PATCH 06/35] KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_DATA command mainline inclusion from mainline-v5.14 commit 15fb7de1a7f5af0d5910ca4352b26f887543e26e category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=15fb7de1a7f5af0d5910ca4352b26f887543e26e --------------------------- The command is used for copying the incoming buffer into the SEV guest memory space. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- .../virt/kvm/amd-memory-encryption.rst | 24 ++++++ arch/x86/kvm/svm/sev.c | 79 +++++++++++++++++++ include/uapi/linux/kvm.h | 9 +++ 3 files changed, 112 insertions(+) diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index 3dd3e672d917..8ae79af6b57a 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -367,6 +367,30 @@ On success, the 'handle' field contains a new handle and on error, a negative va For more details, see SEV spec Section 6.12. +16. KVM_SEV_RECEIVE_UPDATE_DATA +---------------------------- + +The KVM_SEV_RECEIVE_UPDATE_DATA command can be used by the hypervisor to copy +the incoming buffers into the guest memory region with encryption context +created during the KVM_SEV_RECEIVE_START. 
+ +Parameters (in): struct kvm_sev_receive_update_data + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_receive_update_data { + __u64 hdr_uaddr; /* userspace address containing the packet header */ + __u32 hdr_len; + + __u64 guest_uaddr; /* the destination guest memory region */ + __u32 guest_len; + + __u64 trans_uaddr; /* the incoming buffer memory region */ + __u32 trans_len; + }; + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 6a6c3e799437..6b3b8956fe49 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1438,6 +1438,82 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data *data; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if ((params.guest_len + offset > PAGE_SIZE)) + return -EINVAL; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto e_free_hdr; + } + + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_trans; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans); + 
data->trans_len = params.trans_len; + + /* Pin guest memory */ + ret = -EFAULT; + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (!guest_page) + goto e_free; + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, data, + &argp->error); + + sev_unpin_memory(kvm, guest_page, n); + +e_free: + kfree(data); +e_free_trans: + kfree(trans); +e_free_hdr: + kfree(hdr); + + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1503,6 +1579,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_START: r = sev_receive_start(kvm, &sev_cmd); break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + r = sev_receive_update_data(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5b0d4325e2c6..8c82b83a7468 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1788,6 +1788,15 @@ struct kvm_sev_receive_start { __u32 session_len; }; +struct kvm_sev_receive_update_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_uaddr; + __u32 guest_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From 08deb005dc52fb643c065d11241070bb9b9f9fa6 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Thu, 15 Apr 2021 15:55:40 +0000 Subject: [PATCH 07/35] KVM: SVM: Add KVM_SEV_RECEIVE_FINISH command mainline inclusion from mainline-v5.14 commit 6a443def87d2698f4fa2d7b57e7f4e5f0f61671a category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=6a443def87d2698f4fa2d7b57e7f4e5f0f61671a --------------------------- The command finalizes the guest receiving process and makes the SEV guest ready for execution. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- .../virt/kvm/amd-memory-encryption.rst | 8 +++++++ arch/x86/kvm/svm/sev.c | 23 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst index 8ae79af6b57a..c767d4579671 100644 --- a/Documentation/virt/kvm/amd-memory-encryption.rst +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -391,6 +391,14 @@ Returns: 0 on success, -negative on error __u32 trans_len; }; +17. KVM_SEV_RECEIVE_FINISH +-------------------------- + +After completion of the migration flow, the KVM_SEV_RECEIVE_FINISH command can be +issued by the hypervisor to make the guest ready for execution.
+ +Returns: 0 on success, -negative on error + References ========== diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 6b3b8956fe49..26f3e368f1e5 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1514,6 +1514,26 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_receive_finish *data; + int ret; + + if (!sev_guest(kvm)) + return -ENOTTY; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, data, &argp->error); + + kfree(data); + return ret; +} + int svm_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1582,6 +1602,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_UPDATE_DATA: r = sev_receive_update_data(kvm, &sev_cmd); break; + case KVM_SEV_RECEIVE_FINISH: + r = sev_receive_finish(kvm, &sev_cmd); + break; default: r = -EINVAL; goto out; -- Gitee From a8fbecdb8a0cab40b8d0e9724d90c42f772fd937 Mon Sep 17 00:00:00 2001 From: Ashish Kalra Date: Tue, 8 Jun 2021 18:05:43 +0000 Subject: [PATCH 08/35] KVM: X86: Introduce KVM_HC_MAP_GPA_RANGE hypercall mainline inclusion from mainline-v5.14 commit 0dbb11230437895f7cd6fc55da61cef011e997d8 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=0dbb11230437895f7cd6fc55da61cef011e997d8 --------------------------- This hypercall is used by the SEV guest to notify a change in the page encryption status to the hypervisor. The hypercall should be invoked only when the encryption attribute is changed from encrypted -> decrypted and vice versa. By default all guest pages are considered encrypted. 
The hypercall exits to userspace to manage the guest shared regions and integrate with the userspace VMM's migration code. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson Co-developed-by: Paolo Bonzini Signed-off-by: Paolo Bonzini Message-Id: <90778988e1ee01926ff9cac447aacb745f954c8c.1623174621.git.ashish.kalra@amd.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- Documentation/virt/kvm/api.rst | 19 +++++++++++ Documentation/virt/kvm/cpuid.rst | 7 ++++ Documentation/virt/kvm/hypercalls.rst | 21 ++++++++++++ Documentation/virt/kvm/msr.rst | 13 ++++++++ arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/include/uapi/asm/kvm_para.h | 13 ++++++++ arch/x86/kvm/x86.c | 46 +++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 1 + include/uapi/linux/kvm_para.h | 1 + 9 files changed, 123 insertions(+) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 5d55487e2eb2..3b83d70113bf 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6880,6 +6880,25 @@ This capability indicates that the KVM virtual PTP service is supported in the host. A VMM can check whether the service is available to the guest on migration. +8.34 KVM_CAP_EXIT_HYPERCALL +--------------------------- + +:Capability: KVM_CAP_EXIT_HYPERCALL +:Architectures: x86 +:Type: vm + +This capability, if enabled, will cause KVM to exit to userspace +with KVM_EXIT_HYPERCALL exit reason to process some hypercalls. + +Calling KVM_CHECK_EXTENSION for this capability will return a bitmask +of hypercalls that can be configured to exit to userspace. +Right now, the only such hypercall is KVM_HC_MAP_GPA_RANGE. 
+ +The argument to KVM_ENABLE_CAP is also a bitmask, and must be a subset +of the result of KVM_CHECK_EXTENSION. KVM will forward to userspace +the hypercalls whose corresponding bit is in the argument, and return +ENOSYS for the others. + 9. Known KVM API problems ========================= diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst index cf62162d4be2..bda3e3e737d7 100644 --- a/Documentation/virt/kvm/cpuid.rst +++ b/Documentation/virt/kvm/cpuid.rst @@ -96,6 +96,13 @@ KVM_FEATURE_MSI_EXT_DEST_ID 15 guest checks this feature bit before using extended destination ID bits in MSI address bits 11-5. +KVM_FEATURE_HC_MAP_GPA_RANGE 16 guest checks this feature bit before + using the map gpa range hypercall + to notify the page state change + +KVM_FEATURE_MIGRATION_CONTROL 17 guest checks this feature bit before + using MSR_KVM_MIGRATION_CONTROL + KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24 host will warn if no guest-side per-cpu warps are expected in kvmclock diff --git a/Documentation/virt/kvm/hypercalls.rst b/Documentation/virt/kvm/hypercalls.rst index ed4fddd364ea..e56fa8b9cfca 100644 --- a/Documentation/virt/kvm/hypercalls.rst +++ b/Documentation/virt/kvm/hypercalls.rst @@ -169,3 +169,24 @@ a0: destination APIC ID :Usage example: When sending a call-function IPI-many to vCPUs, yield if any of the IPI target vCPUs was preempted. + +8. KVM_HC_MAP_GPA_RANGE +------------------------- +:Architecture: x86 +:Status: active +:Purpose: Request KVM to map a GPA range with the specified attributes. + +a0: the guest physical address of the start page +a1: the number of (4kb) pages (must be contiguous in GPA space) +a2: attributes + + Where 'attributes' : + * bits 3:0 - preferred page size encoding 0 = 4kb, 1 = 2mb, 2 = 1gb, etc... + * bit 4 - plaintext = 0, encrypted = 1 + * bits 63:5 - reserved (must be zero) + +**Implementation note**: this hypercall is implemented in userspace via +the KVM_CAP_EXIT_HYPERCALL capability. 
Userspace must enable that capability +before advertising KVM_FEATURE_HC_MAP_GPA_RANGE in the guest CPUID. In +addition, if the guest supports KVM_FEATURE_MIGRATION_CONTROL, userspace +must also set up an MSR filter to process writes to MSR_KVM_MIGRATION_CONTROL. diff --git a/Documentation/virt/kvm/msr.rst b/Documentation/virt/kvm/msr.rst index e37a14c323d2..9315fc385fb0 100644 --- a/Documentation/virt/kvm/msr.rst +++ b/Documentation/virt/kvm/msr.rst @@ -376,3 +376,16 @@ data: write '1' to bit 0 of the MSR, this causes the host to re-scan its queue and check if there are more notifications pending. The MSR is available if KVM_FEATURE_ASYNC_PF_INT is present in CPUID. + +MSR_KVM_MIGRATION_CONTROL: + 0x4b564d08 + +data: + This MSR is available if KVM_FEATURE_MIGRATION_CONTROL is present in + CPUID. Bit 0 represents whether live migration of the guest is allowed. + + When a guest is started, bit 0 will be 0 if the guest has encrypted + memory and 1 if the guest does not have encrypted memory. If the + guest is communicating page encryption status to the host using the + ``KVM_HC_MAP_GPA_RANGE`` hypercall, it can set bit 0 in this MSR to + allow live migration of the guest. 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ce1792de39ca..9838585a8f32 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1083,6 +1083,8 @@ struct kvm_arch { struct kvm_x86_msr_filter __rcu *msr_filter; + u32 hypercall_exit_enabled; + struct kvm_pmu_event_filter *pmu_event_filter; struct task_struct *nx_huge_page_recovery_thread; diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 66ffdbdd3c81..6e64b27b2c1e 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -34,6 +34,8 @@ #define KVM_FEATURE_PV_SCHED_YIELD 13 #define KVM_FEATURE_ASYNC_PF_INT 14 #define KVM_FEATURE_MSI_EXT_DEST_ID 15 +#define KVM_FEATURE_HC_MAP_GPA_RANGE 16 +#define KVM_FEATURE_MIGRATION_CONTROL 17 #define KVM_HINTS_REALTIME 0 @@ -55,6 +57,7 @@ #define MSR_KVM_POLL_CONTROL 0x4b564d05 #define MSR_KVM_ASYNC_PF_INT 0x4b564d06 #define MSR_KVM_ASYNC_PF_ACK 0x4b564d07 +#define MSR_KVM_MIGRATION_CONTROL 0x4b564d08 struct kvm_steal_time { __u64 steal; @@ -91,6 +94,16 @@ struct kvm_clock_pairing { /* MSR_KVM_ASYNC_PF_INT */ #define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0) +/* MSR_KVM_MIGRATION_CONTROL */ +#define KVM_MIGRATION_READY (1 << 0) + +/* KVM_HC_MAP_GPA_RANGE */ +#define KVM_MAP_GPA_RANGE_PAGE_SZ_4K 0 +#define KVM_MAP_GPA_RANGE_PAGE_SZ_2M (1 << 0) +#define KVM_MAP_GPA_RANGE_PAGE_SZ_1G (1 << 1) +#define KVM_MAP_GPA_RANGE_ENC_STAT(n) (n << 4) +#define KVM_MAP_GPA_RANGE_ENCRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(1) +#define KVM_MAP_GPA_RANGE_DECRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(0) /* Operations for KVM_HC_MMU_OP */ #define KVM_MMU_OP_WRITE_PTE 1 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index af6c5d98bdc5..9f60cb550c55 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -108,6 +108,8 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS; +#define 
KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE) + #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) @@ -4226,6 +4228,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = sizeof(struct kvm_xsave); break; } + case KVM_CAP_EXIT_HYPERCALL: + r = KVM_EXIT_HYPERCALL_VALID_MASK; + break; case KVM_CAP_X86_NOTIFY_VMEXIT: r = kvm_has_notify_vmexit; break; @@ -5820,6 +5825,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, } mutex_unlock(&kvm->lock); break; + case KVM_CAP_EXIT_HYPERCALL: + if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) { + r = -EINVAL; + break; + } + kvm->arch.hypercall_exit_enabled = cap->args[0]; + r = 0; + break; default: r = -EINVAL; break; @@ -8763,6 +8776,17 @@ static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id) kvm_vcpu_yield_to(target); } +static int complete_hypercall_exit(struct kvm_vcpu *vcpu) +{ + u64 ret = vcpu->run->hypercall.ret; + + if (!is_64_bit_mode(vcpu)) + ret = (u32)ret; + kvm_rax_write(vcpu, ret); + ++vcpu->stat.hypercalls; + return kvm_skip_emulated_instruction(vcpu); +} + int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; @@ -8831,6 +8855,28 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (is_x86_vendor_hygon() && kvm_x86_ops.vm_attestation) ret = kvm_x86_ops.vm_attestation(vcpu->kvm, a0, a1); break; + case KVM_HC_MAP_GPA_RANGE: { + u64 gpa = a0, npages = a1, attrs = a2; + + ret = -KVM_ENOSYS; + if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) + break; + + if (!PAGE_ALIGNED(gpa) || !npages || + gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) { + ret = -KVM_EINVAL; + break; + } + + vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; + vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; + vcpu->run->hypercall.args[0] = gpa; + vcpu->run->hypercall.args[1] = npages; + vcpu->run->hypercall.args[2] = attrs; + vcpu->run->hypercall.longmode = op_64_bit; + 
vcpu->arch.complete_userspace_io = complete_hypercall_exit; + return 0; + } default: ret = -KVM_ENOSYS; break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 8c82b83a7468..f073f15dc70b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1093,6 +1093,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_X86_BUS_LOCK_EXIT 193 #define KVM_CAP_SGX_ATTRIBUTE 196 #define KVM_CAP_PTP_KVM 198 +#define KVM_CAP_EXIT_HYPERCALL 201 #define KVM_CAP_XSAVE2 208 #define KVM_CAP_SYS_ATTRIBUTES 209 #define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218 diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index f32bf58c292c..67192835455e 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -29,6 +29,7 @@ #define KVM_HC_CLOCK_PAIRING 9 #define KVM_HC_SEND_IPI 10 #define KVM_HC_SCHED_YIELD 11 +#define KVM_HC_MAP_GPA_RANGE 12 #define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ /* -- Gitee From bcd160e25ee8fc86835c83de632a02b323bd39a9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 6 May 2021 10:58:25 -0700 Subject: [PATCH 09/35] KVM: SVM: Return -EFAULT if copy_to_user() for SEV mig packet header fails mainline inclusion from mainline-v5.14 commit b4a693924aab93f3747465b2261add46c82c3220 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b4a693924aab93f3747465b2261add46c82c3220 --------------------------- Return -EFAULT if copy_to_user() fails; if accessing user memory faults, copy_to_user() returns the number of bytes remaining, not an error code. 
Reported-by: Dan Carpenter Cc: Steve Rutherford Cc: Brijesh Singh Cc: Ashish Kalra Fixes: d3d1af85e2c7 ("KVM: SVM: Add KVM_SEND_UPDATE_DATA command") Signed-off-by: Sean Christopherson Message-Id: <20210506175826.2166383-2-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 26f3e368f1e5..4b0987e186fd 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1305,8 +1305,9 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) } /* Copy packet header to userspace. */ - ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, - params.hdr_len); + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + ret = -EFAULT; e_free: kfree(data); -- Gitee From c62608e1965ce4f1e3eb120fb771c7e018d7a335 Mon Sep 17 00:00:00 2001 From: Mingwei Zhang Date: Sun, 12 Sep 2021 18:18:15 +0000 Subject: [PATCH 10/35] KVM: SVM: fix missing sev_decommission in sev_receive_start mainline inclusion from mainline-v5.14 commit f1815e0aa770f2127c5df31eb5c2f0e37b60fa77 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f1815e0aa770f2127c5df31eb5c2f0e37b60fa77 --------------------------- DECOMMISSION the current SEV context if binding an ASID fails after RECEIVE_START. Per AMD's SEV API, RECEIVE_START generates a new guest context and thus needs to be paired with DECOMMISSION: The RECEIVE_START command is the only command other than the LAUNCH_START command that generates a new guest context and guest handle. The missing DECOMMISSION can result in subsequent SEV launch failures, as the firmware leaks memory and might not able to allocate more SEV guest contexts in the future. 
Note, LAUNCH_START suffered the same bug, but was previously fixed by commit 934002cd660b ("KVM: SVM: Call SEV Guest Decommission if ASID binding fails"). Cc: Alper Gun Cc: Borislav Petkov Cc: Brijesh Singh Cc: David Rienjes Cc: Marc Orr Cc: John Allen Cc: Peter Gonda Cc: Sean Christopherson Cc: Tom Lendacky Cc: Vipin Sharma Cc: stable@vger.kernel.org Reviewed-by: Marc Orr Acked-by: Brijesh Singh Fixes: af43cbbf954b ("KVM: SVM: Add support for KVM_SEV_RECEIVE_START command") Signed-off-by: Mingwei Zhang Reviewed-by: Sean Christopherson Message-Id: <20210912181815.3899316-1-mizhang@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 4b0987e186fd..2496bb9bf7ba 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1415,8 +1415,10 @@ static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp) /* Bind ASID to this guest */ ret = sev_bind_asid(kvm, start->handle, error); - if (ret) + if (ret) { + sev_decommission(start->handle); goto e_free; + } params.handle = start->handle; if (copy_to_user((void __user *)(uintptr_t)argp->data, -- Gitee From dc91bacb4843016d7c3b232ffd5f2d8b21a59f6f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 14 Sep 2021 14:09:50 -0700 Subject: [PATCH 11/35] KVM: SEV: Pin guest memory for write for RECEIVE_UPDATE_DATA mainline inclusion from mainline-v5.14 commit 50c038018d6be20361e8a2890262746a4ac5b11f category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=50c038018d6be20361e8a2890262746a4ac5b11f --------------------------- Require the target guest page to be writable when pinning memory for RECEIVE_UPDATE_DATA. 
Per the SEV API, the PSP writes to guest memory: The result is then encrypted with GCTX.VEK and written to the memory pointed to by GUEST_PADDR field. Fixes: 15fb7de1a7f5 ("KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_DATA command") Cc: stable@vger.kernel.org Cc: Peter Gonda Cc: Marc Orr Cc: Tom Lendacky Cc: Brijesh Singh Signed-off-by: Sean Christopherson Message-Id: <20210914210951.2994260-2-seanjc@google.com> Reviewed-by: Brijesh Singh Reviewed-by: Peter Gonda Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2496bb9bf7ba..b3e4a5e12ce0 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1491,7 +1491,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) /* Pin guest memory */ ret = -EFAULT; guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, - PAGE_SIZE, &n, 0); + PAGE_SIZE, &n, 1); if (!guest_page) goto e_free; -- Gitee From cd750826cce28d77af6f8cf1493331a88e667c4f Mon Sep 17 00:00:00 2001 From: Masahiro Kozuka Date: Tue, 14 Sep 2021 14:09:51 -0700 Subject: [PATCH 12/35] KVM: SEV: Flush cache on non-coherent systems before RECEIVE_UPDATE_DATA mainline inclusion from mainline-v5.14 commit c8c340a9b4149fe5caa433f3b62463a1c8e07a46 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=c8c340a9b4149fe5caa433f3b62463a1c8e07a46 --------------------------- Flush the destination page before invoking RECEIVE_UPDATE_DATA, as the PSP encrypts the data with the guest's key when writing to guest memory. If the target memory was not previously encrypted, the cache may contain dirty, unecrypted data that will persist on non-coherent systems. 
Fixes: 15fb7de1a7f5 ("KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_DATA command") Cc: stable@vger.kernel.org Cc: Peter Gonda Cc: Marc Orr Cc: Tom Lendacky Cc: Brijesh Singh Signed-off-by: Masahiro Kozuka [sean: converted bug report to changelog] Signed-off-by: Sean Christopherson Signed-off-by: Paolo Bonzini Message-Id: <20210914210951.2994260-3-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index b3e4a5e12ce0..cdae13092180 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1495,6 +1495,13 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) if (!guest_page) goto e_free; + /* + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP + * encrypts the written data with the guest's key, and the cache may + * contain dirty, unencrypted data. + */ + sev_clflush_pages(guest_page, n); + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset; -- Gitee From 9219167f06e3547e8ffb0029cd05880a2443752f Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Tue, 24 Aug 2021 11:04:35 +0000 Subject: [PATCH 13/35] x86/kvm: Add AMD SEV specific Hypercall3 mainline inclusion from mainline-v5.14 commit 08c2336df78d01fd4d634b14262ea739c399ddbd category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=08c2336df78d01fd4d634b14262ea739c399ddbd --------------------------- KVM hypercall framework relies on alternative framework to patch the VMCALL -> VMMCALL on AMD platform. If a hypercall is made before apply_alternative() is called then it defaults to VMCALL. The approach works fine on non SEV guest. 
A VMCALL would causes #UD, and hypervisor will be able to decode the instruction and do the right things. But when SEV is active, guest memory is encrypted with guest key and hypervisor will not be able to decode the instruction bytes. To highlight the need to provide this interface, capturing the flow of apply_alternatives() : setup_arch() call init_hypervisor_platform() which detects the hypervisor platform the kernel is running under and then the hypervisor specific initialization code can make early hypercalls. For example, KVM specific initialization in case of SEV will try to mark the "__bss_decrypted" section's encryption state via early page encryption status hypercalls. Now, apply_alternatives() is called much later when setup_arch() calls check_bugs(), so we do need some kind of an early, pre-alternatives hypercall interface. Other cases of pre-alternatives hypercalls include marking per-cpu GHCB pages as decrypted on SEV-ES and per-cpu apf_reason, steal_time and kvm_apic_eoi as decrypted for SEV generally. Add SEV specific hypercall3, it unconditionally uses VMMCALL. The hypercall will be used by the SEV guest to notify encrypted pages to the hypervisor. This kvm_sev_hypercall3() function is abstracted and used as follows : All these early hypercalls are made through early_set_memory_XX() interfaces, which in turn invoke pv_ops (paravirt_ops). This early_set_memory_XX() -> pv_ops.mmu.notify_page_enc_status_changed() is a generic interface and can easily have SEV, TDX and any other future platform specific abstractions added to it. Currently, pv_ops.mmu.notify_page_enc_status_changed() callback is setup to invoke kvm_sev_hypercall3() in case of SEV. Similarly, in case of TDX, pv_ops.mmu.notify_page_enc_status_changed() can be setup to a TDX specific callback. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Reviewed-by: Venu Busireddy Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Message-Id: <6fd25c749205dd0b1eb492c60d41b124760cc6ae.1629726117.git.ashish.kalra@amd.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/include/asm/kvm_para.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 69299878b200..56935ebb1dfe 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -83,6 +83,18 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, return ret; } +static inline long kvm_sev_hypercall3(unsigned int nr, unsigned long p1, + unsigned long p2, unsigned long p3) +{ + long ret; + + asm volatile("vmmcall" + : "=a"(ret) + : "a"(nr), "b"(p1), "c"(p2), "d"(p3) + : "memory"); + return ret; +} + #ifdef CONFIG_KVM_GUEST void kvmclock_init(void); void kvmclock_disable(void); -- Gitee From 8f4b21599b75cb441a18da9f2105fa6174dddc55 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Tue, 24 Aug 2021 11:05:00 +0000 Subject: [PATCH 14/35] mm: x86: Invoke hypercall when page encryption status is changed mainline inclusion from mainline-v5.14 commit 064ce6c550a0630789978bfec7a13ab2bd1bdcdf category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=064ce6c550a0630789978bfec7a13ab2bd1bdcdf --------------------------- Invoke a hypercall when a memory region is changed from encrypted -> decrypted and vice versa. Hypervisor needs to know the page encryption status during the guest migration. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Paolo Bonzini Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Cc: x86@kernel.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Steve Rutherford Reviewed-by: Venu Busireddy Signed-off-by: Brijesh Singh Signed-off-by: Ashish Kalra Reviewed-by: Borislav Petkov Message-Id: <0a237d5bb08793916c7790a3e653a2cbe7485761.1629726117.git.ashish.kalra@amd.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/include/asm/paravirt.h | 6 +++ arch/x86/include/asm/paravirt_types.h | 1 + arch/x86/include/asm/set_memory.h | 1 + arch/x86/kernel/paravirt.c | 1 + arch/x86/mm/mem_encrypt.c | 67 +++++++++++++++++++++++---- arch/x86/mm/pat/set_memory.c | 6 +++ 6 files changed, 73 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index aa5aba325e30..0740fe7d0def 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -84,6 +84,12 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) PVOP_VCALL1(mmu.exit_mmap, mm); } +static inline void notify_page_enc_status_changed(unsigned long pfn, + int npages, bool enc) +{ + PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc); +} + #ifdef CONFIG_PARAVIRT_XXL static inline void load_sp0(unsigned long sp0) { diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index b30b56d47619..2cf082ff733f 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -212,6 +212,7 @@ struct pv_mmu_ops { /* Hook for intercepting the destruction of an mm_struct. 
*/ void (*exit_mmap)(struct mm_struct *mm); + void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc); #ifdef CONFIG_PARAVIRT_XXL struct paravirt_callee_save read_cr2; diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h index ef47246d22f5..e7b23e574d1a 100644 --- a/arch/x86/include/asm/set_memory.h +++ b/arch/x86/include/asm/set_memory.h @@ -83,6 +83,7 @@ int set_pages_rw(struct page *page, int numpages); int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); +void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc); extern int kernel_set_to_readonly; diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 2da5c225bf04..2cc14ed576ca 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -328,6 +328,7 @@ struct paravirt_patch_template pv_ops = { (void (*)(struct mmu_gather *, void *))tlb_remove_page, .mmu.exit_mmap = paravirt_nop, + .mmu.notify_page_enc_status_changed = paravirt_nop, #ifdef CONFIG_PARAVIRT_XXL .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2), diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 554108da2a41..fad3835ca6a3 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -199,29 +199,76 @@ void __init sme_early_init(void) swiotlb_force = SWIOTLB_FORCE; } -static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) +static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot) { - pgprot_t old_prot, new_prot; - unsigned long pfn, pa, size; - pte_t new_pte; + unsigned long pfn = 0; + pgprot_t prot; switch (level) { case PG_LEVEL_4K: pfn = pte_pfn(*kpte); - old_prot = pte_pgprot(*kpte); + prot = pte_pgprot(*kpte); break; case PG_LEVEL_2M: pfn = pmd_pfn(*(pmd_t *)kpte); - old_prot = pmd_pgprot(*(pmd_t *)kpte); + prot = pmd_pgprot(*(pmd_t *)kpte); break; case PG_LEVEL_1G: pfn = pud_pfn(*(pud_t *)kpte); - 
old_prot = pud_pgprot(*(pud_t *)kpte); + prot = pud_pgprot(*(pud_t *)kpte); break; default: - return; + WARN_ONCE(1, "Invalid level for kpte\n"); + return 0; } + if (ret_prot) + *ret_prot = prot; + + return pfn; +} + +void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc) +{ +#ifdef CONFIG_PARAVIRT + unsigned long sz = npages << PAGE_SHIFT; + unsigned long vaddr_end = vaddr + sz; + + while (vaddr < vaddr_end) { + int psize, pmask, level; + unsigned long pfn; + pte_t *kpte; + + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + WARN_ONCE(1, "kpte lookup for vaddr\n"); + return; + } + + pfn = pg_level_to_pfn(level, kpte, NULL); + if (!pfn) + continue; + + psize = page_level_size(level); + pmask = page_level_mask(level); + + notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc); + + vaddr = (vaddr & pmask) + psize; + } +#endif +} + +static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) +{ + pgprot_t old_prot, new_prot; + unsigned long pfn, pa, size; + pte_t new_pte; + + pfn = pg_level_to_pfn(level, kpte, &old_prot); + if (!pfn) + return; + new_prot = old_prot; if (enc) pgprot_val(new_prot) |= _PAGE_ENC; @@ -256,12 +303,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) static int __init early_set_memory_enc_dec(unsigned long vaddr, unsigned long size, bool enc) { - unsigned long vaddr_end, vaddr_next; + unsigned long vaddr_end, vaddr_next, start; unsigned long psize, pmask; int split_page_size_mask; int level, ret; pte_t *kpte; + start = vaddr; vaddr_next = vaddr; vaddr_end = vaddr + size; @@ -316,6 +364,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr, ret = 0; + notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc); out: __flush_tlb_all(); return ret; diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 217dda690ed8..89ee01837648 100644 --- a/arch/x86/mm/pat/set_memory.c +++ 
b/arch/x86/mm/pat/set_memory.c @@ -2012,6 +2012,12 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) */ cpa_flush(&cpa, 0); + /* + * Notify hypervisor that a given memory range is mapped encrypted + * or decrypted. + */ + notify_range_enc_status_changed(addr, numpages, enc); + return ret; } -- Gitee From 7a266f4738db156cbdc65238ab904411c08c64a2 Mon Sep 17 00:00:00 2001 From: Ashish Kalra Date: Tue, 24 Aug 2021 11:06:40 +0000 Subject: [PATCH 15/35] EFI: Introduce the new AMD Memory Encryption GUID. mainline inclusion from mainline-v5.14 commit 2f70ddb1f71814aae525c58086fcb2f6974e6591 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=2f70ddb1f71814aae525c58086fcb2f6974e6591 --------------------------- Introduce a new AMD Memory Encryption GUID which is currently used for defining a new UEFI environment variable which indicates UEFI/OVMF support for the SEV live migration feature. This variable is setup when UEFI/OVMF detects host/hypervisor support for SEV live migration and later this variable is read by the kernel using EFI runtime services to verify if OVMF supports the live migration feature. 
Signed-off-by: Ashish Kalra Acked-by: Ard Biesheuvel Message-Id: <1cea22976d2208f34d47e0c1ce0ecac816c13111.1629726117.git.ashish.kalra@amd.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- include/linux/efi.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/efi.h b/include/linux/efi.h index 9816e03cf05b..29f876c31ede 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -370,6 +370,7 @@ void efi_native_runtime_setup(void); /* OEM GUIDs */ #define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55) +#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75) typedef struct { efi_guid_t guid; -- Gitee From 712d7d704b5aff456514721b02002f4e1c5db9cc Mon Sep 17 00:00:00 2001 From: Ashish Kalra Date: Tue, 24 Aug 2021 11:07:07 +0000 Subject: [PATCH 16/35] x86/kvm: Add guest support for detecting and enabling SEV Live Migration feature. mainline inclusion from mainline-v5.14 commit f4495615d76cfe5a633b0886b5c30310ed94c357 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f4495615d76cfe5a633b0886b5c30310ed94c357 --------------------------- The guest support for detecting and enabling SEV Live migration feature uses the following logic : - kvm_init_plaform() checks if its booted under the EFI - If not EFI, i) if kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL), issue a wrmsrl() to enable the SEV live migration support - If EFI, i) If kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL), read the UEFI variable which indicates OVMF support for live migration ii) the variable indicates live migration is supported, issue a wrmsrl() to enable the SEV live migration support The EFI live migration check is done using a late_initcall() callback. 
Also, ensure that _bss_decrypted section is marked as decrypted in the hypervisor's guest page encryption status tracking. Signed-off-by: Ashish Kalra Reviewed-by: Steve Rutherford Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/include/asm/mem_encrypt.h | 4 ++ arch/x86/kernel/kvm.c | 82 ++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt.c | 5 ++ 3 files changed, 91 insertions(+) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 6356d8e72080..8442ba5fc498 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -43,6 +43,8 @@ void __init sme_enable(struct boot_params *bp); int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size); int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); +void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, + bool enc); void __init mem_encrypt_free_decrypted_mem(void); @@ -81,6 +83,8 @@ static inline int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; } static inline int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; } +static inline void __init +early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {} static inline void mem_encrypt_free_decrypted_mem(void) { } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index dd44a13794af..fdfb87955f8c 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -40,6 +41,7 @@ #include #include #include +#include DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled); @@ -481,6 +483,8 @@ static void kvm_guest_cpu_offline(bool shutdown) kvm_disable_steal_time(); if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) wrmsrl(MSR_KVM_PV_EOI_EN, 0); + if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) + wrmsrl(MSR_KVM_MIGRATION_CONTROL, 
0); kvm_pv_disable_apf(); if (!shutdown) apf_task_wake_all(); @@ -586,6 +590,55 @@ static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector) __send_ipi_mask(local_mask, vector); } +static int __init setup_efi_kvm_sev_migration(void) +{ + efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled"; + efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID; + efi_status_t status; + unsigned long size; + bool enabled; + + if (!sev_active() || + !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) + return 0; + + if (!efi_enabled(EFI_BOOT)) + return 0; + + if (!efi_enabled(EFI_RUNTIME_SERVICES)) { + pr_info("%s : EFI runtime services are not enabled\n", __func__); + return 0; + } + + size = sizeof(enabled); + + /* Get variable contents into buffer */ + status = efi.get_variable(efi_sev_live_migration_enabled, + &efi_variable_guid, NULL, &size, &enabled); + + if (status == EFI_NOT_FOUND) { + pr_info("%s : EFI live migration variable not found\n", __func__); + return 0; + } + + if (status != EFI_SUCCESS) { + pr_info("%s : EFI variable retrieval failed\n", __func__); + return 0; + } + + if (enabled == 0) { + pr_info("%s: live migration disabled in EFI\n", __func__); + return 0; + } + + pr_info("%s : live migration enabled in EFI\n", __func__); + wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY); + + return 1; +} + +late_initcall(setup_efi_kvm_sev_migration); + /* * Set the IPI entry points */ @@ -822,8 +875,37 @@ static bool __init kvm_msi_ext_dest_id(void) return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID); } +static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc) +{ + kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages, + KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K); +} + static void __init kvm_init_platform(void) { + if (sev_active() && + kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) { + unsigned long nr_pages; + + 
pv_ops.mmu.notify_page_enc_status_changed = + kvm_sev_hc_page_enc_status; + + /* + * Ensure that _bss_decrypted section is marked as decrypted in the + * shared pages list. + */ + nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted, + PAGE_SIZE); + early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted, + nr_pages, 0); + + /* + * If not booted using EFI, enable Live migration support. + */ + if (!efi_enabled(EFI_BOOT)) + wrmsrl(MSR_KVM_MIGRATION_CONTROL, + KVM_MIGRATION_READY); + } kvmclock_init(); x86_platform.apic_post_init = kvm_apic_init; } diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index fad3835ca6a3..73f19c68900d 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -380,6 +380,11 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) return early_set_memory_enc_dec(vaddr, size, true); } +void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) +{ + notify_range_enc_status_changed(vaddr, npages, enc); +} + /* * SME and SEV are very similar but they are not the same, so there are * times that the kernel will need to distinguish between SME and SEV. The -- Gitee From 2c09ca8df4279e8fdab1d48571a5ce270a156a2b Mon Sep 17 00:00:00 2001 From: Ashish Kalra Date: Tue, 24 Aug 2021 11:07:45 +0000 Subject: [PATCH 17/35] x86/kvm: Add kexec support for SEV Live Migration. mainline inclusion from mainline-v5.14 commit 73f1b4fece216c2e72be74c4d0d0f71a0b944bec category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=73f1b4fece216c2e72be74c4d0d0f71a0b944bec --------------------------- Reset the host's shared pages list related to kernel specific page encryption status settings before we load a new kernel by kexec. We cannot reset the complete shared pages list here as we need to retain the UEFI/OVMF firmware specific settings. 
The host's shared pages list is maintained for the guest to keep track of all unencrypted guest memory regions, therefore we need to explicitly mark all shared pages as encrypted again before rebooting into the new guest kernel. Signed-off-by: Ashish Kalra Reviewed-by: Steve Rutherford Message-Id: <3e051424ab839ea470f88333273d7a185006754f.1629726117.git.ashish.kalra@amd.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kernel/kvm.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index fdfb87955f8c..6e12052dd607 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -886,10 +886,35 @@ static void __init kvm_init_platform(void) if (sev_active() && kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) { unsigned long nr_pages; + int i; pv_ops.mmu.notify_page_enc_status_changed = kvm_sev_hc_page_enc_status; + /* + * Reset the host's shared pages list related to kernel + * specific page encryption status settings before we load a + * new kernel by kexec. Reset the page encryption status + * during early boot intead of just before kexec to avoid SMP + * races during kvm_pv_guest_cpu_reboot(). + * NOTE: We cannot reset the complete shared pages list + * here as we need to retain the UEFI/OVMF firmware + * specific settings. + */ + + for (i = 0; i < e820_table->nr_entries; i++) { + struct e820_entry *entry = &e820_table->entries[i]; + + if (entry->type != E820_TYPE_RAM) + continue; + + nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE); + + kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr, + nr_pages, + KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K); + } + /* * Ensure that _bss_decrypted section is marked as decrypted in the * shared pages list. 
-- Gitee From 572fc49cab760fe4a279184f800214a01c49c47c Mon Sep 17 00:00:00 2001 From: Ashish Kalra Date: Mon, 7 Jun 2021 06:15:32 +0000 Subject: [PATCH 18/35] KVM: SVM: Fix SEV SEND_START session length & SEND_UPDATE_DATA query length after commit 238eca821cee mainline inclusion from mainline-v5.14 commit 4f13d471e5d11034d56161af56d0f9396bc0b384 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=4f13d471e5d11034d56161af56d0f9396bc0b384 --------------------------- Commit 238eca821cee ("KVM: SVM: Allocate SEV command structures on local stack") uses the local stack to allocate the structures used to communicate with the PSP, which were earlier being kzalloced. This breaks SEV live migration for computing the SEND_START session length and SEND_UPDATE_DATA query length as session_len and trans_len and hdr_len fields are not zeroed respectively for the above commands before issuing the SEV Firmware API call, hence the firmware returns incorrect session length and update data header or trans length. Also the SEV Firmware API returns SEV_RET_INVALID_LEN firmware error for these length query API calls, and the return value and the firmware error needs to be passed to the userspace as it is, so need to remove the return check in the KVM code. 
Signed-off-by: Ashish Kalra Message-Id: <20210607061532.27459-1-Ashish.Kalra@amd.com> Fixes: 238eca821cee ("KVM: SVM: Allocate SEV command structures on local stack") Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index cdae13092180..1ba29d3d4076 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1090,15 +1090,12 @@ __sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp, data->handle = sev->handle; ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, data, &argp->error); - if (ret < 0) - goto out; params->session_len = data->session_len; if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(struct kvm_sev_send_start))) ret = -EFAULT; -out: kfree(data); return ret; } @@ -1215,8 +1212,6 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, data->handle = sev->handle; ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, data, &argp->error); - if (ret < 0) - goto out; params->hdr_len = data->hdr_len; params->trans_len = data->trans_len; @@ -1225,7 +1220,6 @@ __sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, sizeof(struct kvm_sev_send_update_data))) ret = -EFAULT; -out: kfree(data); return ret; } -- Gitee From 6b8d9842530a9a8b7fa2a1f7058e03e1326645b5 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 14:42:49 +0800 Subject: [PATCH 19/35] crypto: ccp: Introduce init and free helpers to manage CSV RING_BUFFER queues hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- There are up to two queues created in RING_BUFFER mode, each with two sub-queues. The sub-queues store the command pointer entries (written only by the x86) and status entries (written only by the CSV Firmware) respectively. 
The two queues are low priority queue (required) and high priority queue (optional) respectively. In this change, we introduce csv_ring_buffer_queue_init() to initialize CSV RING_BUFFER queues, and csv_ring_buffer_queue_free() to cleanup CSV RING_BUFFER queues. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/hygon/csv-dev.c | 101 +++++++++++++++++++++++++ drivers/crypto/ccp/hygon/ring-buffer.c | 37 +++++++++ drivers/crypto/ccp/hygon/ring-buffer.h | 19 +++++ drivers/crypto/ccp/sev-dev.h | 5 ++ include/linux/psp-hygon.h | 34 +++++++++ 6 files changed, 198 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/hygon/ring-buffer.c create mode 100644 drivers/crypto/ccp/hygon/ring-buffer.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index b7542a5a17b0..17fb2e6bad8c 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -16,7 +16,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ tee-dev.o \ platform-access.o \ hygon/psp-dev.o \ - hygon/csv-dev.o + hygon/csv-dev.o \ + hygon/ring-buffer.o ccp-$(CONFIG_TDM_DEV_HYGON) += hygon/tdm-dev.o obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index c34e257c8b91..0294b0355144 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -17,6 +17,7 @@ #include "csv-dev.h" #include "psp-dev.h" +#include "ring-buffer.h" /* * Hygon CSV build info: @@ -142,3 +143,103 @@ const struct file_operations csv_fops = { .owner = THIS_MODULE, .unlocked_ioctl = csv_ioctl, }; + +/* + * __csv_ring_buffer_queue_init will allocate memory for command queue + * and status queue. If error occurs, this function will return directly, + * the caller must free the memories allocated for queues. 
+ * + * Function csv_ring_buffer_queue_free() can be used to handling error + * return by this function and cleanup ring buffer queues when exiting + * from RING BUFFER mode. + * + * Return -ENOMEM if fail to allocate memory for queues, otherwise 0 + */ +static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + void *cmd_ptr_buffer = NULL; + void *stat_val_buffer = NULL; + + /* If reach here, the command and status queues must be NULL */ + WARN_ON(ring_buffer->cmd_ptr.data || + ring_buffer->stat_val.data); + + cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!cmd_ptr_buffer) + return -ENOMEM; + + /* the command queue will points to @cmd_ptr_buffer */ + csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, + CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + + stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!stat_val_buffer) + return -ENOMEM; + + /* the status queue will points to @stat_val_buffer */ + csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, + CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + return 0; +} + +int csv_ring_buffer_queue_init(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + int i, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]); + if (ret) + goto e_free; + } + + return 0; + +e_free: + csv_ring_buffer_queue_free(); + return ret; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init); + +int csv_ring_buffer_queue_free(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_ringbuffer_queue *ring_buffer; + int i; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ring_buffer = &sev->ring_buffer[i]; + + /* + * If command queue is not NULL, it must points to memory + * that allocated in 
__csv_ring_buffer_queue_init(). + */ + if (ring_buffer->cmd_ptr.data) { + kfree((void *)ring_buffer->cmd_ptr.data); + csv_queue_cleanup(&ring_buffer->cmd_ptr); + } + + /* + * If status queue is not NULL, it must points to memory + * that allocated in __csv_ring_buffer_queue_init(). + */ + if (ring_buffer->stat_val.data) { + kfree((void *)ring_buffer->stat_val.data); + csv_queue_cleanup(&ring_buffer->stat_val); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c new file mode 100644 index 000000000000..beeb325136c6 --- /dev/null +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +#include "ring-buffer.h" + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize) +{ + size /= esize; + + queue->head = 0; + queue->tail = 0; + queue->esize = esize; + queue->data = (u64)buffer; + queue->mask = size - 1; + queue->data_align = ALIGN(queue->data, CSV_RING_BUFFER_ALIGN); + + return 0; +} + +void csv_queue_cleanup(struct csv_queue *queue) +{ + memset((void *)queue, 0, sizeof(struct csv_queue)); +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h new file mode 100644 index 000000000000..5ea4e2f54b9a --- /dev/null +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. 
+ * + * Author: Baoshun Fang + */ + +#ifndef __CCP_HYGON_RINGBUF_H__ +#define __CCP_HYGON_RINGBUF_H__ + +#include + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize); +void csv_queue_cleanup(struct csv_queue *queue); + +#endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index c3d22231e1de..6e34d4651207 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -25,6 +25,8 @@ #include #include +#include "hygon/ring-buffer.h" + #define SEV_CMDRESP_CMD GENMASK(26, 16) #define SEV_CMD_COMPLETE BIT(1) #define SEV_CMDRESP_IOC BIT(0) @@ -53,6 +55,9 @@ struct sev_device { u8 build; void *cmd_buf; + + /* Management of the Hygon RING BUFFER mode */ + struct csv_ringbuffer_queue ring_buffer[CSV_COMMAND_PRIORITY_NUM]; }; int sev_dev_init(struct psp_device *psp); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 96c2c2b67a89..8d052a72ba6d 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -24,6 +24,18 @@ enum csv_cmd { CSV_CMD_MAX, }; +/** + * Ring Buffer Mode regions: + * There are 4 regions and every region is a 4K area that must be 4K aligned. + * To accomplish this allocate an amount that is the size of area and the + * required alignment. + * The aligned address will be calculated from the returned address. 
+ */ +#define CSV_RING_BUFFER_SIZE (32 * 1024) +#define CSV_RING_BUFFER_ALIGN (4 * 1024) +#define CSV_RING_BUFFER_LEN (CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN) +#define CSV_RING_BUFFER_ESIZE 16 + /** * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters * @@ -40,13 +52,35 @@ struct csv_data_hgsc_cert_import { u32 hgsc_cert_len; /* In */ } __packed; +#define CSV_COMMAND_PRIORITY_HIGH 0 +#define CSV_COMMAND_PRIORITY_LOW 1 +#define CSV_COMMAND_PRIORITY_NUM 2 + +struct csv_queue { + u32 head; + u32 tail; + u32 mask; /* mask = (size - 1), inicates the elements max count */ + u32 esize; /* size of an element */ + u64 data; + u64 data_align; +} __packed; + +struct csv_ringbuffer_queue { + struct csv_queue cmd_ptr; + struct csv_queue stat_val; +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); +int csv_ring_buffer_queue_init(void); +int csv_ring_buffer_queue_free(void); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } +static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } +static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ -- Gitee From 81513e1c185f98f6d2794daf8fe98ce423394f8b Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 15:21:33 +0800 Subject: [PATCH 20/35] crypto: ccp: Add support for enqueue command pointers in CSV RING_BUFFER mode hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- In CSV RING_BUFFER mode, X86 will enqueue command pointers to the sub-queue which stores the command pointers. The priority will be given through parameter. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- drivers/crypto/ccp/hygon/csv-dev.c | 22 +++++++++++ drivers/crypto/ccp/hygon/ring-buffer.c | 54 ++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/ring-buffer.h | 2 + include/linux/psp-hygon.h | 10 +++++ 4 files changed, 88 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 0294b0355144..09b4dd2aa830 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -243,3 +243,25 @@ int csv_ring_buffer_queue_free(void) return 0; } EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); + +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_cmdptr_entry cmdptr = { }; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c index beeb325136c6..8058d7a4fb66 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.c +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -13,9 +13,49 @@ #include #include +#include + +#include #include "ring-buffer.h" +static void enqueue_data(struct csv_queue *queue, + const void *src, + unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + void *data; + + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + data = (void *)queue->data_align; + memcpy(data + off, src, l); + memcpy(data, src + l, len - l); + + /* + * Make sure that the data in the ring buffer is up to date before + * incrementing the queue->tail index counter. 
+ */ + smp_wmb(); +} + +static unsigned int queue_avail_size(struct csv_queue *queue) +{ + /* + * According to the nature of unsigned Numbers, it always work + * well even though tail < head. Reserved 1 element to distinguish + * full and empty. + */ + return queue->mask - (queue->tail - queue->head); +} + int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize) { @@ -35,3 +75,17 @@ void csv_queue_cleanup(struct csv_queue *queue) { memset((void *)queue, 0, sizeof(struct csv_queue)); } + +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len) +{ + unsigned int size; + + size = queue_avail_size(queue); + if (len > size) + len = size; + + enqueue_data(queue, buf, len, queue->tail); + queue->tail += len; + return len; +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h index 5ea4e2f54b9a..6e3c799c09e1 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.h +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -15,5 +15,7 @@ int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize); void csv_queue_cleanup(struct csv_queue *queue); +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len); #endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 8d052a72ba6d..7bf85ca246ad 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -56,6 +56,13 @@ struct csv_data_hgsc_cert_import { #define CSV_COMMAND_PRIORITY_LOW 1 #define CSV_COMMAND_PRIORITY_NUM 2 +struct csv_cmdptr_entry { + u16 cmd_id; + u16 cmd_flags; + u32 sw_data; + u64 cmd_buf_ptr; +} __packed; + struct csv_queue { u32 head; u32 tail; @@ -75,12 +82,15 @@ struct csv_ringbuffer_queue { int psp_do_cmd(int cmd, void *data, int *psp_ret); int csv_ring_buffer_queue_init(void); int csv_ring_buffer_queue_free(void); +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); 
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } +static inline +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ -- Gitee From 7107c86b854f6ff2c4e9b294ddaabc1fb7229895 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 15:35:47 +0800 Subject: [PATCH 21/35] crypto: ccp: Add support for dequeue status in CSV RING_BUFFER mode hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- In CSV RING_BUFFER mode, X86 will dequeue status entries written by PSP after the corresponding command has been handled. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- drivers/crypto/ccp/hygon/csv-dev.c | 32 +++++++++++++++++++++ drivers/crypto/ccp/hygon/ring-buffer.c | 39 ++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/ring-buffer.h | 2 ++ include/linux/psp-hygon.h | 9 ++++++ 4 files changed, 82 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 09b4dd2aa830..bc54e1f8325d 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -265,3 +265,35 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) return 0; } EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); + +int csv_check_stat_queue_status(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int len; + int prio; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (prio = CSV_COMMAND_PRIORITY_HIGH; + prio < CSV_COMMAND_PRIORITY_NUM; prio++) { + do { + struct csv_statval_entry statval; + + len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val, + &statval, 1); 
+ if (len) { + if (statval.status != 0) { + *psp_ret = statval.status; + return -EFAULT; + } + } + } while (len); + } + + return 0; +} +EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c index 8058d7a4fb66..93402b13b93a 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.c +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -46,6 +46,31 @@ static void enqueue_data(struct csv_queue *queue, smp_wmb(); } +static void dequeue_data(struct csv_queue *queue, + void *dst, unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + + off &= queue->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, (void *)(queue->data + off), l); + memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l); + + /* + * Make sure that the data is copied before incrementing the + * queue->tail index counter. 
+ */ + smp_wmb(); +} + static unsigned int queue_avail_size(struct csv_queue *queue) { /* @@ -89,3 +114,17 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, queue->tail += len; return len; } + +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len) +{ + unsigned int size; + + size = queue->tail - queue->head; + if (len > size) + len = size; + + dequeue_data(queue, buf, len, queue->head); + queue->head += len; + return len; +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h index 6e3c799c09e1..2c99ade02512 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.h +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -17,5 +17,7 @@ int csv_queue_init(struct csv_queue *queue, void csv_queue_cleanup(struct csv_queue *queue); unsigned int csv_enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len); +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len); #endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 7bf85ca246ad..656e8433dfff 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -63,6 +63,13 @@ struct csv_cmdptr_entry { u64 cmd_buf_ptr; } __packed; +struct csv_statval_entry { + u16 status; + u16 reserved0; + u32 reserved1; + u64 reserved2; +} __packed; + struct csv_queue { u32 head; u32 tail; @@ -83,6 +90,7 @@ int psp_do_cmd(int cmd, void *data, int *psp_ret); int csv_ring_buffer_queue_init(void); int csv_ring_buffer_queue_free(void); int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); +int csv_check_stat_queue_status(int *psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ @@ -91,6 +99,7 @@ static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } static inline int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } +static inline int 
csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ -- Gitee From 0b8670ecf0b15975631a7e148dd2388d96196261 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:03:54 +0800 Subject: [PATCH 22/35] crypto: ccp: Add support to switch to CSV RING_BUFFER mode hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- Invoke RING_BUFFER command will switch CSV firmware to RING_BUFFER mode. When CSV firmware stays in RING_BUFFER mode, it will fetch commands from CSV RING_BUFFER queues which are filled by X86. The CSV firmware will exit RING_BUFFER mode after SHUTDOWN command is completed. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- drivers/crypto/ccp/hygon/csv-dev.c | 55 ++++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 7 ++++ drivers/crypto/ccp/sev-dev.c | 4 +++ include/linux/psp-hygon.h | 40 ++++++++++++++++++++++ 4 files changed, 106 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index bc54e1f8325d..44323a4e8dba 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -27,6 +27,8 @@ u32 hygon_csv_build; EXPORT_SYMBOL_GPL(hygon_csv_build); +int csv_comm_mode = CSV_COMM_MAILBOX_ON; + /* * csv_update_api_version used to update the api version of HYGON CSV * firmwareat driver side. @@ -44,6 +46,7 @@ int csv_cmd_buffer_len(int cmd) { switch (cmd) { case CSV_CMD_HGSC_CERT_IMPORT: return sizeof(struct csv_data_hgsc_cert_import); + case CSV_CMD_RING_BUFFER: return sizeof(struct csv_data_ring_buffer); default: return 0; } } @@ -144,6 +147,58 @@ const struct file_operations csv_fops = { .unlocked_ioctl = csv_ioctl, }; +/* + * __csv_ring_buffer_enter_locked issues command to switch to RING BUFFER + * mode, the caller must acquire the mutex lock. 
+ */ +static int __maybe_unused __csv_ring_buffer_enter_locked(int *error) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + int ret = 0; + + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + sev = psp->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +void csv_restore_mailbox_mode_postprocess(void) +{ + csv_comm_mode = CSV_COMM_MAILBOX_ON; + csv_ring_buffer_queue_free(); +} + /* * __csv_ring_buffer_queue_init will allocate memory for command queue * and status queue. 
If error occurs, this function will return directly, diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 677669e2371f..ceea324f727b 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -14,9 +14,16 @@ #include extern u32 hygon_csv_build; +extern int csv_comm_mode; extern const struct file_operations csv_fops; void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); +void csv_restore_mailbox_mode_postprocess(void); + +static inline bool csv_in_ring_buffer_mode(void) +{ + return csv_comm_mode == CSV_COMM_RINGBUFFER_ON; +} #endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 3e2b1acf5570..4687bdf56fd6 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -338,6 +338,10 @@ static int __sev_platform_shutdown_locked(int *error) if (ret) return ret; + /* RING BUFFER mode exits if a SHUTDOWN command is executed */ + if (is_vendor_hygon() && csv_in_ring_buffer_mode()) + csv_restore_mailbox_mode_postprocess(); + sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 656e8433dfff..614cbe0f5c4d 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -20,10 +20,21 @@ * Guest/platform management commands for CSV */ enum csv_cmd { + CSV_CMD_RING_BUFFER = 0x00F, CSV_CMD_HGSC_CERT_IMPORT = 0x300, CSV_CMD_MAX, }; +/** + * CSV communication state + */ +enum csv_comm_state { + CSV_COMM_MAILBOX_ON = 0x0, + CSV_COMM_RINGBUFFER_ON = 0x1, + + CSV_COMM_MAX +}; + /** * Ring Buffer Mode regions: * There are 4 regions and every region is a 4K area that must be 4K aligned. 
@@ -84,6 +95,35 @@ struct csv_ringbuffer_queue { struct csv_queue stat_val; } __packed; +/** + * struct csv_data_ring_buffer - RING_BUFFER command parameters + * + * @queue_lo_cmdptr_address: physical address of the region to be used for + * low priority queue's CmdPtr ring buffer + * @queue_lo_statval_address: physical address of the region to be used for + * low priority queue's StatVal ring buffer + * @queue_hi_cmdptr_address: physical address of the region to be used for + * high priority queue's CmdPtr ring buffer + * @queue_hi_statval_address: physical address of the region to be used for + * high priority queue's StatVal ring buffer + * @queue_lo_size: size of the low priority queue in 4K pages. Must be 1 + * @queue_hi_size: size of the high priority queue in 4K pages. Must be 1 + * @queue_lo_threshold: queue(low) size, below which an interrupt may be generated + * @queue_hi_threshold: queue(high) size, below which an interrupt may be generated + * @int_on_empty: unconditionally interrupt when both queues are found empty + */ +struct csv_data_ring_buffer { + u64 queue_lo_cmdptr_address; /* In */ + u64 queue_lo_statval_address; /* In */ + u64 queue_hi_cmdptr_address; /* In */ + u64 queue_hi_statval_address; /* In */ + u8 queue_lo_size; /* In */ + u8 queue_hi_size; /* In */ + u16 queue_lo_threshold; /* In */ + u16 queue_hi_threshold; /* In */ + u16 int_on_empty; /* In */ +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); -- Gitee From 2ab5860a0481ab954c5934d4f6664a93f1193c05 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:36:19 +0800 Subject: [PATCH 23/35] crypto: ccp: Add support for issue commands in CSV RING_BUFFER mode hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- The CSV firmware stays in Mailbox mode by default. 
Upon successfully switched to CSV RING_BUFFER mode, the semantics of the 3 registers used for communicate between X86 and CSV firmware will be changed: - The CmdResp register becomes the RBCtl register. It is only ever written by X86. - The CmdBufAddr_Hi register becomes the RBTail register. It is only ever written by X86. - The CmdBufAddr_Lo register becomes the RBHead register. It should never be written by X86; the PSP will update it. The CSV firmware will exit CSV RING_BUFFER mode when it read invalid value from the RBCtl register. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- drivers/crypto/ccp/hygon/csv-dev.c | 132 ++++++++++++++++++++++++++++- drivers/crypto/ccp/hygon/csv-dev.h | 14 +++ drivers/crypto/ccp/sev-dev.c | 3 +- include/linux/psp-hygon.h | 8 ++ 4 files changed, 155 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 44323a4e8dba..82a8e22ebaf8 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -151,7 +151,7 @@ const struct file_operations csv_fops = { * __csv_ring_buffer_enter_locked issues command to switch to RING BUFFER * mode, the caller must acquire the mutex lock. 
*/ -static int __maybe_unused __csv_ring_buffer_enter_locked(int *error) +static int __csv_ring_buffer_enter_locked(int *error) { struct psp_device *psp = psp_master; struct sev_device *sev; @@ -193,6 +193,136 @@ static int __maybe_unused __csv_ring_buffer_enter_locked(int *error) return ret; } +static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev, + unsigned int *reg, + unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(sev->int_queue, + sev->int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + + *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + return 0; +} + +static int csv_get_cmd_status(struct sev_device *sev, int prio, int index) +{ + struct csv_queue *queue = &sev->ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; + + return statval[index].status; +} + +static int __csv_do_ringbuf_cmds_locked(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int rb_tail; + unsigned int rb_ctl; + int last_cmd_index; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail; + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = PSP_RBCTL_X86_WRITES | + PSP_RBCTL_RBMODE_ACT | + PSP_RBCTL_CLR_INTSTAT; + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = 
csv_wait_cmd_ioc_ring_buffer(sev, ®, + (*hygon_psp_hooks.psp_timeout) * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + if (psp_ret) { + last_cmd_index = (reg & PSP_RBHEAD_QHI_HEAD_MASK) + >> PSP_RBHEAD_QHI_HEAD_SHIFT; + *psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH, + last_cmd_index); + if (*psp_ret == 0) { + last_cmd_index = reg & PSP_RBHEAD_QLO_HEAD_MASK; + *psp_ret = csv_get_cmd_status(sev, + CSV_COMMAND_PRIORITY_LOW, last_cmd_index); + } + } + + return ret; +} + +/* + * csv_do_ringbuf_cmds will enter RING BUFFER mode and handling commands + * queued in RING BUFFER queues, the user is obligate to manage RING + * BUFFER queues including allocate, enqueue and free, etc. + */ +static int csv_do_ringbuf_cmds(int *psp_ret) +{ + struct sev_user_data_status data; + int rc; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + + rc = __csv_ring_buffer_enter_locked(psp_ret); + if (rc) + goto cmd_unlock; + + rc = __csv_do_ringbuf_cmds_locked(psp_ret); + + /* exit ringbuf mode by send CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +cmd_unlock: + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) +{ + if (!filep || filep->f_op != &csv_fops) + return -EBADF; + + return csv_do_ringbuf_cmds(psp_ret); +} +EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user); + void csv_restore_mailbox_mode_postprocess(void) { csv_comm_mode = CSV_COMM_MAILBOX_ON; diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index ceea324f727b..a84d791a5a1a 100644 --- 
a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -12,6 +12,20 @@ #include #include +#include + +#define PSP_RBCTL_X86_WRITES BIT(31) +#define PSP_RBCTL_RBMODE_ACT BIT(30) +#define PSP_RBCTL_CLR_INTSTAT BIT(29) +#define PSP_RBTAIL_QHI_TAIL_SHIFT 16 +#define PSP_RBTAIL_QHI_TAIL_MASK 0x7FF0000 +#define PSP_RBTAIL_QLO_TAIL_MASK 0x7FF + +#define PSP_RBHEAD_QHI_HEAD_SHIFT 16 +#define PSP_RBHEAD_QHI_HEAD_MASK 0x7FF0000 +#define PSP_RBHEAD_QLO_HEAD_MASK 0x7FF + +#define PSP_RBHEAD_QPAUSE_INT_STAT BIT(30) extern u32 hygon_csv_build; extern int csv_comm_mode; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 4687bdf56fd6..ba8b6e3c2c40 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -88,7 +88,8 @@ static void sev_irq_handler(int irq, void *data, unsigned int status) /* Check if it is SEV command completion: */ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); - if (FIELD_GET(PSP_CMDRESP_RESP, reg)) { + if (FIELD_GET(PSP_CMDRESP_RESP, reg) || + (is_vendor_hygon() && csv_in_ring_buffer_mode())) { sev->int_rcvd = 1; wake_up(&sev->int_queue); } diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 614cbe0f5c4d..25377d7948c8 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -11,6 +11,7 @@ #define __PSP_HYGON_H__ #include +#include /*****************************************************************************/ /***************************** CSV interface *********************************/ @@ -131,6 +132,11 @@ int csv_ring_buffer_queue_init(void); int csv_ring_buffer_queue_free(void); int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); int csv_check_stat_queue_status(int *psp_ret); +/** + * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring + * buffer. 
+ */ +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ @@ -140,6 +146,8 @@ static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } static inline int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } +static inline int +csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ -- Gitee From 94744670dad8213ea632ca3875ed07d86df2dc2b Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:51:55 +0800 Subject: [PATCH 24/35] KVM: SVM: Add KVM_CSV_COMMAND_BATCH command for applying CSV RING_BUFFER mode hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- The API KVM_CSV_COMMAD_BATCH receives data of structure kvm_csv_command_batch which embedded a link list of CSV command requests from userspace. It will do some preparation works to ensure data available for CSV RING_BUFFER mode, and then issues RING_BUFFER command. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/csv.c | 209 +++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 21 ++++ arch/x86/kvm/svm/sev.c | 1 + include/uapi/linux/kvm.h | 18 ++++ 4 files changed, 249 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index ef69fe72e769..69d8faef3c48 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -26,6 +26,29 @@ struct hygon_kvm_hooks_table hygon_kvm_hooks; static struct kvm_x86_ops csv_x86_ops; static const char csv_vm_mnonce[] = "VM_ATTESTATION"; +static DEFINE_MUTEX(csv_cmd_batch_mutex); + +static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + + fdput(f); + return ret; +} + +static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); +} int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) { @@ -93,6 +116,191 @@ int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) return ret; } +static int csv_ringbuf_infos_free(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + if (item) { + if (item->data_vaddr) + kfree((void *)item->data_vaddr); + + if (item->hdr_vaddr) + kfree((void *)item->hdr_vaddr); + + if (item->pages) + hygon_kvm_hooks.sev_unpin_memory(kvm, item->pages, + item->n); + + kfree(item); + + ringbuf_infos->item[i] = NULL; + } + } + + return 0; +} + +typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos); +typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm, + struct csv_ringbuf_infos 
*ringbuf_infos); + +static int get_cmd_helpers(__u32 cmd, + csv_ringbuf_input_fn *to_ringbuf_fn, + csv_ringbuf_output_fn *to_user_fn) +{ + int ret = 0; + + /* copy commands to ring buffer*/ + switch (cmd) { + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + int ret; + struct kvm_csv_command_batch params; + uintptr_t node_addr; + struct csv_ringbuf_infos *ringbuf_infos; + csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL; + csv_ringbuf_output_fn csv_copy_to_user_fn = NULL; + int prio = CSV_COMMAND_PRIORITY_HIGH; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_command_batch))) + return -EFAULT; + + /* return directly if node list is NULL */ + if (!params.csv_batch_list_uaddr) + return 0; + + /* ring buffer init */ + if (csv_ring_buffer_queue_init()) + return -EINVAL; + + if (get_cmd_helpers(params.command_id, + &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) { + ret = -EINVAL; + goto err_free_ring_buffer; + } + + ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL); + if (!ringbuf_infos) { + ret = -ENOMEM; + goto err_free_ring_buffer; + } + + node_addr = (uintptr_t)params.csv_batch_list_uaddr; + while (node_addr) { + struct kvm_csv_batch_list_node node; + + if (copy_from_user(&node, (void __user *)node_addr, + sizeof(struct kvm_csv_batch_list_node))) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) { + pr_err("%s: ring num is too large:%d, cmd:0x%x\n", + __func__, ringbuf_infos->num, params.command_id); + + ret = -EINVAL; + goto err_free_ring_buffer_infos_items; + } + + if (csv_cmd_to_ringbuf_fn(kvm, prio, + (uintptr_t)node.cmd_data_addr, + ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + /* 1st half set to HIGH queue, 2nd half set to LOW queue */ + if (ringbuf_infos->num == 
SVM_RING_BUFFER_MAX / 2) + prio = CSV_COMMAND_PRIORITY_LOW; + + node_addr = node.next_cmd_addr; + } + + /* ring buffer process */ + ret = csv_issue_ringbuf_cmds(kvm, &argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + ret = csv_check_stat_queue_status(&argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + +err_free_ring_buffer_infos_items: + csv_ringbuf_infos_free(kvm, ringbuf_infos); + kfree(ringbuf_infos); + +err_free_ring_buffer: + csv_ring_buffer_queue_free(); + + return ret; +} + +static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) +{ + struct kvm_sev_cmd sev_cmd; + int r; + + if (!hygon_kvm_hooks.sev_hooks_installed || + !(*hygon_kvm_hooks.sev_enabled)) + return -ENOTTY; + + if (!argp) + return 0; + + if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) + return -EFAULT; + + mutex_lock(&kvm->lock); + + switch (sev_cmd.id) { + case KVM_CSV_COMMAND_BATCH: + mutex_lock(&csv_cmd_batch_mutex); + r = csv_command_batch(kvm, &sev_cmd); + mutex_unlock(&csv_cmd_batch_mutex); + break; + default: + /* + * If the command is compatible between CSV and SEV, the + * native implementation of the driver is invoked. + * Release the mutex before calling the native ioctl function + * because it will acquires the mutex. 
+ */ + mutex_unlock(&kvm->lock); + if (likely(csv_x86_ops.mem_enc_op)) + return csv_x86_ops.mem_enc_op(kvm, argp); + } + + if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) + r = -EFAULT; + + mutex_unlock(&kvm->lock); + return r; +} + void csv_exit(void) { } @@ -108,5 +316,6 @@ void __init csv_init(struct kvm_x86_ops *ops) memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + ops->mem_enc_op = csv_mem_enc_ioctl; ops->vm_attestation = csv_vm_attestation; } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 58b3f39c854c..4e9ec062ce4d 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -14,6 +14,26 @@ extern u32 hygon_csv_build; +/* same to the ring buffer max num */ +#define SVM_RING_BUFFER_MAX 4094 + +struct csv_ringbuf_info_item { + struct page **pages; + uintptr_t hdr_vaddr; + uintptr_t trans_vaddr; + uintptr_t data_vaddr; + uintptr_t trans_uaddr; + uintptr_t hdr_uaddr; + unsigned long trans_len; + unsigned long hdr_len; + unsigned long n; +}; + +struct csv_ringbuf_infos { + struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; + int num; +}; + #ifdef CONFIG_HYGON_CSV /* @@ -22,6 +42,7 @@ extern u32 hygon_csv_build; */ extern struct hygon_kvm_hooks_table { bool sev_hooks_installed; + bool *sev_enabled; int (*sev_issue_cmd)(struct kvm *kvm, int id, void *data, int *error); unsigned long (*get_num_contig_pages)(unsigned long idx, struct page **inpages, diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 1ba29d3d4076..7fc5fa2275c7 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1770,6 +1770,7 @@ void sev_vm_destroy(struct kvm *kvm) /* Code to set all of the function and vaiable pointers */ void sev_install_hooks(void) { + hygon_kvm_hooks.sev_enabled = &sev_enabled; hygon_kvm_hooks.sev_issue_cmd = sev_issue_cmd; hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h 
index f073f15dc70b..74b03d0deff4 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1925,4 +1925,22 @@ struct kvm_dirty_gfn { #define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0) #define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1) +enum csv_cmd_id { + /* HYGON CSV batch command */ + KVM_CSV_COMMAND_BATCH = 0x18, + + KVM_CSV_NR_MAX, +}; + +struct kvm_csv_batch_list_node { + __u64 cmd_data_addr; + __u64 addr; + __u64 next_cmd_addr; +}; + +struct kvm_csv_command_batch { + __u32 command_id; + __u64 csv_batch_list_uaddr; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From 7636bf03412720010d50d978e7398e6ddb9f079e Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 18:22:04 +0800 Subject: [PATCH 25/35] KVM: SVM: Prepare memory pool to allocate buffers for KVM_CSV_COMMAND_BATCH hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- In the upcoming patches, many buffers need to be allocated in KVM_CSV_COMMAND_BATCH code paths. To avoid memory allocation failures, directly allocate a memory pool in sev_hardware_setup() and free the memory pool in sev_hardware_teardown(). When KVM_CSV_COMMAND_BATCH handling a batch of SEND_UPDATE_DATA/RECEIVE_UPDATE_DATA commands, it will allocate trans buffers from the memory pool. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/csv.c | 87 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 6 +++ arch/x86/kvm/svm/sev.c | 24 +++++++++--- 3 files changed, 112 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 69d8faef3c48..6af6f8372976 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -116,6 +116,92 @@ int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) return ret; } +/*--1024--1023--1024--1023--*/ +#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 +#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) +#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET (2047 << PAGE_SHIFT) +#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET (3071 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET (4094 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCK_NUM 4 +#define TRANS_MEMPOOL_BLOCK_SIZE (1024 * PAGE_SIZE) + +static size_t g_mempool_offset; +void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, }; + +static void csv_reset_mempool_offset(void) +{ + g_mempool_offset = 0; +} + +int csv_alloc_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + WARN_ONCE(g_trans_mempool[i], + "g_trans_mempool[%d] was tainted\n", i); + + g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL); + if (!g_trans_mempool[i]) + goto free_trans_mempool; + } + + csv_reset_mempool_offset(); + return 0; + +free_trans_mempool: + csv_free_trans_mempool(); + pr_warn("Fail to allocate mem pool, CSV(2) live migration will very slow\n"); + + return -ENOMEM; +} + +void csv_free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + csv_reset_mempool_offset(); +} + +static void __maybe_unused *get_trans_data_from_mempool(size_t size) +{ + void *trans = NULL; + char *trans_data = NULL; + int i; + size_t offset; + + if 
(g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { + i = 0; + offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { + i = 1; + offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { + i = 2; + offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { + i = 3; + offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; + } else { + pr_err("mempool is full (offset: %lu)\n", g_mempool_offset); + return NULL; + } + + trans_data = (char *)g_trans_mempool[i]; + if (!trans_data) + return NULL; + + trans = &trans_data[offset]; + g_mempool_offset += size; + + return trans; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -252,6 +338,7 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) err_free_ring_buffer_infos_items: csv_ringbuf_infos_free(kvm, ringbuf_infos); kfree(ringbuf_infos); + csv_reset_mempool_offset(); err_free_ring_buffer: csv_ring_buffer_queue_free(); diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 4e9ec062ce4d..bbf6f6fd4301 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -57,11 +57,17 @@ extern struct hygon_kvm_hooks_table { void __init csv_init(struct kvm_x86_ops *ops); void csv_exit(void); +int csv_alloc_trans_mempool(void); +void csv_free_trans_mempool(void); + #else /* !CONFIG_HYGON_CSV */ static inline void __init csv_init(struct kvm_x86_ops *ops) { } static inline void csv_exit(void) { } +static inline int csv_alloc_trans_mempool(void) { return 0; } +static inline void csv_free_trans_mempool(void) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __SVM_CSV_H */ diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7fc5fa2275c7..be553a183d79 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1850,12 
+1850,22 @@ void __init sev_hardware_setup(void) sev_es_enabled = sev_es_supported; #ifdef CONFIG_HYGON_CSV - /* - * Install sev related function and variable pointers hooks only for - * Hygon CPUs. - */ - if (is_x86_vendor_hygon()) + /* Setup resources which are necessary for HYGON CSV */ + if (is_x86_vendor_hygon()) { + /* + * Install sev related function and variable pointers hooks + * no matter @sev_enabled is false. + */ sev_install_hooks(); + + /* + * Allocate a memory pool to speed up live migration of + * the CSV/CSV2 guests. If the allocation fails, no + * acceleration is performed at live migration. + */ + if (sev_enabled) + csv_alloc_trans_mempool(); + } #endif } @@ -1865,6 +1875,10 @@ void sev_hardware_teardown(void) if (!svm_sev_enabled()) return; + /* Free the memory pool that allocated in sev_hardware_setup(). */ + if (is_x86_vendor_hygon()) + csv_free_trans_mempool(); + bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); -- Gitee From 0b57969b0a5d5a925263c1b1e59c260bdaac8f78 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:38:41 +0800 Subject: [PATCH 26/35] KVM: SVM: Add SEND_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- When KVM_CSV_COMMAND_BATCH handling a batch of SEND_UPDATE_DATA commands, it need execute 3 steps: 1. Enqueue each SEND_UPDATE_DATA command data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command 3. Copy the output of RING_BUFFER command to userspace In this change, we add sev_send_update_data_to_ringbuf() to prepare input required by RING_BUFFER command as dictated in step 1, and add sev_send_update_data_copy_to_user() to copy output userspace as dictated in step 3. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/csv.c | 141 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 1 + arch/x86/kvm/svm/sev.c | 1 + 3 files changed, 143 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 6af6f8372976..648748e347e7 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -202,6 +203,142 @@ static void __maybe_unused *get_trans_data_from_mempool(size_t size) return trans; } +static int +csv_send_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + struct csv_ringbuf_info_item *item; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* + * userspace shouldn't query either header or trans length in ringbuf + * mode. 
+ */ + if (!params.trans_len || !params.hdr_len) + return -EINVAL; + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + /* Pin guest memory */ + guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); + + /* Allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL); + if (!hdr) + goto e_unpin; + + trans_data = get_trans_data_from_mempool(params.trans_len); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= *hygon_kvm_hooks.sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0); + if (ret) + goto e_free; + + /* + * Create item to save page info and pointer, which will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. 
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_free; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->hdr_uaddr = params.hdr_uaddr; + item->hdr_len = params.hdr_len; + item->trans_vaddr = (uintptr_t)trans_data; + item->trans_uaddr = params.trans_uaddr; + item->trans_len = params.trans_len; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num++] = item; + + /* copy to ring buffer success, data freed after commands completed */ + return 0; + +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); +e_unpin: + hygon_kvm_hooks.sev_unpin_memory(kvm, guest_page, n); + return ret; +} + +static int +csv_send_update_data_copy_to_user(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i, ret = 0; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)item->trans_uaddr, + (void *)item->trans_vaddr, item->trans_len)) { + ret = -EFAULT; + break; + } + + /* Copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)item->hdr_uaddr, + (void *)item->hdr_vaddr, item->hdr_len)) { + ret = -EFAULT; + break; + } + } + + return ret; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -244,6 +381,10 @@ static int get_cmd_helpers(__u32 cmd, /* copy commands to ring buffer*/ switch (cmd) { + case KVM_SEV_SEND_UPDATE_DATA: + *to_ringbuf_fn = csv_send_update_data_to_ringbuf; + *to_user_fn = csv_send_update_data_copy_to_user; + break; default: ret = -EINVAL; break; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index bbf6f6fd4301..ad3361e89f0a 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -43,6 +43,7 @@ struct csv_ringbuf_infos { extern struct hygon_kvm_hooks_table { bool sev_hooks_installed; bool *sev_enabled; + unsigned long *sev_me_mask; int (*sev_issue_cmd)(struct kvm *kvm, int id, void *data, int *error); unsigned long (*get_num_contig_pages)(unsigned long idx, struct page **inpages, diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index be553a183d79..019fe0acb003 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1771,6 +1771,7 @@ void sev_vm_destroy(struct kvm *kvm) void sev_install_hooks(void) { hygon_kvm_hooks.sev_enabled = &sev_enabled; + hygon_kvm_hooks.sev_me_mask = &sev_me_mask; hygon_kvm_hooks.sev_issue_cmd = sev_issue_cmd; hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; -- Gitee From c142ce794117abd3f91f6283054eec1c1f841ffc Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:50:54 +0800 Subject: [PATCH 27/35] KVM: SVM: Add RECEIVE_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- When KVM_CSV_COMMAND_BATCH handling a batch of RECEIVE_UPDATE_DATA commands, it need execute 2 steps: 1. 
Enqueue each RECEIVE_UPDATE_DATA command data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command In this change, we add csv_receive_update_data_to_ringbuf() to prepare input required by RING_BUFFER command as dictated in step 1. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/csv.c | 119 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 1 + arch/x86/kvm/svm/sev.c | 1 + 3 files changed, 121 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 648748e347e7..f2fc30633f3d 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -339,6 +339,121 @@ csv_send_update_data_copy_to_user(struct kvm *kvm, return ret; } +static int +csv_receive_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data *data; + struct csv_ringbuf_info_item *item; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(&params, (void __user *)data_ptr, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + ret = -ENOMEM; + trans = get_trans_data_from_mempool(params.trans_len); + if (!trans) + goto e_free_hdr; + + if (copy_from_user(trans, (void __user *)params.trans_uaddr, + 
params.trans_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans); + data->trans_len = params.trans_len; + + /* Pin guest memory */ + guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 1); + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); + goto e_free; + } + + /* + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP + * encrypts the written data with the guest's key, and the cache may + * contain dirty, unencrypted data. + */ + hygon_kvm_hooks.sev_clflush_pages(guest_page, n); + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= *hygon_kvm_hooks.sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0); + + if (ret) + goto e_unpin; + + /* + * Create item to save page info and pointer, whitch will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. 
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_unpin; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num++] = item; + + /* copy to ring buffer success, data freed after commands completed */ + return 0; + +e_unpin: + hygon_kvm_hooks.sev_unpin_memory(kvm, guest_page, n); +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -385,6 +500,10 @@ static int get_cmd_helpers(__u32 cmd, *to_ringbuf_fn = csv_send_update_data_to_ringbuf; *to_user_fn = csv_send_update_data_copy_to_user; break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + *to_ringbuf_fn = csv_receive_update_data_to_ringbuf; + *to_user_fn = NULL; + break; default: ret = -EINVAL; break; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index ad3361e89f0a..747bb6050399 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -53,6 +53,7 @@ extern struct hygon_kvm_hooks_table { int write); void (*sev_unpin_memory)(struct kvm *kvm, struct page **pages, unsigned long npages); + void (*sev_clflush_pages)(struct page *pages[], unsigned long npages); } hygon_kvm_hooks; void __init csv_init(struct kvm_x86_ops *ops); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 019fe0acb003..c0c3b380b2fd 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1776,6 +1776,7 @@ void sev_install_hooks(void) hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; hygon_kvm_hooks.sev_unpin_memory = sev_unpin_memory; + hygon_kvm_hooks.sev_clflush_pages = sev_clflush_pages; hygon_kvm_hooks.sev_hooks_installed = true; } -- Gitee From a5e1567c6988ac231e009bd06da5e779045a7273 Mon Sep 17 00:00:00 2001 From: Steve Rutherford Date: Thu, 24 
Aug 2023 15:37:31 -0700 Subject: [PATCH 28/35] x86/sev: Make enc_dec_hypercall() accept a size instead of npages mainline inclusion from mainline-v6.1 commit ac3f9c9f1b37edaa7d1a9b908bc79d843955a1a2 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ac3f9c9f1b37edaa7d1a9b908bc79d843955a1a2 --------------------------- enc_dec_hypercall() accepted a page count instead of a size, which forced its callers to round up. As a result, non-page aligned vaddrs caused pages to be spuriously marked as decrypted via the encryption status hypercall, which in turn caused consistent corruption of pages during live migration. Live migration requires accurate encryption status information to avoid migrating pages from the wrong perspective. Fixes: 064ce6c550a0 ("mm: x86: Invoke hypercall when page encryption status is changed") Signed-off-by: Steve Rutherford Signed-off-by: Ingo Molnar Reviewed-by: Tom Lendacky Reviewed-by: Pankaj Gupta Tested-by: Ben Hillier Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230824223731.2055016-1-srutherford@google.com Signed-off-by: Zhiguang Ni --- arch/x86/include/asm/mem_encrypt.h | 6 +++--- arch/x86/include/asm/set_memory.h | 2 +- arch/x86/kernel/kvm.c | 4 +--- arch/x86/mm/mem_encrypt.c | 11 +++++------ arch/x86/mm/pat/set_memory.c | 2 +- 5 files changed, 11 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 8442ba5fc498..4383272d4db9 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -43,8 +43,8 @@ void __init sme_enable(struct boot_params *bp); int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size); int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); -void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, - bool enc); +void 
__init early_set_mem_enc_dec_hypercall(unsigned long vaddr, + unsigned long size, bool enc); void __init mem_encrypt_free_decrypted_mem(void); @@ -84,7 +84,7 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; static inline int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; } static inline void __init -early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {} +early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) {} static inline void mem_encrypt_free_decrypted_mem(void) { } diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h index e7b23e574d1a..0973efddba65 100644 --- a/arch/x86/include/asm/set_memory.h +++ b/arch/x86/include/asm/set_memory.h @@ -83,7 +83,7 @@ int set_pages_rw(struct page *page, int numpages); int set_direct_map_invalid_noflush(struct page *page); int set_direct_map_default_noflush(struct page *page); -void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc); +void notify_range_enc_status_changed(unsigned long vaddr, int size, bool enc); extern int kernel_set_to_readonly; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 6e12052dd607..c566f1316f06 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -919,10 +919,8 @@ static void __init kvm_init_platform(void) * Ensure that _bss_decrypted section is marked as decrypted in the * shared pages list. */ - nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted, - PAGE_SIZE); early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted, - nr_pages, 0); + __end_bss_decrypted - __start_bss_decrypted, 0); /* * If not booted using EFI, enable Live migration support. 
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 73f19c68900d..577fa0dbaa93 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -228,11 +228,10 @@ static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot) return pfn; } -void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc) +void notify_range_enc_status_changed(unsigned long vaddr, int size, bool enc) { #ifdef CONFIG_PARAVIRT - unsigned long sz = npages << PAGE_SHIFT; - unsigned long vaddr_end = vaddr + sz; + unsigned long vaddr_end = vaddr + size; while (vaddr < vaddr_end) { int psize, pmask, level; @@ -364,7 +363,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr, ret = 0; - notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc); + notify_range_enc_status_changed(start, size, enc); out: __flush_tlb_all(); return ret; @@ -380,9 +379,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size) return early_set_memory_enc_dec(vaddr, size, true); } -void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) +void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) { - notify_range_enc_status_changed(vaddr, npages, enc); + notify_range_enc_status_changed(vaddr, size, enc); } /* diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 89ee01837648..8a0444880c9e 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -2016,7 +2016,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) * Notify hypervisor that a given memory range is mapped encrypted * or decrypted. 
*/ - notify_range_enc_status_changed(addr, numpages, enc); + notify_range_enc_status_changed(addr, numpages << PAGE_SHIFT, enc); return ret; } -- Gitee From d45aea7c4c3bf569d330a40a1d5de88a1ad49622 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 6 May 2021 10:58:26 -0700 Subject: [PATCH 29/35] KVM: SVM: Fix sev_pin_memory() error checks in SEV migration utilities mainline inclusion from mainline-v5.13 commit c7a1b2b678c54ac19320daf525038d0e2e43ca7c category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=c7a1b2b678c54ac19320daf525038d0e2e43ca7c --------------------------- Use IS_ERR() instead of checking for a NULL pointer when querying for sev_pin_memory() failures. sev_pin_memory() always returns an error code cast to a pointer, or a valid pointer; it never returns NULL. Reported-by: Dan Carpenter Cc: Steve Rutherford Cc: Brijesh Singh Cc: Ashish Kalra Fixes: d3d1af85e2c7 ("KVM: SVM: Add KVM_SEND_UPDATE_DATA command") Fixes: 15fb7de1a7f5 ("KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_DATA command") Signed-off-by: Sean Christopherson Message-Id: <20210506175826.2166383-3-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index c0c3b380b2fd..10391d64ce68 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1257,8 +1257,8 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) /* Pin guest memory */ guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, PAGE_SIZE, &n, 0); - if (!guest_page) - return -EFAULT; + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); /* allocate memory for header and transport buffer */ ret = -ENOMEM; @@ -1483,11 +1483,12 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd 
*argp) data->trans_len = params.trans_len; /* Pin guest memory */ - ret = -EFAULT; guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, PAGE_SIZE, &n, 1); - if (!guest_page) + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); goto e_free; + } /* * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP -- Gitee From d7a74e641501da4c3cc378b089b64850ce9e8ad7 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 22 Oct 2023 10:21:25 +0800 Subject: [PATCH 30/35] mm: x86: Fix kabi breakage of pv_ops hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA --------------------------- Fix kabi breakage of pv_ops Fixes: ("mm: x86: Invoke hypercall when page encryption status is changed") Signed-off-by: hanliyang Signed-off-by: Zhiguang Ni --- arch/x86/include/asm/kvm_para.h | 12 ++++++++++++ arch/x86/include/asm/mem_encrypt.h | 12 ++++++++++++ arch/x86/include/asm/paravirt.h | 6 ------ arch/x86/include/asm/paravirt_types.h | 1 - arch/x86/kernel/kvm.c | 7 ++++--- arch/x86/kernel/paravirt.c | 1 - arch/x86/mm/mem_encrypt.c | 15 +++++++++++++++ 7 files changed, 43 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 56935ebb1dfe..02abda514bd3 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -116,6 +116,10 @@ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token) return false; } +/* HYGON added to avoid kabi breakage of pv_ops (start) */ +extern void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc); +/* HYGON added to avoid kabi breakage of pv_ops (end) */ + #ifdef CONFIG_PARAVIRT_SPINLOCKS void __init kvm_spinlock_init(void); #else /* !CONFIG_PARAVIRT_SPINLOCKS */ @@ -152,6 +156,14 @@ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token) { return false; } + +/* HYGON added to avoid kabi breakage of pv_ops (start) */ +static inline void 
kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc) +{ + return; +} +/* HYGON added to avoid kabi breakage of pv_ops (end) */ + #endif #endif /* _ASM_X86_KVM_PARA_H */ diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 4383272d4db9..887e417ec563 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -57,6 +57,14 @@ void __init mem_encrypt_init(void); #define __bss_decrypted __section(".bss..decrypted") +/* HYGON added to avoid kabi breakage of pv_ops (start) */ +extern bool page_enc_status_kvm_hypercall_enable; +#define ENABLE_PAGE_ENC_STATUS_KVM_HYPERCALL \ +({ \ + page_enc_status_kvm_hypercall_enable = true; \ +}) +/* HYGON added to avoid kabi breakage of pv_ops (end) */ + #else /* !CONFIG_AMD_MEM_ENCRYPT */ #define sme_me_mask 0ULL @@ -92,6 +100,10 @@ static inline void mem_encrypt_init(void) { } #define __bss_decrypted +/* HYGON added to avoid kabi breakage of pv_ops (start) */ +#define ENABLE_PAGE_ENC_STATUS_KVM_HYPERCALL +/* HYGON added to avoid kabi breakage of pv_ops (end) */ + #endif /* CONFIG_AMD_MEM_ENCRYPT */ /* diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 0740fe7d0def..aa5aba325e30 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -84,12 +84,6 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm) PVOP_VCALL1(mmu.exit_mmap, mm); } -static inline void notify_page_enc_status_changed(unsigned long pfn, - int npages, bool enc) -{ - PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc); -} - #ifdef CONFIG_PARAVIRT_XXL static inline void load_sp0(unsigned long sp0) { diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 2cf082ff733f..b30b56d47619 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -212,7 +212,6 @@ struct pv_mmu_ops { /* Hook for intercepting the destruction of an 
mm_struct. */ void (*exit_mmap)(struct mm_struct *mm); - void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc); #ifdef CONFIG_PARAVIRT_XXL struct paravirt_callee_save read_cr2; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index c566f1316f06..f646209ef16d 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -875,7 +875,7 @@ static bool __init kvm_msi_ext_dest_id(void) return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID); } -static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc) +void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc) { kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages, KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K); @@ -888,8 +888,9 @@ static void __init kvm_init_platform(void) unsigned long nr_pages; int i; - pv_ops.mmu.notify_page_enc_status_changed = - kvm_sev_hc_page_enc_status; + /* HYGON added to avoid kabi breakage of pv_ops (start) */ + ENABLE_PAGE_ENC_STATUS_KVM_HYPERCALL; + /* HYGON added to avoid kabi breakage of pv_ops (end) */ /* * Reset the host's shared pages list related to kernel diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 2cc14ed576ca..2da5c225bf04 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -328,7 +328,6 @@ struct paravirt_patch_template pv_ops = { (void (*)(struct mmu_gather *, void *))tlb_remove_page, .mmu.exit_mmap = paravirt_nop, - .mmu.notify_page_enc_status_changed = paravirt_nop, #ifdef CONFIG_PARAVIRT_XXL .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2), diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 577fa0dbaa93..c712407704f1 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -31,6 +31,10 @@ #include #include +/* HYGON added to avoid kabi breakage of pv_ops (start) */ +#include +/* HYGON added to avoid kabi breakage of pv_ops (end) */ + #include "mm_internal.h" #include @@ 
-228,6 +232,17 @@ static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot) return pfn; } +/* HYGON added to avoid kabi breakage of pv_ops (start) */ +bool page_enc_status_kvm_hypercall_enable; + +static inline void notify_page_enc_status_changed(unsigned long pfn, + int npages, bool enc) +{ + if (page_enc_status_kvm_hypercall_enable == true) + kvm_sev_hc_page_enc_status(pfn, npages, enc); +} +/* HYGON added to avoid kabi breakage of pv_ops (end) */ + void notify_range_enc_status_changed(unsigned long vaddr, int size, bool enc) { #ifdef CONFIG_PARAVIRT -- Gitee From 7683beeb5e1da83ca785096d2a419f121add937c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 21 Apr 2021 19:11:24 -0700 Subject: [PATCH 31/35] KVM: SVM: Remove an unnecessary prototype declaration of sev_flush_asids() mainline inclusion from mainline-v5.11 commit 82b7ae0481aeed393094e4f73bf4566a504b86bc category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=82b7ae0481aeed393094e4f73bf4566a504b86bc --------------------------- Remove the forward declaration of sev_flush_asids(), which is only a few lines above the function itself. No functional change intended. 
Reviewed-by: Tom Lendacky Reviewed-by: Brijesh Singh Signed-off-by: Sean Christopherson Message-Id: <20210422021125.3417167-15-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 10391d64ce68..f13502c0d92e 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -41,7 +41,6 @@ static bool sev_es_enabled = true; module_param_named(sev_es, sev_es_enabled, bool, 0444); static u8 sev_enc_bit; -static int sev_flush_asids(void); static DECLARE_RWSEM(sev_deactivate_lock); static DEFINE_MUTEX(sev_bitmap_lock); unsigned int max_sev_asid; -- Gitee From 8b25d5329089f7255ab90cf75b071c35273d1f2e Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 21 Apr 2021 19:11:25 -0700 Subject: [PATCH 32/35] KVM: SVM: Skip SEV cache flush if no ASIDs have been used mainline inclusion from mainline-v5.13 commit 469bb32b68d5a414fea293c17b532329c6dc9612 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=469bb32b68d5a414fea293c17b532329c6dc9612 --------------------------- Skip SEV's expensive WBINVD and DF_FLUSH if there are no SEV ASIDs waiting to be reclaimed, e.g. if SEV was never used. This "fixes" an issue where the DF_FLUSH fails during hardware teardown if the original SEV_INIT failed. Ideally, SEV wouldn't be marked as enabled in KVM if SEV_INIT fails, but that's a problem for another day. 
Signed-off-by: Sean Christopherson Message-Id: <20210422021125.3417167-16-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index f13502c0d92e..6b836648d0f2 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -57,9 +57,15 @@ struct enc_region { unsigned long size; }; -static int sev_flush_asids(void) +/* Called with the sev_bitmap_lock held, or on shutdown */ +static int sev_flush_asids(int min_asid, int max_asid) { - int ret, error = 0; + int ret, pos, error = 0; + + /* Check if there are any ASIDs to reclaim before performing a flush */ + pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid); + if (pos >= max_asid) + return -EBUSY; /* * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail, @@ -81,14 +87,7 @@ static int sev_flush_asids(void) /* Must be called with the sev_bitmap_lock held */ static bool __sev_recycle_asids(int min_asid, int max_asid) { - int pos; - - /* Check if there are any ASIDs to reclaim before performing a flush */ - pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid); - if (pos >= max_asid) - return false; - - if (sev_flush_asids()) + if (sev_flush_asids(min_asid, max_asid)) return false; /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */ @@ -1881,10 +1880,11 @@ void sev_hardware_teardown(void) if (is_x86_vendor_hygon()) csv_free_trans_mempool(); + /* No need to take sev_bitmap_lock, all VMs have been destroyed. 
*/ + sev_flush_asids(0, max_sev_asid); + bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); - - sev_flush_asids(); } /* -- Gitee From 333c10eef39a4d553febfa99c3afa8d017dd57e4 Mon Sep 17 00:00:00 2001 From: Peter Gonda Date: Tue, 7 Feb 2023 09:13:54 -0800 Subject: [PATCH 33/35] KVM: SVM: Fix potential overflow in SEV's send|receive_update_data() mainline inclusion from mainline-v6.6 commit f94f053aa3a5d6ff17951870483d9eb9e13de2e2 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f94f053aa3a5d6ff17951870483d9eb9e13de2e2 --------------------------- KVM_SEV_SEND_UPDATE_DATA and KVM_SEV_RECEIVE_UPDATE_DATA have an integer overflow issue. Params.guest_len and offset are both 32 bits wide, with a large params.guest_len the check to confirm a page boundary is not crossed can falsely pass: /* Check if we are crossing the page boundary * offset = params.guest_uaddr & (PAGE_SIZE - 1); if ((params.guest_len + offset > PAGE_SIZE)) Add an additional check to confirm that params.guest_len itself is not greater than PAGE_SIZE. Note, this isn't a security concern as overflow can happen if and only if params.guest_len is greater than 0xfffff000, and the FW spec says these commands fail with lengths greater than 16KB, i.e. the PSP will detect KVM's goof. 
Fixes: 15fb7de1a7f5 ("KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_DATA command") Fixes: d3d1af85e2c7 ("KVM: SVM: Add KVM_SEND_UPDATE_DATA command") Reported-by: Andy Nguyen Suggested-by: Thomas Lendacky Signed-off-by: Peter Gonda Cc: David Rientjes Cc: Paolo Bonzini Cc: Sean Christopherson Cc: kvm@vger.kernel.org Cc: stable@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Tom Lendacky Link: https://lore.kernel.org/r/20230207171354.4012821-1-pgonda@google.com Signed-off-by: Sean Christopherson Signed-off-by: Zhiguang Ni --- arch/x86/kvm/svm/sev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 6b836648d0f2..7904c665d951 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1249,7 +1249,7 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) /* Check if we are crossing the page boundary */ offset = params.guest_uaddr & (PAGE_SIZE - 1); - if ((params.guest_len + offset > PAGE_SIZE)) + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) return -EINVAL; /* Pin guest memory */ @@ -1457,7 +1457,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) /* Check if we are crossing the page boundary */ offset = params.guest_uaddr & (PAGE_SIZE - 1); - if ((params.guest_len + offset > PAGE_SIZE)) + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) return -EINVAL; hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); -- Gitee From 243e164079ab0127c2342b35e956b753222d2e23 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 24 May 2021 12:48:57 -0500 Subject: [PATCH 34/35] KVM: x86: Assume a 64-bit hypercall for guests with protected state mainline inclusion from mainline-v5.15 commit b5aead0064f33ae5e693a364e3204fe1c0ac9af2 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: 
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b5aead0064f33ae5e693a364e3204fe1c0ac9af2 --------------------------- When processing a hypercall for a guest with protected state, currently SEV-ES guests, the guest CS segment register can't be checked to determine if the guest is in 64-bit mode. For an SEV-ES guest, it is expected that communication between the guest and the hypervisor is performed to shared memory using the GHCB. In order to use the GHCB, the guest must have been in long mode, otherwise writes by the guest to the GHCB would be encrypted and not be able to be comprehended by the hypervisor. Create a new helper function, is_64_bit_hypercall(), that assumes the guest is in 64-bit mode when the guest has protected state, and returns true, otherwise invoking is_64_bit_mode() to determine the mode. Update the hypercall related routines to use is_64_bit_hypercall() instead of is_64_bit_mode(). Add a WARN_ON_ONCE() to is_64_bit_mode() to catch occurrences of calls to this helper function for a guest running with protected state. 
Fixes: f1c6366e3043 ("KVM: SVM: Add required changes to support intercepts under SEV-ES") Reported-by: Sean Christopherson Signed-off-by: Tom Lendacky Message-Id: Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini Signed-off-by: Zhiguang Ni --- arch/x86/kvm/hyperv.c | 4 ++-- arch/x86/kvm/x86.c | 2 +- arch/x86/kvm/x86.h | 12 ++++++++++++ 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 0702cfa22fbf..3d0e54ae9dc3 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1691,7 +1691,7 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) { bool longmode; - longmode = is_64_bit_mode(vcpu); + longmode = is_64_bit_hypercall(vcpu); if (longmode) kvm_rax_write(vcpu, result); else { @@ -1767,7 +1767,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) } #ifdef CONFIG_X86_64 - if (is_64_bit_mode(vcpu)) { + if (is_64_bit_hypercall(vcpu)) { param = kvm_rcx_read(vcpu); ingpa = kvm_rdx_read(vcpu); outgpa = kvm_r8_read(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9f60cb550c55..56def51a773b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8803,7 +8803,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) trace_kvm_hypercall(nr, a0, a1, a2, a3); - op_64_bit = is_64_bit_mode(vcpu); + op_64_bit = is_64_bit_hypercall(vcpu); if (!op_64_bit) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 2536013e7e96..af96d739fbc7 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -95,12 +95,24 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) { int cs_db, cs_l; + WARN_ON_ONCE(vcpu->arch.guest_state_protected); + if (!is_long_mode(vcpu)) return false; kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); return cs_l; } +static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu) +{ + /* + * If running with protected guest state, the CS register is not + * accessible. 
The hypercall register values will have had to been + * provided in 64-bit mode, so assume the guest is in 64-bit. + */ + return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu); +} + static inline bool x86_exception_has_error_code(unsigned int vector) { static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) | -- Gitee From 00e421ea4cb331ab424fa9afdf5a56a3ac2f5e95 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 27 Nov 2024 16:43:39 -0800 Subject: [PATCH 35/35] KVM: x86: Play nice with protected guests in complete_hypercall_exit() mainline inclusion from mainline-v6.6 commit 9b42d1e8e4fe9dc631162c04caa69b0d1860b0f0 category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/ID5F38 CVE: NA Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=9b42d1e8e4fe9dc631162c04caa69b0d1860b0f0 --------------------------- Use is_64_bit_hypercall() instead of is_64_bit_mode() to detect a 64-bit hypercall when completing said hypercall. For guests with protected state, e.g. SEV-ES and SEV-SNP, KVM must assume the hypercall was made in 64-bit mode as the vCPU state needed to detect 64-bit mode is unavailable. Hacking the sev_smoke_test selftest to generate a KVM_HC_MAP_GPA_RANGE hypercall via VMGEXIT trips the WARN: ------------[ cut here ]------------ WARNING: CPU: 273 PID: 326626 at arch/x86/kvm/x86.h:180 complete_hypercall_exit+0x44/0xe0 [kvm] Modules linked in: kvm_amd kvm ... 
[last unloaded: kvm] CPU: 273 UID: 0 PID: 326626 Comm: sev_smoke_test Not tainted 6.12.0-smp--392e932fa0f3-feat #470 Hardware name: Google Astoria/astoria, BIOS 0.20240617.0-0 06/17/2024 RIP: 0010:complete_hypercall_exit+0x44/0xe0 [kvm] Call Trace: kvm_arch_vcpu_ioctl_run+0x2400/0x2720 [kvm] kvm_vcpu_ioctl+0x54f/0x630 [kvm] __se_sys_ioctl+0x6b/0xc0 do_syscall_64+0x83/0x160 entry_SYSCALL_64_after_hwframe+0x76/0x7e ---[ end trace 0000000000000000 ]--- Fixes: b5aead0064f3 ("KVM: x86: Assume a 64-bit hypercall for guests with protected state") Cc: stable@vger.kernel.org Cc: Tom Lendacky Reviewed-by: Xiaoyao Li Reviewed-by: Nikunj A Dadhania Reviewed-by: Tom Lendacky Reviewed-by: Binbin Wu Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20241128004344.4072099-2-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Zhiguang Ni --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 56def51a773b..17b06e336ab2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8780,7 +8780,7 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu) { u64 ret = vcpu->run->hypercall.ret; - if (!is_64_bit_mode(vcpu)) + if (!is_64_bit_hypercall(vcpu)) ret = (u32)ret; kvm_rax_write(vcpu, ret); ++vcpu->stat.hypercalls; -- Gitee