From ae8b23bc4832042894d39f26148f4151d3ceec8f Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 11 Feb 2025 19:31:26 +0800 Subject: [PATCH 1/3] crypto: ccp: move vpsp-related functions to vpsp.c Upstream: no Previously, vpsp-related functions or data structure definitions were scattered across different files. To improve manageability and reduce conflicts during patch porting, vpsp-related content is now consolidated into psp-dev.c as much as possible. Signed-off-by: xiongmengbiao --- drivers/crypto/ccp/hygon/csv-dev.c | 475 +-------------------- drivers/crypto/ccp/hygon/psp-dev.c | 204 +-------- drivers/crypto/ccp/hygon/vpsp.c | 659 +++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/vpsp.h | 131 ++++++ include/linux/psp-hygon.h | 95 ----- 5 files changed, 793 insertions(+), 771 deletions(-) create mode 100644 drivers/crypto/ccp/hygon/vpsp.h diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 41edce5f0c72..b8e2632638a2 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -21,6 +21,7 @@ #include "psp-dev.h" #include "csv-dev.h" #include "ring-buffer.h" +#include "vpsp.h" /* * Hygon CSV build info: @@ -32,17 +33,6 @@ EXPORT_SYMBOL_GPL(hygon_csv_build); int csv_comm_mode = CSV_COMM_MAILBOX_ON; -/* defination of variabled used by virtual psp */ -enum VPSP_RB_CHECK_STATUS { - RB_NOT_CHECK = 0, - RB_CHECKING, - RB_CHECKED, - RB_CHECK_MAX -}; -#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) -#define VPSP_CMD_STATUS_RUNNING 0xffff -static DEFINE_MUTEX(vpsp_rb_mutex); -struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; static uint8_t vpsp_rb_supported; static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); @@ -788,101 +778,6 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error #endif /* CONFIG_HYGON_CSV */ -static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) -{ - return ringbuffer->cmd_ptr.tail & 
ringbuffer->cmd_ptr.mask; -} - -static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) -{ - return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; -} - -static void vpsp_set_cmd_status(int prio, int index, int status) -{ - struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; - struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; - - statval[index].status = status; -} - -static int vpsp_get_cmd_status(int prio, int index) -{ - struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; - struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; - - return statval[index].status; -} - -static unsigned int vpsp_queue_cmd_size(int prio) -{ - return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); -} - -static int vpsp_dequeue_cmd(int prio, int index, - struct csv_cmdptr_entry *cmd_ptr) -{ - mutex_lock(&vpsp_rb_mutex); - - /* The status update must be before the head update */ - vpsp_set_cmd_status(prio, index, 0); - csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); - - mutex_unlock(&vpsp_rb_mutex); - - return 0; -} - -/* - * Populate the command from the virtual machine to the queue to - * support execution in ringbuffer mode - */ -static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t flags) -{ - struct csv_cmdptr_entry cmdptr = { }; - int index = -1; - - cmdptr.cmd_buf_ptr = phy_addr; - cmdptr.cmd_id = cmd; - cmdptr.cmd_flags = flags; - - mutex_lock(&vpsp_rb_mutex); - index = get_queue_tail(&vpsp_ring_buffer[prio]); - - /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ - if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { - index = -1; - goto out; - } - - /* The status must be written first, and then the cmd can be enqueued */ - vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); - if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { - vpsp_set_cmd_status(prio, index, 0); - 
index = -1; - goto out; - } - -out: - mutex_unlock(&vpsp_rb_mutex); - return index; -} - -static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, - uint32_t new_head) -{ - uint32_t orig_head = get_queue_head(ring_buffer); - uint32_t comple_num = 0; - - if (new_head >= orig_head) - comple_num = new_head - orig_head; - else - comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) - + 1; - - ring_buffer->cmd_ptr.head += comple_num; -} - static int vpsp_ring_buffer_queue_init(void) { int i; @@ -897,162 +792,6 @@ static int vpsp_ring_buffer_queue_init(void) return 0; } -static int vpsp_psp_mutex_trylock(void) -{ - int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (mutex_enabled) - return psp_mutex_trylock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); - else - return mutex_trylock(hygon_psp_hooks.sev_cmd_mutex); -} - -static int vpsp_psp_mutex_unlock(void) -{ - int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (mutex_enabled) - psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); - - return 0; -} - -static int __vpsp_ring_buffer_enter_locked(int *error) -{ - int ret; - struct csv_data_ring_buffer *data; - struct csv_ringbuffer_queue *low_queue; - struct csv_ringbuffer_queue *hi_queue; - struct sev_device *sev = psp_master->sev_data; - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) - return -EEXIST; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; - hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; - - data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); - data->queue_lo_statval_address = 
__psp_pa(low_queue->stat_val.data_align); - data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); - data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); - data->queue_lo_size = 1; - data->queue_hi_size = 1; - data->int_on_empty = 1; - - ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); - if (!ret) { - iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - csv_comm_mode = CSV_COMM_RINGBUFFER_ON; - } - - kfree(data); - return ret; -} - -static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) -{ - struct psp_device *psp = psp_master; - unsigned int reg, ret = 0; - unsigned int rb_tail, rb_head; - unsigned int rb_ctl; - struct sev_device *sev; - - if (!psp || !hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (*hygon_psp_hooks.psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - /* update rb tail */ - rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); - rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) - << PSP_RBTAIL_QHI_TAIL_SHIFT); - rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); - rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); - iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - /* update rb head */ - rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); - rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) - << PSP_RBHEAD_QHI_HEAD_SHIFT); - rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); - rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); - iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - - /* update rb ctl to trigger psp irq */ - sev->int_rcvd = 0; - /* PSP response to x86 only when all queue is empty or error happends */ - rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); - 
iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for all commands in ring buffer completed */ - ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, (*hygon_psp_hooks.psp_timeout)*10); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(psp->dev, "csv command in ringbuffer mode timed out, disabling PSP\n"); - *hygon_psp_hooks.psp_dead = true; - return ret; - } - /* cmd error happends */ - if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) - ret = -EFAULT; - - /* update head */ - vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], - (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); - vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], - reg & PSP_RBHEAD_QLO_HEAD_MASK); - - if (psp_ret) - *psp_ret = vpsp_get_cmd_status(prio, index); - - return ret; -} - -static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) -{ - struct sev_user_data_status data; - int rc; - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - rc = __vpsp_ring_buffer_enter_locked(psp_ret); - if (rc) - goto end; - - rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); - - /* exit ringbuf mode by send CMD in mailbox mode */ - hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, - &data, NULL); - csv_comm_mode = CSV_COMM_MAILBOX_ON; - -end: - return rc; -} - /** * struct user_data_status - PLATFORM_STATUS command parameters * @@ -1089,7 +828,7 @@ struct user_data_status { * Check whether the firmware supports ringbuffer mode and parse * commands from the virtual machine */ -static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, +int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, struct vpsp_cmd *vcmd) { int ret, error; @@ -1141,213 +880,3 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, return rb_supported; } - -static int __vpsp_do_cmd_locked(int cmd, phys_addr_t phy_addr, int *psp_ret) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - unsigned int phys_lsb, 
phys_msb; - unsigned int reg, ret = 0; - - if (!psp || !psp->sev_data) - return -ENODEV; - - if (*hygon_psp_hooks.psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - /* Get the physical address of the command buffer */ - phys_lsb = phy_addr ? lower_32_bits(phy_addr) : 0; - phys_msb = phy_addr ? upper_32_bits(phy_addr) : 0; - - dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", - cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); - - iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - sev->int_rcvd = 0; - - reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; - iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for command completion */ - ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); - *hygon_psp_hooks.psp_dead = true; - - return ret; - } - - *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; - - if (psp_ret) - *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); - - if (FIELD_GET(PSP_CMDRESP_STS, reg)) { - dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", - cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); - ret = -EIO; - } - - return ret; -} - -int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) -{ - int rc; - int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - - if (is_vendor_hygon() && mutex_enabled) { - if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) { - return -EBUSY; - } - } else { - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); - } - - rc = __vpsp_do_cmd_locked(cmd, phy_addr, psp_ret); - - if (is_vendor_hygon() && mutex_enabled) - psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); - - return rc; -} - -/* - * Try to 
obtain the result again by the command index, this - * interface is used in ringbuffer mode - */ -int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, - struct vpsp_ret *psp_ret) -{ - int ret = 0; - struct csv_cmdptr_entry cmd = {0}; - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - /* Get the retult directly if the command has been executed */ - if (index >= 0 && vpsp_get_cmd_status(prio, index) != - VPSP_CMD_STATUS_RUNNING) { - psp_ret->pret = vpsp_get_cmd_status(prio, index); - psp_ret->status = VPSP_FINISH; - return 0; - } - - if (vpsp_psp_mutex_trylock()) { - /* Use mailbox mode to execute a command if there is only one command */ - if (vpsp_queue_cmd_size(prio) == 1) { - /* dequeue command from queue*/ - vpsp_dequeue_cmd(prio, index, &cmd); - - ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret); - psp_ret->status = VPSP_FINISH; - vpsp_psp_mutex_unlock(); - if (unlikely(ret)) { - if (ret == -EIO) { - ret = 0; - } else { - pr_err("[%s]: psp do cmd error, %d\n", - __func__, psp_ret->pret); - ret = -EIO; - goto end; - } - } - } else { - ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, - index); - psp_ret->status = VPSP_FINISH; - vpsp_psp_mutex_unlock(); - if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", - __func__, ret); - goto end; - } - } - } else { - /* Change the command to the running state if getting the mutex fails */ - psp_ret->index = index; - psp_ret->status = VPSP_RUNNING; - return 0; - } -end: - return ret; -} -EXPORT_SYMBOL_GPL(vpsp_try_get_result); - -/* - * Send the virtual psp command to the PSP device and try to get the - * execution result, the interface and the vpsp_try_get_result - * interface are executed asynchronously. If the execution succeeds, - * the result is returned to the VM. 
If the execution fails, the - * vpsp_try_get_result interface will be used to obtain the result - * later again - */ -int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) -{ - int ret = 0; - int rb_supported; - int index = -1; - uint8_t prio = CSV_COMMAND_PRIORITY_LOW; - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - /* ringbuffer mode check and parse command prio*/ - rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, - (struct vpsp_cmd *)&cmd); - if (rb_supported) { - /* fill command in ringbuffer's queue and get index */ - index = vpsp_fill_cmd_queue(prio, cmd, phy_addr, 0); - if (unlikely(index < 0)) { - /* do mailbox command if queuing failed*/ - ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); - if (unlikely(ret)) { - if (ret == -EIO) { - ret = 0; - } else { - pr_err("[%s]: psp do cmd error, %d\n", - __func__, psp_ret->pret); - ret = -EIO; - goto end; - } - } - psp_ret->status = VPSP_FINISH; - goto end; - } - - /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret); - if (unlikely(ret)) { - pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); - goto end; - } - } else { - /* mailbox mode */ - ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); - if (unlikely(ret)) { - if (ret == -EIO) { - ret = 0; - } else { - pr_err("[%s]: psp do cmd error, %d\n", - __func__, psp_ret->pret); - ret = -EIO; - goto end; - } - } - psp_ret->status = VPSP_FINISH; - } - -end: - return ret; -} -EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 6a0c2d9352bb..8bf5c2a88b75 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -15,11 +15,10 @@ #include #include #include -#include -#include #include #include "psp-dev.h" +#include "vpsp.h" /* Function and variable pointers for hooks */ struct hygon_psp_hooks_table hygon_psp_hooks; @@ -37,33 +36,6 @@ enum 
HYGON_PSP_OPCODE { HYGON_PSP_OPCODE_MAX_NR, }; -enum VPSP_DEV_CTRL_OPCODE { - VPSP_OP_VID_ADD, - VPSP_OP_VID_DEL, - VPSP_OP_SET_DEFAULT_VID_PERMISSION, - VPSP_OP_GET_DEFAULT_VID_PERMISSION, - VPSP_OP_SET_GPA, -}; - -struct vpsp_dev_ctrl { - unsigned char op; - /** - * To be compatible with old user mode, - * struct vpsp_dev_ctrl must be kept at 132 bytes. - */ - unsigned char resv[3]; - union { - unsigned int vid; - // Set or check the permissions for the default VID - unsigned int def_vid_perm; - struct { - u64 gpa_start; - u64 gpa_end; - } gpa; - unsigned char reserved[128]; - } __packed data; -}; - uint64_t atomic64_exchange(volatile uint64_t *dst, uint64_t val) { #if 0 @@ -109,7 +81,6 @@ int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) return ret; } -EXPORT_SYMBOL_GPL(psp_mutex_lock_timeout); int psp_mutex_unlock(struct psp_mutex *mutex) { @@ -119,7 +90,6 @@ int psp_mutex_unlock(struct psp_mutex *mutex) atomic64_exchange(&mutex->locked, 0); return 0; } -EXPORT_SYMBOL_GPL(psp_mutex_unlock); static int mmap_psp(struct file *filp, struct vm_area_struct *vma) { @@ -174,143 +144,6 @@ static ssize_t write_psp(struct file *file, const char __user *buf, size_t count return written; } -DEFINE_RWLOCK(vpsp_rwlock); - -/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. - * but, the performance of finding vid is determined by g_vpsp_vid_num, - * so VPSP_VID_MAX_ENTRIES can be set larger. 
- */ -#define VPSP_VID_MAX_ENTRIES 2048 -#define VPSP_VID_NUM_MAX 64 - -static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; -static uint32_t g_vpsp_vid_num; -static int compare_vid_entries(const void *a, const void *b) -{ - return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid; -} -static void swap_vid_entries(void *a, void *b, int size) -{ - struct vpsp_context entry; - - memcpy(&entry, a, size); - memcpy(a, b, size); - memcpy(b, &entry, size); -} - -/** - * When 'allow_default_vid' is set to 1, - * QEMU is allowed to use 'vid 0' by default - * in the absence of a valid 'vid' setting. - */ -uint32_t allow_default_vid = 1; -void vpsp_set_default_vid_permission(uint32_t is_allow) -{ - allow_default_vid = is_allow; -} - -int vpsp_get_default_vid_permission(void) -{ - return allow_default_vid; -} -EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); - -/** - * get a vpsp context from pid - */ -int vpsp_get_context(struct vpsp_context **ctx, pid_t pid) -{ - struct vpsp_context new_entry = {.pid = pid}; - struct vpsp_context *existing_entry = NULL; - - read_lock(&vpsp_rwlock); - existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num, - sizeof(struct vpsp_context), compare_vid_entries); - read_unlock(&vpsp_rwlock); - - if (!existing_entry) - return -ENOENT; - - if (ctx) - *ctx = existing_entry; - - return 0; -} -EXPORT_SYMBOL_GPL(vpsp_get_context); - -/** - * Upon qemu startup, this section checks whether - * the '-device psp,vid' parameter is specified. - * If set, it utilizes the 'vpsp_add_vid' function - * to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'. - * The insertion is done in ascending order of 'pid'. 
- */ -static int vpsp_add_vid(uint32_t vid) -{ - pid_t cur_pid = task_pid_nr(current); - struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid}; - - if (vpsp_get_context(NULL, cur_pid) == 0) - return -EEXIST; - if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) - return -ENOMEM; - if (vid >= VPSP_VID_NUM_MAX) - return -EINVAL; - - write_lock(&vpsp_rwlock); - memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context)); - sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context), - compare_vid_entries, swap_vid_entries); - pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); - write_unlock(&vpsp_rwlock); - return 0; -} - -/** - * Upon the virtual machine is shut down, - * the 'vpsp_del_vid' function is employed to remove - * the 'vid' associated with the current 'pid'. - */ -static int vpsp_del_vid(void) -{ - pid_t cur_pid = task_pid_nr(current); - int i, ret = -ENOENT; - - write_lock(&vpsp_rwlock); - for (i = 0; i < g_vpsp_vid_num; ++i) { - if (g_vpsp_context_array[i].pid == cur_pid) { - --g_vpsp_vid_num; - pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", - g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); - memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], - sizeof(struct vpsp_context) * (g_vpsp_vid_num - i)); - ret = 0; - goto end; - } - } - -end: - write_unlock(&vpsp_rwlock); - return ret; -} - -static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) -{ - pid_t cur_pid = task_pid_nr(current); - struct vpsp_context *ctx = NULL; - - vpsp_get_context(&ctx, cur_pid); - if (!ctx) { - pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid); - return -ENOENT; - } - - ctx->gpa_start = gpa_start; - ctx->gpa_end = gpa_end; - pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n", - gpa_start, gpa_end, cur_pid); - return 0; -} /** * Try to pin a page @@ -373,39 +206,6 @@ static int psp_unpin_user_page(u64 vaddr) return 0; } 
-static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) -{ - int ret = 0; - unsigned char op = ctrl->op; - - switch (op) { - case VPSP_OP_VID_ADD: - ret = vpsp_add_vid(ctrl->data.vid); - break; - - case VPSP_OP_VID_DEL: - ret = vpsp_del_vid(); - break; - - case VPSP_OP_SET_DEFAULT_VID_PERMISSION: - vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); - break; - - case VPSP_OP_GET_DEFAULT_VID_PERMISSION: - ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); - break; - - case VPSP_OP_SET_GPA: - ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end); - break; - - default: - ret = -EINVAL; - break; - } - return ret; -} - static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) { unsigned int opcode = 0; @@ -519,7 +319,6 @@ int hygon_psp_additional_setup(struct sp_device *sp) return ret; } -EXPORT_SYMBOL_GPL(hygon_psp_additional_setup); void hygon_psp_exit(struct kref *ref) { @@ -531,7 +330,6 @@ void hygon_psp_exit(struct kref *ref) psp_misc = NULL; hygon_psp_hooks.psp_misc = NULL; } -EXPORT_SYMBOL_GPL(hygon_psp_exit); int fixup_hygon_psp_caps(struct psp_device *psp) { diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index df62dab035b8..f54d896e0ca5 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -14,6 +14,14 @@ #include #include #include +#include +#include +#include + +#include "ring-buffer.h" +#include "psp-dev.h" +#include "csv-dev.h" +#include "vpsp.h" #ifdef pr_fmt #undef pr_fmt @@ -547,3 +555,654 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g return ret; } EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op); + +DEFINE_RWLOCK(vpsp_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. + * but, the performance of finding vid is determined by g_vpsp_vid_num, + * so VPSP_VID_MAX_ENTRIES can be set larger. 
+ */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_context entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. + */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} + +/** + * get a vpsp context from pid + */ +int vpsp_get_context(struct vpsp_context **ctx, pid_t pid) +{ + struct vpsp_context new_entry = {.pid = pid}; + struct vpsp_context *existing_entry = NULL; + + read_lock(&vpsp_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num, + sizeof(struct vpsp_context), compare_vid_entries); + read_unlock(&vpsp_rwlock); + + if (!existing_entry) + return -ENOENT; + + if (ctx) + *ctx = existing_entry; + + return 0; +} + +/** + * Upon qemu startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'. + * The insertion is done in ascending order of 'pid'. 
+ */ +static int vpsp_add_vid(uint32_t vid) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid}; + + if (vpsp_get_context(NULL, cur_pid) == 0) + return -EEXIST; + if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) + return -ENOMEM; + if (vid >= VPSP_VID_NUM_MAX) + return -EINVAL; + + write_lock(&vpsp_rwlock); + memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context)); + sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context), + compare_vid_entries, swap_vid_entries); + pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); + write_unlock(&vpsp_rwlock); + return 0; +} + +/** + * Upon the virtual machine is shut down, + * the 'vpsp_del_vid' function is employed to remove + * the 'vid' associated with the current 'pid'. + */ +static int vpsp_del_vid(void) +{ + pid_t cur_pid = task_pid_nr(current); + int i, ret = -ENOENT; + + write_lock(&vpsp_rwlock); + for (i = 0; i < g_vpsp_vid_num; ++i) { + if (g_vpsp_context_array[i].pid == cur_pid) { + --g_vpsp_vid_num; + pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", + g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); + memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], + sizeof(struct vpsp_context) * (g_vpsp_vid_num - i)); + ret = 0; + goto end; + } + } + +end: + write_unlock(&vpsp_rwlock); + return ret; +} + +static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_context *ctx = NULL; + + vpsp_get_context(&ctx, cur_pid); + if (!ctx) { + pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid); + return -ENOENT; + } + + ctx->gpa_start = gpa_start; + ctx->gpa_end = gpa_end; + pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n", + gpa_start, gpa_end, cur_pid); + return 0; +} + +int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) +{ + int ret = 0; + unsigned char op = ctrl->op; + 
+ switch (op) { + case VPSP_OP_VID_ADD: + ret = vpsp_add_vid(ctrl->data.vid); + break; + + case VPSP_OP_VID_DEL: + ret = vpsp_del_vid(); + break; + + case VPSP_OP_SET_DEFAULT_VID_PERMISSION: + vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); + break; + + case VPSP_OP_GET_DEFAULT_VID_PERMISSION: + ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); + break; + + case VPSP_OP_SET_GPA: + ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + + +static DEFINE_MUTEX(vpsp_rb_mutex); +struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; + +static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; +} + +static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; +} + +static void vpsp_set_cmd_status(int prio, int index, int status) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + statval[index].status = status; +} + +static int vpsp_get_cmd_status(int prio, int index) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index].status; +} + +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int vpsp_dequeue_cmd(int prio, int index, + struct csv_cmdptr_entry *cmd_ptr) +{ + mutex_lock(&vpsp_rb_mutex); + + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + + mutex_unlock(&vpsp_rb_mutex); + + return 0; +} + +/* + * Populate the command from the virtual machine to the queue to + * support execution in 
ringbuffer mode + */ +static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + int index = -1; + + cmdptr.cmd_buf_ptr = phy_addr; + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + mutex_lock(&vpsp_rb_mutex); + index = get_queue_tail(&vpsp_ring_buffer[prio]); + + /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ + if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { + index = -1; + goto out; + } + + /* The status must be written first, and then the cmd can be enqueued */ + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + +out: + mutex_unlock(&vpsp_rb_mutex); + return index; +} + +static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, + uint32_t new_head) +{ + uint32_t orig_head = get_queue_head(ring_buffer); + uint32_t comple_num = 0; + + if (new_head >= orig_head) + comple_num = new_head - orig_head; + else + comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + + 1; + + ring_buffer->cmd_ptr.head += comple_num; +} + +static int vpsp_psp_mutex_trylock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) + return psp_mutex_trylock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + return mutex_trylock(hygon_psp_hooks.sev_cmd_mutex); +} + +static int vpsp_psp_mutex_unlock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return 0; +} + +static int __vpsp_ring_buffer_enter_locked(int *error) +{ 
+ int ret; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct sev_device *sev = psp_master->sev_data; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int vpsp_wait_cmd_ioc_ring_buffer(struct sev_device *sev, + unsigned int *reg, + unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(sev->int_queue, + sev->int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + + *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + return 0; +} + +static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct psp_device *psp = psp_master; + unsigned int reg, ret = 0; + unsigned int rb_tail, rb_head; + unsigned int rb_ctl; + struct sev_device *sev; + + if (!psp || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= 
(get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + /* PSP response to x86 only when all queue is empty or error happens */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = vpsp_wait_cmd_ioc_ring_buffer(sev, &reg, (*hygon_psp_hooks.psp_timeout)*10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "csv command in ringbuffer mode timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + return ret; + } + /* cmd error happens */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + /* update head */ + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], + reg & PSP_RBHEAD_QLO_HEAD_MASK); + + if (psp_ret) + *psp_ret = vpsp_get_cmd_status(prio, index); + + return ret; +} + +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct sev_user_data_status data; + int rc; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + rc = __vpsp_ring_buffer_enter_locked(psp_ret); + if
(rc) + goto end; + + rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); + + /* exit ringbuf mode by sending CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +end: + return rc; +} + +static int __vpsp_do_cmd_locked(int cmd, phys_addr_t phy_addr, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = phy_addr ? lower_32_bits(phy_addr) : 0; + phys_msb = phy_addr ? upper_32_bits(phy_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + return ret; +} + +int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if
(!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(cmd, phy_addr, psp_ret); + + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + +/* + * Try to obtain the result again by the command index, this + * interface is used in ringbuffer mode + */ +int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, + struct vpsp_ret *psp_ret) +{ + int ret = 0; + struct csv_cmdptr_entry cmd = {0}; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + /* Get the result directly if the command has been executed */ + if (index >= 0 && vpsp_get_cmd_status(prio, index) != + VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = vpsp_get_cmd_status(prio, index); + psp_ret->status = VPSP_FINISH; + return 0; + } + + if (vpsp_psp_mutex_trylock()) { + /* Use mailbox mode to execute a command if there is only one command */ + if (vpsp_queue_cmd_size(prio) == 1) { + /* dequeue command from queue*/ + vpsp_dequeue_cmd(prio, index, &cmd); + + ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + } else { + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, + index); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + __func__, ret); + goto end; + } + } + } else { + /* Change the command to the running state if getting the mutex fails */ + psp_ret->index = index; + 
psp_ret->status = VPSP_RUNNING; + return 0; + } +end: + return ret; +} + +/* + * Send the virtual psp command to the PSP device and try to get the + * execution result, the interface and the vpsp_try_get_result + * interface are executed asynchronously. If the execution succeeds, + * the result is returned to the VM. If the execution fails, the + * vpsp_try_get_result interface will be used to obtain the result + * later again + */ +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) +{ + int ret = 0; + int rb_supported; + int index = -1; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + /* ringbuffer mode check and parse command prio*/ + rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + (struct vpsp_cmd *)&cmd); + if (rb_supported) { + /* fill command in ringbuffer's queue and get index */ + index = vpsp_fill_cmd_queue(prio, cmd, phy_addr, 0); + if (unlikely(index < 0)) { + /* do mailbox command if queuing failed*/ + ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + goto end; + } + + /* try to get result from the ringbuffer command */ + ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); + goto end; + } + } else { + /* mailbox mode */ + ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + } + +end: + return ret; +} diff --git a/drivers/crypto/ccp/hygon/vpsp.h b/drivers/crypto/ccp/hygon/vpsp.h new file mode 100644 index 000000000000..179dde46dda0 --- /dev/null +++ 
b/drivers/crypto/ccp/hygon/vpsp.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Secure Processor interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Mengbiao Xiong + */ + +#ifndef __CCP_HYGON_VPSP_H__ +#define __CCP_HYGON_VPSP_H__ + + +/* + * enum VPSP_CMD_STATUS - virtual psp command status + * + * @VPSP_INIT: the initial command from guest + * @VPSP_RUNNING: the middle command to check and run ringbuffer command + * @VPSP_FINISH: inform the guest that the command ran successfully + */ +enum VPSP_CMD_STATUS { + VPSP_INIT = 0, + VPSP_RUNNING, + VPSP_FINISH, + VPSP_MAX +}; + +/** + * struct vpsp_cmd - virtual psp command + * + * @cmd_id: the command id is used to distinguish different commands + * @is_high_rb: indicates the ringbuffer level in which the command is placed + */ +struct vpsp_cmd { + u32 cmd_id : 31; + u32 is_high_rb : 1; +}; + +/** + * struct vpsp_ret - virtual psp return result + * + * @pret: the return code from device + * @resv: reserved bits + * @format: indicates that the error is a unix error code(is 0) or a psp error(is 1) + * @index: used to distinguish the position of command in the ringbuffer + * @status: indicates the current status of the related command + */ +struct vpsp_ret { + u32 pret : 16; + u32 resv : 1; + u32 format : 1; + u32 index : 12; + u32 status : 2; +}; +#define VPSP_RET_SYS_FORMAT 1 +#define VPSP_RET_PSP_FORMAT 0 + +#define PSP_2MB_MASK (2*1024*1024 - 1) +#define PSP_HUGEPAGE_2MB (2*1024*1024) +#define PSP_HUGEPAGE_NUM_MAX 128 +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f +#define TKM_PSP_CMDID TKM_CMD_ID_MIN +#define TKM_PSP_CMDID_OFFSET 0x128 +#define PSP_VID_MASK 0xff +#define PSP_VID_SHIFT 56 +#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) +#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) +#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << 
PSP_VID_SHIFT)) + +struct vpsp_context { + u32 vid; + pid_t pid; + u64 gpa_start; + u64 gpa_end; + + // `vm_is_bound` indicates whether the binding operation has been performed + u32 vm_is_bound; + u32 vm_handle; // only for csv +}; + +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, + VPSP_OP_SET_GPA, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + /** + * To be compatible with old user mode, + * struct vpsp_dev_ctrl must be kept at 132 bytes. + */ + unsigned char resv[3]; + union { + unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; + struct { + u64 gpa_start; + u64 gpa_end; + } gpa; + unsigned char reserved[128]; + } __packed data; +}; + +/* definition of variables used by virtual psp */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_CMD_STATUS_RUNNING 0xffff + +extern struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +extern struct hygon_psp_hooks_table hygon_psp_hooks; + +int vpsp_try_get_result(uint8_t prio, uint32_t index, + phys_addr_t phy_addr, struct vpsp_ret *psp_ret); +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret); +int vpsp_get_context(struct vpsp_context **ctx, pid_t pid); +int vpsp_get_default_vid_permission(void); +int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl); +int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, + struct vpsp_cmd *vcmd); + +#endif /* __CCP_HYGON_VPSP_H__ */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 95ba4af56435..1588ffb3dc3e 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -430,50 +430,6 @@ struct csv3_data_receive_encrypt_context { u32 vmcb_block_len; /* In */ } __packed; -/* - * enum VPSP_CMD_STATUS - virtual psp command status - * - * @VPSP_INIT: the 
initial command from guest - * @VPSP_RUNNING: the middle command to check and run ringbuffer command - * @VPSP_FINISH: inform the guest that the command ran successfully - */ -enum VPSP_CMD_STATUS { - VPSP_INIT = 0, - VPSP_RUNNING, - VPSP_FINISH, - VPSP_MAX -}; - -/** - * struct vpsp_cmd - virtual psp command - * - * @cmd_id: the command id is used to distinguish different commands - * @is_high_rb: indicates the ringbuffer level in which the command is placed - */ -struct vpsp_cmd { - u32 cmd_id : 31; - u32 is_high_rb : 1; -}; - -/** - * struct vpsp_ret - virtual psp return result - * - * @pret: the return code from device - * @resv: reserved bits - * @format: indicates that the error is a unix error code(is 0) or a psp error(is 1) - * @index: used to distinguish the position of command in the ringbuffer - * @status: indicates the current status of the related command - */ -struct vpsp_ret { - u32 pret : 16; - u32 resv : 1; - u32 format : 1; - u32 index : 12; - u32 status : 2; -}; -#define VPSP_RET_SYS_FORMAT 1 -#define VPSP_RET_PSP_FORMAT 0 - struct kvm_vpsp { struct kvm *kvm; int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); @@ -483,30 +439,6 @@ struct kvm_vpsp { u8 is_csv_guest; }; -#define PSP_2MB_MASK (2*1024*1024 - 1) -#define PSP_HUGEPAGE_2MB (2*1024*1024) -#define PSP_HUGEPAGE_NUM_MAX 128 -#define TKM_CMD_ID_MIN 0x120 -#define TKM_CMD_ID_MAX 0x12f -#define TKM_PSP_CMDID TKM_CMD_ID_MIN -#define TKM_PSP_CMDID_OFFSET 0x128 -#define PSP_VID_MASK 0xff -#define PSP_VID_SHIFT 56 -#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) -#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) -#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) - -struct vpsp_context { - u32 vid; - pid_t pid; - u64 gpa_start; - u64 gpa_end; - - // `vm_is_bound` indicates whether the binding operation has been performed - u32 vm_is_bound; - u32 vm_handle; 
// only for csv -}; - #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -522,15 +454,6 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); -int vpsp_try_get_result(uint8_t prio, uint32_t index, - phys_addr_t phy_addr, struct vpsp_ret *psp_ret); - -int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret); - -int vpsp_get_context(struct vpsp_context **ctx, pid_t pid); - -int vpsp_get_default_vid_permission(void); - int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa); int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, @@ -561,24 +484,6 @@ static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } static inline int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } -static inline int -vpsp_try_get_result(uint8_t prio, - uint32_t index, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { return -ENODEV; } - -static inline int -vpsp_try_do_cmd(uint32_t vid, int cmd, - void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } - -static inline int -vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, - struct vpsp_ret *psp_ret) { return -ENODEV; } - -static inline int -vpsp_get_context(struct vpsp_context **ctx, pid_t pid) { return -ENODEV; } - -static inline int -vpsp_get_default_vid_permission(void) { return -ENODEV; } - static inline int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa) { return -ENODEV; } -- Gitee From 7f9323de5eacc46601f88a2a541610ee2ddda4be Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Sun, 9 Feb 2025 13:48:31 +0800 Subject: [PATCH 2/3] crypto: ccp: Use a workqueue to clean up the vpsp ringbuffer Upstream: no The Guest uses the vmmcall instruction to enter the Host Kernel state and send commands to the PSP. 
If the traditional interrupt mode is used to wait for command completion, the Guest's vCPU may freeze during the sleep-wait-for-interrupt process. Using a workqueue allows the Guest to quickly submit a command to the ringbuffer queue after entering the Host Kernel and return to the Guest without waiting for an interrupt. The Guest can later poll the command result via the vmmcall instruction again, avoiding long-term vCPU freezing. Signed-off-by: xiongmengbiao --- drivers/crypto/ccp/hygon/psp-dev.c | 32 +- drivers/crypto/ccp/hygon/vpsp.c | 499 +++++++++++++++++++---------- drivers/crypto/ccp/hygon/vpsp.h | 42 ++- 3 files changed, 384 insertions(+), 189 deletions(-) diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 8bf5c2a88b75..970b16130aba 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -25,6 +25,10 @@ struct hygon_psp_hooks_table hygon_psp_hooks; static unsigned int psp_int_rcvd; wait_queue_head_t psp_int_queue; +struct kmem_cache *vpsp_cmd_ctx_slab; +static struct workqueue_struct *vpsp_wq; +static struct work_struct vpsp_work; + static struct psp_misc_dev *psp_misc; #define HYGON_PSP_IOC_TYPE 'H' enum HYGON_PSP_OPCODE { @@ -289,6 +293,17 @@ int hygon_psp_additional_setup(struct sp_device *sp) if (!psp_misc) { struct miscdevice *misc; + vpsp_wq = create_singlethread_workqueue("vpsp_workqueue"); + if (!vpsp_wq) + return -ENOMEM; + + INIT_WORK(&vpsp_work, vpsp_worker_handler); + + vpsp_cmd_ctx_slab = kmem_cache_create("vpsp_cmd_ctx", + sizeof(struct vpsp_cmd_ctx), 0, SLAB_HWCACHE_ALIGN, NULL); + if (!vpsp_cmd_ctx_slab) + return -ENOMEM; + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); if (!psp_misc) return -ENOMEM; @@ -329,6 +344,9 @@ void hygon_psp_exit(struct kref *ref) free_page((unsigned long)misc_dev->data_pg_aligned); psp_misc = NULL; hygon_psp_hooks.psp_misc = NULL; + kmem_cache_destroy(vpsp_cmd_ctx_slab); + flush_workqueue(vpsp_wq); + 
destroy_workqueue(vpsp_wq); } int fixup_hygon_psp_caps(struct psp_device *psp) @@ -501,11 +519,15 @@ static irqreturn_t psp_irq_handler_hygon(int irq, void *data) /* Check if it is SEV command completion: */ reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); if (reg & PSP_CMDRESP_RESP) { - psp_int_rcvd = 1; - wake_up(&psp_int_queue); - if (sev != NULL) { - sev->int_rcvd = 1; - wake_up(&sev->int_queue); + if (vpsp_in_ringbuffer_mode) { + queue_work(vpsp_wq, &vpsp_work); + } else { + psp_int_rcvd = 1; + wake_up(&psp_int_queue); + if (sev != NULL) { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } } } } diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index f54d896e0ca5..40b170498c1a 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include "ring-buffer.h" #include "psp-dev.h" @@ -55,23 +57,138 @@ * |<=> psp device driver */ +/** + * used to locate the command context, + * when the guest enters the host via vmmcall + */ +DEFINE_HASHTABLE(vpsp_cmd_ctx_table, 11); +DEFINE_RWLOCK(table_rwlock); +bool vpsp_in_ringbuffer_mode; +static struct vpsp_cmd_ctx *vpsp_cmd_ctx_array[CSV_COMMAND_PRIORITY_NUM] + [CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE]; + +static struct vpsp_cmd_ctx *vpsp_hashtable_find_cmd_ctx(gpa_t key1, pid_t key2) +{ + struct vpsp_cmd_ctx *entry = NULL; + bool is_found = false; + + read_lock(&table_rwlock); + hash_for_each_possible(vpsp_cmd_ctx_table, entry, node, key1) { + if (entry->key1 == key1 && entry->key2 == key2) { + is_found = true; + break; + } + } + read_unlock(&table_rwlock); + if (!is_found) + entry = NULL; + + return entry; +} + +static void vpsp_hashtable_add_cmd_ctx(struct vpsp_cmd_ctx *ctx) +{ + struct vpsp_cmd_ctx *entry = NULL; + + write_lock(&table_rwlock); + hash_for_each_possible(vpsp_cmd_ctx_table, entry, node, ctx->key1) { + if (entry->key1 == ctx->key1 && + entry->key2 == ctx->key2) { + 
vpsp_cmd_ctx_obj_put(entry, true); + break; + } + } + hash_add(vpsp_cmd_ctx_table, &ctx->node, ctx->key1); + write_unlock(&table_rwlock); + + vpsp_cmd_ctx_obj_get(ctx); +} + +static void vpsp_hashtable_remove_cmd_ctx(struct vpsp_cmd_ctx *ctx) +{ + write_lock(&table_rwlock); + hash_del(&ctx->node); + write_unlock(&table_rwlock); + + vpsp_cmd_ctx_obj_put(ctx, false); +} + +/** + * Create a vpsp_cmd_ctx object and insert it into the + * vpsp_cmd_ctx_table hash table. + * + * @hkey: The key value for the hash table vpsp_cmd_ctx_table + * + * Return: the address of the vpsp_cmd_ctx object + * if created successfully, otherwise returns NULL + */ +static struct vpsp_cmd_ctx *vpsp_cmd_ctx_create(gpa_t key1, pid_t key2) +{ + struct vpsp_cmd_ctx *cmd_ctx = kmem_cache_zalloc(vpsp_cmd_ctx_slab, GFP_KERNEL); + + if (cmd_ctx) { + /** + * According to the implementation of refcount, + * the initial value must be greater than 0. + */ + refcount_set(&cmd_ctx->ref, 1); + cmd_ctx->statval = VPSP_CMD_STATUS_RUNNING; + cmd_ctx->key1 = key1; + cmd_ctx->key2 = key2; + vpsp_hashtable_add_cmd_ctx(cmd_ctx); + } + return cmd_ctx; +} + +/** + * Destroys the specified vpsp_cmd_ctx object, + * indicating it will no longer be accessed. + * + * But does not necessarily free the cmd_ctx memory immediately, + * only additional to perform decrement refcount. + * + * Actual memory release occurs when the refcount drops to 0, + * which may happen during the vpsp_worker_handler or + * vpsp_cmd_ctx_destroy process. + * + * @cmd_ctx: the vpsp_cmd_ctx object + */ +static void vpsp_cmd_ctx_destroy(struct vpsp_cmd_ctx *cmd_ctx) +{ + if (!cmd_ctx) + return; + /** + * The initial refcount is 1, + * need to additional decrement a refcount. 
+ */ + vpsp_cmd_ctx_obj_put(cmd_ctx, false); + vpsp_hashtable_remove_cmd_ctx(cmd_ctx); +} + +void vpsp_cmd_ctx_obj_get(struct vpsp_cmd_ctx *cmd_ctx) +{ + refcount_inc(&cmd_ctx->ref); +} + +void vpsp_cmd_ctx_obj_put(struct vpsp_cmd_ctx *cmd_ctx, bool force) +{ + do { + if (refcount_dec_and_test(&cmd_ctx->ref)) { + kfree(cmd_ctx->data); + memset(cmd_ctx, 0, sizeof(*cmd_ctx)); + kmem_cache_free(vpsp_cmd_ctx_slab, cmd_ctx); + force = false; + } + } while (force); +} + struct psp_cmdresp_head { uint32_t buf_size; uint32_t cmdresp_size; uint32_t cmdresp_code; } __packed; -/* save command data for restoring later */ -struct vpsp_hbuf_wrapper { - void *data; - uint32_t data_size; -}; - -/* Virtual PSP host memory information maintenance, used in ringbuffer mode */ -struct vpsp_hbuf_wrapper -g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; - -static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t size) +static int check_gpa_range(struct vpsp_dev_ctx *vpsp_ctx, gpa_t addr, uint32_t size) { if (!vpsp_ctx || !addr) return -EFAULT; @@ -81,7 +198,7 @@ static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t s return -EFAULT; } -static int check_psp_mem_range(struct vpsp_context *vpsp_ctx, +static int check_psp_mem_range(struct vpsp_dev_ctx *vpsp_ctx, void *data, uint32_t size) { if ((((uintptr_t)data + size - 1) & ~PSP_2MB_MASK) != @@ -97,19 +214,21 @@ static int check_psp_mem_range(struct vpsp_context *vpsp_ctx, } /** - * Copy the guest data to the host kernel buffer - * and record the host buffer address in 'hbuf'. - * This 'hbuf' is used to restore context information - * during asynchronous processing. + * Copy Guest data to the Host kernel buffer + * and allocate a cmd_ctx to insert into the vpsp_cmd_ctx_table. 
*/ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, - struct vpsp_hbuf_wrapper *hbuf) + struct vpsp_cmd_ctx **cmd_ctx) { int ret = 0; void *data = NULL; struct psp_cmdresp_head psp_head; uint32_t data_size; + if (unlikely(!cmd_ctx)) + return -EFAULT; + *cmd_ctx = NULL; + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, sizeof(struct psp_cmdresp_head)))) return -EFAULT; @@ -122,34 +241,42 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, if (!data) return -ENOMEM; - if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { + *cmd_ctx = vpsp_cmd_ctx_create(data_gpa, vpsp->kvm->userspace_pid); + if (!(*cmd_ctx)) { ret = -EFAULT; goto end; } - hbuf->data = data; - hbuf->data_size = data_size; + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto end; + } + (*cmd_ctx)->data = data; + (*cmd_ctx)->data_size = data_size; end: + if (ret) { + vpsp_cmd_ctx_destroy(*cmd_ctx); + kfree(data); + } return ret; } static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, - struct vpsp_hbuf_wrapper *hbuf) + struct vpsp_cmd_ctx *cmd_ctx) { int ret = 0; /* restore cmdresp's buffer from context */ - if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, - hbuf->data_size))) { + if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, cmd_ctx->data, + cmd_ctx->data_size))) { pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", __func__); ret = -EFAULT; goto end; } end: - kfree(hbuf->data); - memset(hbuf, 0, sizeof(*hbuf)); + vpsp_cmd_ctx_destroy(cmd_ctx); return ret; } @@ -256,7 +383,7 @@ static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa) } -static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx, +static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_dev_ctx *vpsp_ctx, uint64_t data, uint32_t cmd) { int ret; @@ -302,7 +429,7 @@ static int 
check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_co static int check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp, - struct vpsp_context *vpsp_ctx, + struct vpsp_dev_ctx *vpsp_ctx, uint64_t data, uint32_t cmd) { int ret = 0; @@ -326,7 +453,7 @@ check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp, return ret; } -static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx, +static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_dev_ctx *vpsp_ctx, uint32_t cmd, uint32_t *psp_ret) { int ret; @@ -361,58 +488,65 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, gpa_t data_gpa, uint32_t psp_ret) { int ret; - uint64_t data_hpa; - uint32_t index = 0, vid = 0; + uint32_t vid = 0; struct vpsp_ret psp_async = {0}; - struct vpsp_context *vpsp_ctx = NULL; - struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; - uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + struct vpsp_dev_ctx *vpsp_dev_ctx = NULL; + struct vpsp_cmd_ctx *cmd_ctx = NULL; phys_addr_t hpa; - vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); + vpsp_get_dev_ctx(&vpsp_dev_ctx, vpsp->kvm->userspace_pid); - ret = check_cmd_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + ret = check_cmd_forward_op_permission(vpsp, vpsp_dev_ctx, data_gpa, cmd); if (unlikely(ret)) { pr_err("directly operation not allowed\n"); goto end; } - ret = vpsp_try_bind_vtkm(vpsp, vpsp_ctx, cmd, (uint32_t *)&psp_async); + ret = vpsp_try_bind_vtkm(vpsp, vpsp_dev_ctx, cmd, (uint32_t *)&psp_async); if (unlikely(ret || *(uint32_t *)&psp_async)) { pr_err("try to bind vtkm failed (ret %x, psp_async %x)\n", ret, *(uint32_t *)&psp_async); goto end; } - if (vpsp_ctx) - vid = vpsp_ctx->vid; + if (vpsp_dev_ctx) + vid = vpsp_dev_ctx->vid; *((uint32_t *)&psp_async) = psp_ret; - hpa = gpa_to_hpa(vpsp, data_gpa); - if (unlikely(!hpa)) { - ret = -EFAULT; - goto end; - } - - data_hpa = PUT_PSP_VID(hpa, vid); - switch (psp_async.status) { case VPSP_INIT: + cmd_ctx = 
vpsp_cmd_ctx_create(data_gpa, + vpsp->kvm->userspace_pid); + if (unlikely(!cmd_ctx)) { + ret = -ENOMEM; + goto end; + } + + hpa = gpa_to_hpa(vpsp, data_gpa); + if (unlikely(!hpa)) { + ret = -EFAULT; + goto end; + } /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(cmd, data_hpa, &psp_async); + ret = vpsp_try_do_cmd(cmd, PUT_PSP_VID(hpa, vid), cmd_ctx, &psp_async); if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__); goto end; } break; case VPSP_RUNNING: - prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : - CSV_COMMAND_PRIORITY_LOW; - index = psp_async.index; + cmd_ctx = vpsp_hashtable_find_cmd_ctx(data_gpa, vpsp->kvm->userspace_pid); + if (unlikely(!cmd_ctx)) { + pr_err("[%s]: vpsp_hashtable_find_cmd_ctx failed, data_gpa %llx\n", + __func__, data_gpa); + ret = -EFAULT; + goto end; + } + /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(prio, index, data_hpa, &psp_async); + ret = vpsp_try_get_result(cmd_ctx, &psp_async); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); goto end; @@ -424,6 +558,9 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, break; } + if (psp_async.status == VPSP_FINISH) + vpsp_cmd_ctx_destroy(cmd_ctx); + end: /** * In order to indicate both system errors and PSP errors, @@ -435,6 +572,8 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, if (ret > 0) ret = -ret; psp_async.pret = (uint16_t)ret; + psp_async.status = VPSP_FINISH; + vpsp_cmd_ctx_destroy(cmd_ctx); } return *((int *)&psp_async); } @@ -452,24 +591,21 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g { int ret = 0; struct vpsp_ret psp_ret = {0}; - struct vpsp_hbuf_wrapper hbuf = {0}; - struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; - struct vpsp_context *vpsp_ctx = NULL; + struct vpsp_cmd_ctx *cmd_ctx = NULL; + struct vpsp_dev_ctx *vpsp_dev_ctx = NULL; 
phys_addr_t data_paddr = 0; - uint8_t prio = CSV_COMMAND_PRIORITY_LOW; - uint32_t index = 0; uint32_t vid = 0; - vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); + vpsp_get_dev_ctx(&vpsp_dev_ctx, vpsp->kvm->userspace_pid); - ret = check_cmd_copy_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + ret = check_cmd_copy_forward_op_permission(vpsp, vpsp_dev_ctx, data_gpa, cmd); if (unlikely(ret)) { pr_err("copy operation not allowed\n"); return -EPERM; } - if (vpsp_ctx) - vid = vpsp_ctx->vid; + if (vpsp_dev_ctx) + vid = vpsp_dev_ctx->vid; if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) @@ -478,32 +614,25 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g switch (psp_ret.status) { case VPSP_INIT: /* copy data from guest */ - ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &hbuf); + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &cmd_ctx); if (unlikely(ret)) { - psp_ret.status = VPSP_FINISH; pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", __func__); ret = -EFAULT; goto end; } - data_paddr = PUT_PSP_VID(__psp_pa(hbuf.data), vid); + data_paddr = PUT_PSP_VID(__psp_pa(cmd_ctx->data), vid); /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(cmd, data_paddr, (struct vpsp_ret *)&psp_ret); + ret = vpsp_try_do_cmd(cmd, data_paddr, cmd_ctx, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__); ret = -EFAULT; goto end; } - if (psp_ret.status == VPSP_RUNNING) { - prio = vcmd->is_high_rb ? 
CSV_COMMAND_PRIORITY_HIGH : - CSV_COMMAND_PRIORITY_LOW; - g_hbuf_wrap[prio][psp_ret.index] = hbuf; - break; - - } else if (psp_ret.status == VPSP_FINISH) { - ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); + if (psp_ret.status == VPSP_FINISH) { + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, cmd_ctx); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); @@ -514,13 +643,16 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g break; case VPSP_RUNNING: - prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : - CSV_COMMAND_PRIORITY_LOW; - index = psp_ret.index; - data_paddr = PUT_PSP_VID(__psp_pa(g_hbuf_wrap[prio][index].data), vid); + cmd_ctx = vpsp_hashtable_find_cmd_ctx(data_gpa, vpsp->kvm->userspace_pid); + if (unlikely(!cmd_ctx)) { + pr_err("[%s]: vpsp_hashtable_find_cmd_ctx failed, data_gpa %llx\n", + __func__, data_gpa); + ret = -EFAULT; + goto end; + } + /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(prio, index, data_paddr, - (struct vpsp_ret *)&psp_ret); + ret = vpsp_try_get_result(cmd_ctx, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); ret = -EFAULT; @@ -532,8 +664,8 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g goto end; } else if (psp_ret.status == VPSP_FINISH) { /* copy data to guest */ - ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, - &g_hbuf_wrap[prio][index]); + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, cmd_ctx); + cmd_ctx = NULL; if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); @@ -550,13 +682,17 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g break; } end: + if (ret) { + psp_ret.status = VPSP_FINISH; + vpsp_cmd_ctx_destroy(cmd_ctx); + } /* return psp_ret to guest */ vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); return ret; } 
EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op); -DEFINE_RWLOCK(vpsp_rwlock); +DEFINE_RWLOCK(vpsp_dev_rwlock); /* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. * but, the performance of finding vid is determined by g_vpsp_vid_num, @@ -565,15 +701,15 @@ DEFINE_RWLOCK(vpsp_rwlock); #define VPSP_VID_MAX_ENTRIES 2048 #define VPSP_VID_NUM_MAX 64 -static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; +static struct vpsp_dev_ctx g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; static uint32_t g_vpsp_vid_num; static int compare_vid_entries(const void *a, const void *b) { - return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid; + return ((struct vpsp_dev_ctx *)a)->pid - ((struct vpsp_dev_ctx *)b)->pid; } static void swap_vid_entries(void *a, void *b, int size) { - struct vpsp_context entry; + struct vpsp_dev_ctx entry; memcpy(&entry, a, size); memcpy(a, b, size); @@ -597,17 +733,17 @@ int vpsp_get_default_vid_permission(void) } /** - * get a vpsp context from pid + * get a vpsp device context from pid */ -int vpsp_get_context(struct vpsp_context **ctx, pid_t pid) +int vpsp_get_dev_ctx(struct vpsp_dev_ctx **ctx, pid_t pid) { - struct vpsp_context new_entry = {.pid = pid}; - struct vpsp_context *existing_entry = NULL; + struct vpsp_dev_ctx new_entry = {.pid = pid}; + struct vpsp_dev_ctx *existing_entry = NULL; - read_lock(&vpsp_rwlock); + read_lock(&vpsp_dev_rwlock); existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num, - sizeof(struct vpsp_context), compare_vid_entries); - read_unlock(&vpsp_rwlock); + sizeof(struct vpsp_dev_ctx), compare_vid_entries); + read_unlock(&vpsp_dev_rwlock); if (!existing_entry) return -ENOENT; @@ -628,21 +764,21 @@ int vpsp_get_context(struct vpsp_context **ctx, pid_t pid) static int vpsp_add_vid(uint32_t vid) { pid_t cur_pid = task_pid_nr(current); - struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid}; + struct vpsp_dev_ctx new_entry = {.vid = vid, .pid = 
cur_pid}; - if (vpsp_get_context(NULL, cur_pid) == 0) + if (vpsp_get_dev_ctx(NULL, cur_pid) == 0) return -EEXIST; if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) return -ENOMEM; if (vid >= VPSP_VID_NUM_MAX) return -EINVAL; - write_lock(&vpsp_rwlock); - memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context)); - sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context), + write_lock(&vpsp_dev_rwlock); + memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_dev_ctx)); + sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_dev_ctx), compare_vid_entries, swap_vid_entries); pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); - write_unlock(&vpsp_rwlock); + write_unlock(&vpsp_dev_rwlock); return 0; } @@ -656,30 +792,30 @@ static int vpsp_del_vid(void) pid_t cur_pid = task_pid_nr(current); int i, ret = -ENOENT; - write_lock(&vpsp_rwlock); + write_lock(&vpsp_dev_rwlock); for (i = 0; i < g_vpsp_vid_num; ++i) { if (g_vpsp_context_array[i].pid == cur_pid) { --g_vpsp_vid_num; pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], - sizeof(struct vpsp_context) * (g_vpsp_vid_num - i)); + sizeof(struct vpsp_dev_ctx) * (g_vpsp_vid_num - i)); ret = 0; goto end; } } end: - write_unlock(&vpsp_rwlock); + write_unlock(&vpsp_dev_rwlock); return ret; } static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) { pid_t cur_pid = task_pid_nr(current); - struct vpsp_context *ctx = NULL; + struct vpsp_dev_ctx *ctx = NULL; - vpsp_get_context(&ctx, cur_pid); + vpsp_get_dev_ctx(&ctx, cur_pid); if (!ctx) { pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid); return -ENOENT; @@ -760,17 +896,30 @@ static unsigned int vpsp_queue_cmd_size(int prio) return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); } -static int 
vpsp_dequeue_cmd(int prio, int index, - struct csv_cmdptr_entry *cmd_ptr) +static int vpsp_dequeue_and_notify(int prio, struct csv_cmdptr_entry *cmd_ptr) { - mutex_lock(&vpsp_rb_mutex); + struct vpsp_cmd_ctx *ctx = NULL; + int mask = vpsp_ring_buffer[prio].cmd_ptr.mask; + int index = vpsp_ring_buffer[prio].cmd_ptr.head & mask; + + ctx = vpsp_cmd_ctx_array[prio][index]; + if (ctx) { + /** + * Write the result back to the cmd ctx, + * after which we can safely perform + * the ringbuffer dequeue operation without + * waiting for the Guest to retrieve the result. + */ + ctx->statval = vpsp_get_cmd_status(prio, index); + vpsp_cmd_ctx_obj_put(ctx, false); + } /* The status update must be before the head update */ vpsp_set_cmd_status(prio, index, 0); - csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + mutex_lock(&vpsp_rb_mutex); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); mutex_unlock(&vpsp_rb_mutex); - return 0; } @@ -790,12 +939,6 @@ static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t mutex_lock(&vpsp_rb_mutex); index = get_queue_tail(&vpsp_ring_buffer[prio]); - /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ - if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { - index = -1; - goto out; - } - /* The status must be written first, and then the cmd can be enqueued */ vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { @@ -809,11 +952,13 @@ static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t return index; } -static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, - uint32_t new_head) +static void vpsp_ring_update_head(int prio, uint32_t new_head) { + struct csv_ringbuffer_queue *ring_buffer = &vpsp_ring_buffer[prio]; uint32_t orig_head = get_queue_head(ring_buffer); + struct csv_cmdptr_entry entry; uint32_t comple_num = 0; + 
int i; if (new_head >= orig_head) comple_num = new_head - orig_head; @@ -821,7 +966,8 @@ static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + 1; - ring_buffer->cmd_ptr.head += comple_num; + for (i = 0; i < comple_num; ++i) + vpsp_dequeue_and_notify(prio, &entry); } static int vpsp_psp_mutex_trylock(void) @@ -891,26 +1037,40 @@ static int __vpsp_ring_buffer_enter_locked(int *error) return ret; } -static int vpsp_wait_cmd_ioc_ring_buffer(struct sev_device *sev, - unsigned int *reg, - unsigned int timeout) +void vpsp_worker_handler(struct work_struct *unused) { - int ret; + struct sev_user_data_status data; + struct sev_device *sev = psp_master->sev_data; + unsigned int reg; - ret = wait_event_timeout(sev->int_queue, - sev->int_rcvd, timeout * HZ); - if (!ret) - return -ETIMEDOUT; + reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + goto end; - *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + /* update head */ + vpsp_ring_update_head(CSV_COMMAND_PRIORITY_HIGH, + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(CSV_COMMAND_PRIORITY_LOW, + reg & PSP_RBHEAD_QLO_HEAD_MASK); - return 0; +end: + /** + * Before send new mailbox command, set vpsp_in_ringbuffer_mode + * to false to avoid nested triggering of the workqueue. 
+ */ + vpsp_in_ringbuffer_mode = false; + + /* exit ringbuf mode by send CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + vpsp_psp_mutex_unlock(); } -static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +static int __vpsp_do_ringbuf_cmds_locked(void) { struct psp_device *psp = psp_master; - unsigned int reg, ret = 0; unsigned int rb_tail, rb_head; unsigned int rb_ctl; struct sev_device *sev; @@ -947,35 +1107,12 @@ static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); - /* wait for all commands in ring buffer completed */ - ret = vpsp_wait_cmd_ioc_ring_buffer(sev, ®, (*hygon_psp_hooks.psp_timeout)*10); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(psp->dev, "csv command in ringbuffer mode timed out, disabling PSP\n"); - *hygon_psp_hooks.psp_dead = true; - return ret; - } - /* cmd error happends */ - if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) - ret = -EFAULT; - - /* update head */ - vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], - (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); - vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], - reg & PSP_RBHEAD_QLO_HEAD_MASK); - - if (psp_ret) - *psp_ret = vpsp_get_cmd_status(prio, index); - - return ret; + vpsp_in_ringbuffer_mode = true; + return 0; } -static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret) { - struct sev_user_data_status data; int rc; if (!hygon_psp_hooks.sev_dev_hooks_installed) @@ -985,13 +1122,7 @@ static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) if (rc) goto end; - rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); - - /* exit ringbuf mode by send CMD in 
mailbox mode */ - hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, - &data, NULL); - csv_comm_mode = CSV_COMM_MAILBOX_ON; - + rc = __vpsp_do_ringbuf_cmds_locked(); end: return rc; } @@ -1083,28 +1214,36 @@ int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) * Try to obtain the result again by the command index, this * interface is used in ringbuffer mode */ -int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, - struct vpsp_ret *psp_ret) +int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret) { int ret = 0; + uint8_t prio = cmd_ctx->rb_prio; + uint16_t statval = VPSP_CMD_STATUS_RUNNING; + uint32_t index = cmd_ctx->rb_index; + phys_addr_t phy_addr = cmd_ctx->psp_cmdbuf_paddr; struct csv_cmdptr_entry cmd = {0}; if (!hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; /* Get the retult directly if the command has been executed */ - if (index >= 0 && vpsp_get_cmd_status(prio, index) != - VPSP_CMD_STATUS_RUNNING) { - psp_ret->pret = vpsp_get_cmd_status(prio, index); - psp_ret->status = VPSP_FINISH; - return 0; + if (index >= 0) { + if (cmd_ctx->statval != VPSP_CMD_STATUS_RUNNING) + statval = cmd_ctx->statval; + else + statval = vpsp_get_cmd_status(prio, index); + if (statval != VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = statval; + psp_ret->status = VPSP_FINISH; + return 0; + } } if (vpsp_psp_mutex_trylock()) { /* Use mailbox mode to execute a command if there is only one command */ if (vpsp_queue_cmd_size(prio) == 1) { /* dequeue command from queue*/ - vpsp_dequeue_cmd(prio, index, &cmd); + vpsp_dequeue_and_notify(prio, &cmd); ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret); psp_ret->status = VPSP_FINISH; @@ -1120,19 +1259,18 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, } } } else { - ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, - index); - psp_ret->status = VPSP_FINISH; - vpsp_psp_mutex_unlock(); + ret = 
vpsp_do_ringbuf_cmds_locked((int *)psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", __func__, ret); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); goto end; } + psp_ret->status = VPSP_RUNNING; } } else { /* Change the command to the running state if getting the mutex fails */ - psp_ret->index = index; psp_ret->status = VPSP_RUNNING; return 0; } @@ -1148,7 +1286,8 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, * vpsp_try_get_result interface will be used to obtain the result * later again */ -int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, + struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret) { int ret = 0; int rb_supported; @@ -1181,8 +1320,14 @@ int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) goto end; } + cmd_ctx->rb_index = index; + cmd_ctx->rb_prio = prio; + cmd_ctx->psp_cmdbuf_paddr = phy_addr; + vpsp_cmd_ctx_array[prio][index] = cmd_ctx; + vpsp_cmd_ctx_obj_get(cmd_ctx); + /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret); + ret = vpsp_try_get_result(cmd_ctx, psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); goto end; diff --git a/drivers/crypto/ccp/hygon/vpsp.h b/drivers/crypto/ccp/hygon/vpsp.h index 179dde46dda0..6bae00ab8726 100644 --- a/drivers/crypto/ccp/hygon/vpsp.h +++ b/drivers/crypto/ccp/hygon/vpsp.h @@ -42,14 +42,14 @@ struct vpsp_cmd { * @pret: the return code from device * @resv: reserved bits * @format: indicates that the error is a unix error code(is 0) or a psp error(is 1) - * @index: used to distinguish the position of command in the ringbuffer + * @resv2: reserved bits * @status: indicates the current status of the related command */ struct vpsp_ret { u32 pret : 16; u32 resv : 1; u32 format : 1; - u32 index : 12; + u32 resv2 : 12; u32 status : 
2; }; #define VPSP_RET_SYS_FORMAT 1 @@ -68,7 +68,7 @@ struct vpsp_ret { #define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) #define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) -struct vpsp_context { +struct vpsp_dev_ctx { u32 vid; pid_t pid; u64 gpa_start; @@ -79,6 +79,28 @@ struct vpsp_context { u32 vm_handle; // only for csv }; +struct vpsp_cmd_ctx { + void *data; // copy forward mode only + uint32_t data_size; // copy forward mode only + uint8_t rb_prio; + uint32_t rb_index; + uint32_t statval; + phys_addr_t psp_cmdbuf_paddr; + refcount_t ref; + + /** + * key1 indicates the GPA + * to the data passed by the Guest + * + * key2 indicates the pid of Qemu Process + * + * Serves as the key for the vpsp_cmd_ctx_table. + */ + gpa_t key1; + pid_t key2; + struct hlist_node node; +}; + enum VPSP_DEV_CTRL_OPCODE { VPSP_OP_VID_ADD, VPSP_OP_VID_DEL, @@ -118,11 +140,17 @@ enum VPSP_RB_CHECK_STATUS { extern struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; extern struct hygon_psp_hooks_table hygon_psp_hooks; +extern bool vpsp_in_ringbuffer_mode; +extern struct kmem_cache *vpsp_cmd_ctx_slab; + +void vpsp_worker_handler(struct work_struct *unused); +int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret); +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, + struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret); +void vpsp_cmd_ctx_obj_get(struct vpsp_cmd_ctx *cmd_ctx); -int vpsp_try_get_result(uint8_t prio, uint32_t index, - phys_addr_t phy_addr, struct vpsp_ret *psp_ret); -int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret); -int vpsp_get_context(struct vpsp_context **ctx, pid_t pid); +void vpsp_cmd_ctx_obj_put(struct vpsp_cmd_ctx *cmd_ctx, bool force); +int vpsp_get_dev_ctx(struct vpsp_dev_ctx **ctx, pid_t pid); int vpsp_get_default_vid_permission(void); int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl); int 
vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, -- Gitee From 84004b6cbc38b9d8b9de40f0607847634b807e55 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 11 Feb 2025 15:44:57 +0800 Subject: [PATCH 3/3] crypto: ccp: Support vpsp ringbuffer overcommit Upstream: no The PSP firmware allows the ringbuffer to additionally submit some inactive commands as placeholders. During PSP firmware execution, the C86 can write new commands into the positions of these inactive commands, which are then executed by the PSP. This reduces the number of C86 to PSP I/O operations. Signed-off-by: xiongmengbiao --- drivers/crypto/ccp/hygon/csv-dev.c | 4 + drivers/crypto/ccp/hygon/psp-dev.c | 4 +- drivers/crypto/ccp/hygon/vpsp.c | 113 ++++++++++++++++++++++++----- drivers/crypto/ccp/hygon/vpsp.h | 7 +- 4 files changed, 108 insertions(+), 20 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index b8e2632638a2..0a6ea0f51c4f 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -34,6 +34,7 @@ EXPORT_SYMBOL_GPL(hygon_csv_build); int csv_comm_mode = CSV_COMM_MAILBOX_ON; static uint8_t vpsp_rb_supported; +uint8_t vpsp_rb_oc_supported; // support overcommit static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); /* @@ -862,6 +863,9 @@ int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, goto end; } WRITE_ONCE(vpsp_rb_supported, 1); + + if (VPSP_RB_OC_IS_SUPPORTED(status->build)) + WRITE_ONCE(vpsp_rb_oc_supported, 1); } atomic_set(&vpsp_rb_check_status, RB_CHECKED); diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 970b16130aba..31a170ac8417 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -301,8 +301,10 @@ int hygon_psp_additional_setup(struct sp_device *sp) vpsp_cmd_ctx_slab = kmem_cache_create("vpsp_cmd_ctx", sizeof(struct vpsp_cmd_ctx), 0, SLAB_HWCACHE_ALIGN, NULL); - if (!vpsp_cmd_ctx_slab) + if 
(!vpsp_cmd_ctx_slab) { + destroy_workqueue(vpsp_wq); return -ENOMEM; + } psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); if (!psp_misc) diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index 40b170498c1a..4fd989e9c55d 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -558,8 +558,10 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, break; } - if (psp_async.status == VPSP_FINISH) + if (psp_async.status == VPSP_FINISH) { vpsp_cmd_ctx_destroy(cmd_ctx); + ret = 0; + } end: /** @@ -865,11 +867,27 @@ int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) static DEFINE_MUTEX(vpsp_rb_mutex); struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; -static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int get_queue_tail(int prio) { + struct csv_ringbuffer_queue *ringbuffer = &vpsp_ring_buffer[prio]; return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; } +static int get_queue_overcommit_tail(int prio) +{ + uint32_t que_size = vpsp_queue_cmd_size(prio); + struct csv_ringbuffer_queue *ringbuffer = &vpsp_ring_buffer[prio]; + + if (que_size >= VPSP_RB_OVERCOMMIT_SIZE || que_size == 0 || !vpsp_rb_oc_supported) + return get_queue_tail(prio); + return (ringbuffer->cmd_ptr.head + VPSP_RB_OVERCOMMIT_SIZE) & ringbuffer->cmd_ptr.mask; +} + static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) { return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; @@ -891,11 +909,6 @@ static int vpsp_get_cmd_status(int prio, int index) return statval[index].status; } -static unsigned int vpsp_queue_cmd_size(int prio) -{ - return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); -} - static int vpsp_dequeue_and_notify(int prio, struct csv_cmdptr_entry *cmd_ptr) { struct vpsp_cmd_ctx *ctx = NULL; @@ -923,6 +936,24 @@ static int 
vpsp_dequeue_and_notify(int prio, struct csv_cmdptr_entry *cmd_ptr) return 0; } +/** + * Ensure that the 'status' field of cmd statval + * in the range from tail to overcommit tail in the queue is 0. + */ +static void vpsp_queue_overcommit_entry_inactive(int prio) +{ + int tail = 0, overcommit_tail = 0, i = 0; + + mutex_lock(&vpsp_rb_mutex); + + tail = get_queue_tail(prio); + overcommit_tail = get_queue_overcommit_tail(prio); + for (i = tail; i < overcommit_tail; ++i) + vpsp_set_cmd_status(prio, i, 0); + + mutex_unlock(&vpsp_rb_mutex); +} + /* * Populate the command from the virtual machine to the queue to * support execution in ringbuffer mode @@ -937,14 +968,34 @@ static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t cmdptr.cmd_flags = flags; mutex_lock(&vpsp_rb_mutex); - index = get_queue_tail(&vpsp_ring_buffer[prio]); - - /* The status must be written first, and then the cmd can be enqueued */ - vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); - if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { - vpsp_set_cmd_status(prio, index, 0); - index = -1; - goto out; + index = get_queue_tail(prio); + + /** + * If the firmware does not support the overcommit function: + * the firmware may not check the 'status' before executing cmd. + * Therefore, the 'status' must be written before the cmd be enqueued, + * otherwise, X86 may overwrite the result written by the firmware. + * + * If the firmware support the overcommit function: + * The firmware will forcefully check the 'status' + * before executing cmd until the 'status' becomes 0xffff. + * In order to prevent the firmware from getting the cmd to be valid, + * the 'status' must be written after waiting for the cmd to be queued. 
+ */ + if (vpsp_rb_oc_supported) { + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + } else { + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } } out: @@ -1042,12 +1093,17 @@ void vpsp_worker_handler(struct work_struct *unused) struct sev_user_data_status data; struct sev_device *sev = psp_master->sev_data; unsigned int reg; + unsigned int rb_head, rb_tail; reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); /* cmd error happends */ if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) goto end; + rb_head = reg; + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + pr_debug("ringbuffer exit rb_head %x, rb_tail %x\n", rb_head, rb_tail); /* update head */ vpsp_ring_update_head(CSV_COMMAND_PRIORITY_HIGH, (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); @@ -1074,6 +1130,8 @@ static int __vpsp_do_ringbuf_cmds_locked(void) unsigned int rb_tail, rb_head; unsigned int rb_ctl; struct sev_device *sev; + struct csv_queue *queue; + struct csv_cmdptr_entry *first_cmd; if (!psp || !hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; @@ -1084,12 +1142,13 @@ static int __vpsp_do_ringbuf_cmds_locked(void) sev = psp->sev_data; /* update rb tail */ + vpsp_queue_overcommit_entry_inactive(CSV_COMMAND_PRIORITY_LOW); rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); - rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + rb_tail |= (get_queue_tail(CSV_COMMAND_PRIORITY_HIGH) << PSP_RBTAIL_QHI_TAIL_SHIFT); rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); - rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + rb_tail |= 
get_queue_overcommit_tail(CSV_COMMAND_PRIORITY_LOW); iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); /* update rb head */ @@ -1101,6 +1160,26 @@ static int __vpsp_do_ringbuf_cmds_locked(void) rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + /** + * In some PSP firmware, even if the high priority queue is empty, + * it will still try to read the element at the head of the queue and try to process it. + * When the element at the head of the queue happens to be an illegal cmd id, + * PSP returns the PSP_RBHEAD_QPAUSE_INT_STAT error. + * + * Therefore, now we need to manually set the head element of the queue to + * the default tkm cmd id before sending the ringbuffer each time when + * the high priority queue is empty. + * + * The low priority queue has no such bug, and future PSP firmware should fix it. + */ + if (vpsp_queue_cmd_size(CSV_COMMAND_PRIORITY_HIGH) == 0) { + queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr; + first_cmd = (struct csv_cmdptr_entry *)queue->data_align; + first_cmd[queue->head & queue->mask].cmd_id = TKM_PSP_CMDID; + } + + pr_debug("ringbuffer launch rb_head %x, rb_tail %x\n", rb_head, rb_tail); + /* update rb ctl to trigger psp irq */ sev->int_rcvd = 0; /* PSP response to x86 only when all queue is empty or error happends */ diff --git a/drivers/crypto/ccp/hygon/vpsp.h b/drivers/crypto/ccp/hygon/vpsp.h index 6bae00ab8726..5432c12a23a3 100644 --- a/drivers/crypto/ccp/hygon/vpsp.h +++ b/drivers/crypto/ccp/hygon/vpsp.h @@ -128,20 +128,23 @@ struct vpsp_dev_ctrl { } __packed data; }; -/* defination of variabled used by virtual psp */ +/* definition of variables used by virtual psp */ enum VPSP_RB_CHECK_STATUS { RB_NOT_CHECK = 0, RB_CHECKING, RB_CHECKED, RB_CHECK_MAX }; -#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_RB_OC_IS_SUPPORTED(buildid) 
(buildid >= 2167) #define VPSP_CMD_STATUS_RUNNING 0xffff +#define VPSP_RB_OVERCOMMIT_SIZE 1024 extern struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; extern struct hygon_psp_hooks_table hygon_psp_hooks; extern bool vpsp_in_ringbuffer_mode; extern struct kmem_cache *vpsp_cmd_ctx_slab; +extern uint8_t vpsp_rb_oc_supported; void vpsp_worker_handler(struct work_struct *unused); int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret); -- Gitee