diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 0a6ea0f51c4f263bf29964475b6cacd5e93f91c6..35a5a783ee75130910a187968afe5f82a2d778bd 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -21,7 +21,6 @@ #include "psp-dev.h" #include "csv-dev.h" #include "ring-buffer.h" -#include "vpsp.h" /* * Hygon CSV build info: @@ -33,10 +32,6 @@ EXPORT_SYMBOL_GPL(hygon_csv_build); int csv_comm_mode = CSV_COMM_MAILBOX_ON; -static uint8_t vpsp_rb_supported; -uint8_t vpsp_rb_oc_supported; // support overcommit -static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); - /* * csv_update_api_version used to update the api version of HYGON CSV * firmwareat driver side. @@ -611,7 +606,7 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) cmdptr.cmd_id = cmd; cmdptr.cmd_flags = flags; - if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) + if (enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) return -EFAULT; return 0; @@ -635,7 +630,7 @@ int csv_check_stat_queue_status(int *psp_ret) do { struct csv_statval_entry statval; - len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val, + len = dequeue_stat(&sev->ring_buffer[prio].stat_val, &statval, 1); if (len) { if (statval.status != 0) { @@ -779,20 +774,6 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error #endif /* CONFIG_HYGON_CSV */ -static int vpsp_ring_buffer_queue_init(void) -{ - int i; - int ret; - - for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { - ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); - if (ret) - return ret; - } - - return 0; -} - /** * struct user_data_status - PLATFORM_STATUS command parameters * @@ -824,63 +805,3 @@ struct user_data_status { build : 31; /* Out */ uint32_t guest_count; /* Out */ } __packed; - -/* - * Check whether the firmware supports ringbuffer mode and parse - * commands from the virtual machine - */ -int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, - struct vpsp_cmd *vcmd) -{ - int ret, error; - int rb_supported; - int rb_check_old = RB_NOT_CHECK; - struct user_data_status *status = NULL; - - if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old, - RB_CHECKING)) { - /* get buildid to check if the firmware supports ringbuffer mode */ - status = kzalloc(sizeof(*status), GFP_KERNEL); - if (!status) { - atomic_set(&vpsp_rb_check_status, RB_CHECKED); - goto end; - } - ret = sev_platform_status((struct sev_user_data_status *)status, - &error); - if (ret) { - pr_warn("failed to get status[%#x], use default command mode.\n", error); - atomic_set(&vpsp_rb_check_status, RB_CHECKED); - kfree(status); - goto end; - } - - /* check if the firmware supports the ringbuffer mode */ - if (VPSP_RB_IS_SUPPORTED(status->build)) { - if (vpsp_ring_buffer_queue_init()) { - pr_warn("vpsp_ring_buffer_queue_init fail, use default command mode\n"); - atomic_set(&vpsp_rb_check_status, RB_CHECKED); - kfree(status); - goto end; - } - WRITE_ONCE(vpsp_rb_supported, 1); - - if (VPSP_RB_OC_IS_SUPPORTED(status->build)) - WRITE_ONCE(vpsp_rb_oc_supported, 1); - } - - atomic_set(&vpsp_rb_check_status, RB_CHECKED); - kfree(status); - } - -end: - rb_supported = READ_ONCE(vpsp_rb_supported); - /* parse prio by vcmd */ - if (rb_supported && vcmd->is_high_rb) - *prio = CSV_COMMAND_PRIORITY_HIGH; - else - *prio = CSV_COMMAND_PRIORITY_LOW; - /* clear rb level bit in vcmd */ - vcmd->is_high_rb = 0; - - return rb_supported; -} diff --git 
a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 31a170ac841793b04e04ce192b53c2ab112405f0..dae530c37f7512a1330e4c5db99deb335767ddf8 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -16,19 +16,24 @@ #include #include #include +#include #include "psp-dev.h" #include "vpsp.h" +#include "csv-dev.h" /* Function and variable pointers for hooks */ struct hygon_psp_hooks_table hygon_psp_hooks; -static unsigned int psp_int_rcvd; +unsigned int psp_int_rcvd; wait_queue_head_t psp_int_queue; struct kmem_cache *vpsp_cmd_ctx_slab; -static struct workqueue_struct *vpsp_wq; -static struct work_struct vpsp_work; +static struct workqueue_struct *psp_wq; +static struct work_struct psp_work; +static struct psp_ringbuffer_cmd_buf *psp_grb_cmdbuf; +static work_func_t psp_worker_notify; +bool psp_in_nowait_mode; static struct psp_misc_dev *psp_misc; #define HYGON_PSP_IOC_TYPE 'H' enum HYGON_PSP_OPCODE { @@ -40,6 +45,12 @@ enum HYGON_PSP_OPCODE { HYGON_PSP_OPCODE_MAX_NR, }; +#define HYGON_RESOURCE2_IOC_TYPE 'R' +enum HYGON_PSP_RESOURCE2_OPCODE { + HYGON_RESOURCE2_OP_GET_PCI_BAR_RANGE = 1, + HYGON_RESOURCE2_OPCODE_MAX_NR, +}; + uint64_t atomic64_exchange(volatile uint64_t *dst, uint64_t val) { #if 0 @@ -95,6 +106,30 @@ int psp_mutex_unlock(struct psp_mutex *mutex) return 0; } +void psp_worker_register_notify(work_func_t notify) +{ + psp_worker_notify = notify; +} + +static void psp_worker_handler(struct work_struct *unused) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return; + + if (psp_worker_notify) { + psp_worker_notify(unused); + psp_worker_notify = NULL; + psp_in_nowait_mode = false; + } + + if (mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); +} + static int mmap_psp(struct file *filp, struct vm_area_struct *vma) { unsigned long page; @@ -274,6 +309,63 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) return ret; } +static resource_size_t get_master_psp_bar_size(void) +{ + struct psp_device *psp = psp_master; + struct pci_dev *pdev = to_pci_dev(psp->dev); + + return pci_resource_len(pdev, 2); +} + +static long ioctl_psp_resource2(struct file *file, unsigned int ioctl, unsigned long arg) +{ + unsigned int opcode = 0; + resource_size_t bar_size = 0; + int ret = -EFAULT; + + if (_IOC_TYPE(ioctl) != HYGON_RESOURCE2_IOC_TYPE) { + pr_err("%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); + return -EINVAL; + } + + opcode = _IOC_NR(ioctl); + switch (opcode) { + case HYGON_RESOURCE2_OP_GET_PCI_BAR_RANGE: + bar_size = get_master_psp_bar_size(); + + if (copy_to_user((void __user *)arg, &bar_size, + sizeof(unsigned long))) + return -EFAULT; + ret = 0; + break; + + default: + pr_err("%s: invalid ioctl number: %d\n", __func__, opcode); + return -EINVAL; + } + return ret; +} + +static int mmap_psp_resource2(struct file *filp, struct vm_area_struct *vma) +{ + struct psp_device *psp = psp_master; + struct pci_dev *pdev = to_pci_dev(psp->dev); + int bar = 2; + + vma->vm_page_prot = pgprot_device(vma->vm_page_prot); + vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT); + + return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static const struct file_operations psp_source2_fops = { + .owner = THIS_MODULE, + .mmap = mmap_psp_resource2, + .unlocked_ioctl = ioctl_psp_resource2, +}; 
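For reference, a minimal userspace sketch of how the new resource2 device is intended to be used (illustrative only, not part of the patch): the device node name comes from the misc device registered later in this patch, and the ioctl request encoding below is an assumption, since ioctl_psp_resource2() only checks _IOC_TYPE ('R') and _IOC_NR (1) and copies back sizeof(unsigned long) bytes.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/* hypothetical request macro matching the checks in ioctl_psp_resource2() */
#define HYGON_PSP_GET_BAR_RANGE	_IOR('R', 1, unsigned long)

int main(void)
{
	unsigned long bar_size = 0;
	void *bar;
	int fd;

	fd = open("/dev/hygon_psp_resource2", O_RDWR);
	if (fd < 0)
		return 1;

	if (ioctl(fd, HYGON_PSP_GET_BAR_RANGE, &bar_size) == 0) {
		/* offset 0 maps from the start of BAR 2 (base added in-kernel) */
		bar = mmap(NULL, bar_size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
		if (bar != MAP_FAILED) {
			printf("BAR2 size %#lx mapped at %p\n", bar_size, bar);
			munmap(bar, bar_size);
		}
	}

	close(fd);
	return 0;
}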
+ static const struct file_operations psp_fops = { .owner = THIS_MODULE, .mmap = mmap_psp, @@ -293,16 +385,23 @@ int hygon_psp_additional_setup(struct sp_device *sp) if (!psp_misc) { struct miscdevice *misc; - vpsp_wq = create_singlethread_workqueue("vpsp_workqueue"); - if (!vpsp_wq) + psp_wq = create_singlethread_workqueue("psp_workqueue"); + if (!psp_wq) return -ENOMEM; - INIT_WORK(&vpsp_work, vpsp_worker_handler); + INIT_WORK(&psp_work, psp_worker_handler); vpsp_cmd_ctx_slab = kmem_cache_create("vpsp_cmd_ctx", sizeof(struct vpsp_cmd_ctx), 0, SLAB_HWCACHE_ALIGN, NULL); if (!vpsp_cmd_ctx_slab) { - destroy_workqueue(vpsp_wq); + destroy_workqueue(psp_wq); + return -ENOMEM; + } + + psp_grb_cmdbuf = kmalloc(sizeof(*psp_grb_cmdbuf), GFP_KERNEL); + if (!psp_grb_cmdbuf) { + kmem_cache_destroy(vpsp_cmd_ctx_slab); + destroy_workqueue(psp_wq); return -ENOMEM; } @@ -320,7 +419,7 @@ int hygon_psp_additional_setup(struct sp_device *sp) psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; - misc = &psp_misc->misc; + misc = &psp_misc->dev_misc; misc->minor = MISC_DYNAMIC_MINOR; misc->name = "hygon_psp_config"; misc->fops = &psp_fops; @@ -328,6 +427,17 @@ int hygon_psp_additional_setup(struct sp_device *sp) ret = misc_register(misc); if (ret) return ret; + + misc = &psp_misc->resource2_misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = "hygon_psp_resource2"; + misc->fops = &psp_source2_fops; + + ret = misc_register(misc); + if (ret) + return ret; + + psp_ringbuffer_queue_init(vpsp_ring_buffer); kref_init(&psp_misc->refcount); hygon_psp_hooks.psp_misc = psp_misc; } else { @@ -341,14 +451,17 @@ void hygon_psp_exit(struct kref *ref) { struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); - misc_deregister(&misc_dev->misc); + misc_deregister(&misc_dev->dev_misc); + misc_deregister(&misc_dev->resource2_misc); ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); free_page((unsigned long)misc_dev->data_pg_aligned); psp_misc = NULL; hygon_psp_hooks.psp_misc = NULL; kmem_cache_destroy(vpsp_cmd_ctx_slab); - flush_workqueue(vpsp_wq); - destroy_workqueue(vpsp_wq); + flush_workqueue(psp_wq); + destroy_workqueue(psp_wq); + kfree(psp_grb_cmdbuf); + psp_ringbuffer_queue_free(vpsp_ring_buffer); } int fixup_hygon_psp_caps(struct psp_device *psp) @@ -377,7 +490,21 @@ static int psp_wait_cmd_ioc(struct psp_device *psp, return 0; } -static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) +static int psp_wait_cmd_ioc_ringbuffer(struct psp_device *psp, + unsigned int *reg, unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(psp_int_queue, + psp_int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + + *reg = ioread32(psp->io_regs + psp->vdata->sev->cmdbuff_addr_lo_reg); + return 0; +} + +int psp_do_cmd_locked(int cmd, void *data, int *psp_ret, uint32_t op) { struct psp_device *psp = psp_master; unsigned int phys_lsb, phys_msb; @@ -389,9 +516,25 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) if (*hygon_psp_hooks.psp_dead) return -EBUSY; - /* Get the physical address of the command buffer */ - phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; - phys_msb = data ? 
upper_32_bits(__psp_pa(data)) : 0; + if (op & PSP_DO_CMD_OP_NOWAIT) { + if (psp_worker_notify) + psp_in_nowait_mode = true; + else { + dev_err(psp->dev, "psp_worker_notify not registered in nowait mode\n"); + return -EINVAL; + } + } else { + psp_in_nowait_mode = false; + } + + if (op & PSP_DO_CMD_OP_PHYADDR) { + phys_lsb = data ? lower_32_bits((phys_addr_t)data) : 0; + phys_msb = data ? upper_32_bits((phys_addr_t)data) : 0; + } else { + /* Get the physical address of the command buffer */ + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; + phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; + } dev_dbg(psp->dev, "psp command id %#x buffer 0x%08x%08x timeout %us\n", cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_cmd_timeout); @@ -404,27 +547,28 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; iowrite32(reg, psp->io_regs + psp->vdata->sev->cmdresp_reg); - /* wait for command completion */ - ret = psp_wait_cmd_ioc(psp, ®, *hygon_psp_hooks.psp_cmd_timeout); - if (ret) { - if (psp_ret) - *psp_ret = 0; + if (!(op & PSP_DO_CMD_OP_NOWAIT)) { + /* wait for command completion */ + ret = psp_wait_cmd_ioc(psp, ®, *hygon_psp_hooks.psp_cmd_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; - dev_err(psp->dev, "psp command %#x timed out, disabling PSP\n", cmd); - *hygon_psp_hooks.psp_dead = true; + dev_err(psp->dev, "psp command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; - return ret; - } + return ret; + } - if (psp_ret) - *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); - if (FIELD_GET(PSP_CMDRESP_STS, reg)) { - dev_dbg(psp->dev, "psp command %#x failed (%#010lx)\n", - cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); - ret = -EIO; + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(psp->dev, "psp command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } } - return ret; } @@ -443,7 +587,7 @@ int psp_do_cmd(int cmd, void *data, int *psp_ret) } else { mutex_lock(hygon_psp_hooks.sev_cmd_mutex); } - rc = __psp_do_cmd_locked(cmd, data, psp_ret); + rc = psp_do_cmd_locked(cmd, data, psp_ret, 0); if (mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else @@ -453,6 +597,400 @@ int psp_do_cmd(int cmd, void *data, int *psp_ret) } EXPORT_SYMBOL_GPL(psp_do_cmd); +uint8_t psp_legacy_rb_supported; // support legacy ringbuffer +uint8_t psp_rb_oc_supported; // support overcommit +uint8_t psp_generic_rb_supported; // support generic ringbuffer +void psp_ringbuffer_check_support(void) +{ + int ret, error = 0; + static atomic_t rb_checked = ATOMIC_INIT(0); + int rb_check_old = 0; + struct tkm_cmdresp_device_info_get *info = NULL; + + if (atomic_try_cmpxchg(&rb_checked, &rb_check_old, 1)) { + // get buildid to check if the firmware supports ringbuffer mode + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + atomic_set(&rb_checked, 0); + goto end; + } + + info->head.cmdresp_code = TKM_DEVICE_INFO_GET; + info->head.cmdresp_size = sizeof(*info); + info->head.buf_size = sizeof(*info); + ret = psp_do_cmd(TKM_PSP_CMDID, info, &error); + if (ret) { + pr_warn("psp_do_cmd failed ret %d[%#x]\n", ret, error); + atomic_set(&rb_checked, 0); + goto end; + } + + /* check if the firmware supports the ringbuffer mode */ + psp_legacy_rb_supported = PSP_RB_IS_SUPPORTED(info->dev_info.fw_version); + psp_rb_oc_supported = PSP_RB_OC_IS_SUPPORTED(info->dev_info.fw_version); + psp_generic_rb_supported = 
PSP_GRB_IS_SUPPORTED(info->dev_info.fw_version); + + if (!psp_legacy_rb_supported && !psp_generic_rb_supported) + pr_info("psp ringbuffer not supported\n"); + else + pr_info("psp ringbuffer is supported\n"); + } +end: + kfree(info); +} + + +static DEFINE_MUTEX(psp_rb_mutex); + +/* + * Populate the command from the virtual machine to the queue to + * support execution in ringbuffer mode + */ +uint32_t psp_ringbuffer_enqueue(struct csv_ringbuffer_queue *ringbuffer, + uint32_t cmd, phys_addr_t phy_addr, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + struct csv_statval_entry statval = { }; + uint32_t index = -1; + + if (!psp_legacy_rb_supported && !psp_generic_rb_supported) + return -1; + + cmdptr.cmd_buf_ptr = phy_addr; + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + statval.status = PSP_CMD_STATUS_RUNNING; + + mutex_lock(&psp_rb_mutex); + index = cmd_queue_tail(&ringbuffer->cmd_ptr); + + /** + * If the firmware does not support the overcommit function: + * the firmware may not check the 'status' before executing cmd. + * Therefore, the 'status' must be written before the cmd be enqueued, + * otherwise, X86 may overwrite the result written by the firmware. + * + * If the firmware support the overcommit function: + * The firmware will forcefully check the 'status' + * before executing cmd until the 'status' becomes 0xffff. + * In order to prevent the firmware from getting the cmd to be valid, + * the 'status' must be written after waiting for the cmd to be queued. + */ + if (psp_rb_oc_supported) { + if (enqueue_cmd(&ringbuffer->cmd_ptr, &cmdptr, 1) != 1) { + index = -1; + goto out; + } + enqueue_stat(&ringbuffer->stat_val, &statval, 1); + } else { + if (enqueue_stat(&ringbuffer->stat_val, &statval, 1) != 1) { + index = -1; + goto out; + } + enqueue_cmd(&ringbuffer->cmd_ptr, &cmdptr, 1); + } + +out: + mutex_unlock(&psp_rb_mutex); + return index; +} + +void psp_ringbuffer_dequeue(struct csv_ringbuffer_queue *ringbuffer, + struct csv_cmdptr_entry *cmdptr, + struct csv_statval_entry *statval, uint32_t num) +{ + int i; + uint32_t orig_head, que_size; + + mutex_lock(&psp_rb_mutex); + + orig_head = cmd_queue_head(&ringbuffer->cmd_ptr); + que_size = cmd_queue_size(&ringbuffer->cmd_ptr); + if (que_size < num) + num = que_size; + + if (cmdptr) + dequeue_cmd(&ringbuffer->cmd_ptr, (void *)cmdptr, num); + else + ringbuffer->cmd_ptr.head += num; + + if (statval) + dequeue_stat(&ringbuffer->stat_val, (void *)statval, num); + else + ringbuffer->stat_val.head += num; + + /** + * Ensure that the statval of the dequeued command is 0 + * to prevent it from being accessed by the overcommit + * function of the psp ringbuffer. + */ + for (i = orig_head; i < orig_head + num; ++i) + ringbuffer_set_status(ringbuffer, i, 0); + + mutex_unlock(&psp_rb_mutex); +} + +static int __psp_ringbuffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + int ret = 0, i; + void *cmd_ptr_buffer = NULL; + void *stat_val_buffer = NULL; + struct csv_cmdptr_entry *cmd; + + memset((void *)ring_buffer, 0, sizeof(struct csv_ringbuffer_queue)); + + cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!cmd_ptr_buffer) + return -ENOMEM; + csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); + /** + * For high-priority queue: initialize all commands with a valid cmd_id + * to prevent PSP from reading invalid cmd_id. + * + * Low-priority queue never + * attempts to read commands from empty queue. 
+ */ + cmd = (struct csv_cmdptr_entry *)ring_buffer->cmd_ptr.data_align; + for (i = 0; i < CSV_RING_BUFFER_ELEMENT_NUM; ++i) + cmd[i].cmd_id = TKM_PSP_CMDID; + + stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!stat_val_buffer) { + ret = -ENOMEM; + goto free_cmdptr; + } + csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); + return 0; + +free_cmdptr: + kfree(cmd_ptr_buffer); + + return ret; +} + +int psp_ringbuffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + int i; + int ret; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __psp_ringbuffer_queue_init(&ring_buffer[i]); + if (ret) + goto free; + } + return 0; +free: + psp_ringbuffer_queue_free(ring_buffer); + return -ENOMEM; +} + +void psp_ringbuffer_queue_free(struct csv_ringbuffer_queue *ring_buffer) +{ + int i; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + kfree((void *)ring_buffer[i].cmd_ptr.data); + ring_buffer[i].cmd_ptr.data = 0; + kfree((void *)ring_buffer[i].stat_val.data); + ring_buffer[i].stat_val.data = 0; + } +} + +static int __psp_do_generic_ringbuf_cmds_locked(struct csv_ringbuffer_queue *ring_buffer, + int *pspret) +{ + int cmd = PSP_CMD_RING_BUFFER; + struct csv_ringbuffer_queue *que = NULL; + int psp_op = 0; + + if (!psp_grb_cmdbuf) + return -EFAULT; + + que = &ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + psp_grb_cmdbuf->high.mask = que->cmd_ptr.mask; + psp_grb_cmdbuf->high.cmdptr_address = __psp_pa(que->cmd_ptr.data_align); + psp_grb_cmdbuf->high.statval_address = __psp_pa(que->stat_val.data_align); + psp_grb_cmdbuf->high.head = cmd_queue_head(&que->cmd_ptr); + psp_grb_cmdbuf->high.tail = cmd_queue_tail(&que->cmd_ptr); + + que = &ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + psp_grb_cmdbuf->low.mask = que->cmd_ptr.mask; + psp_grb_cmdbuf->low.cmdptr_address = __psp_pa(que->cmd_ptr.data_align); + psp_grb_cmdbuf->low.statval_address = __psp_pa(que->stat_val.data_align); + psp_grb_cmdbuf->low.head = cmd_queue_head(&que->cmd_ptr); + if (psp_rb_oc_supported) + psp_grb_cmdbuf->low.tail = cmd_queue_overcommit_tail(&que->cmd_ptr); + else + psp_grb_cmdbuf->low.tail = cmd_queue_tail(&que->cmd_ptr); + + pr_debug("ringbuffer launch high head %x, tail %x\n", + psp_grb_cmdbuf->high.head, psp_grb_cmdbuf->high.tail); + pr_debug("ringbuffer launch low head %x, tail %x\n", + psp_grb_cmdbuf->low.head, psp_grb_cmdbuf->low.tail); + + if (psp_worker_notify) + psp_op = PSP_DO_CMD_OP_NOWAIT; + + return psp_do_cmd_locked(cmd, psp_grb_cmdbuf, pspret, psp_op); +} + +static int __psp_ringbuffer_enter_locked(struct csv_ringbuffer_queue *ring_buffer, int *error) +{ + int ret; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct psp_device *psp = psp_master; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = psp_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error, 0); + if (!ret) + iowrite32(0, psp->io_regs + 
psp->vdata->sev->cmdbuff_addr_hi_reg); + + kfree(data); + return ret; +} + +static int __psp_do_ringbuffer_cmds_locked(struct csv_ringbuffer_queue *ring_buffer, int *psp_ret) +{ + struct psp_device *psp = psp_master; + unsigned int rb_tail, rb_head; + unsigned int reg, rb_ctl, ret = 0; + struct csv_ringbuffer_queue *hi_rb, *low_rb; + + if (!psp || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + hi_rb = &ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + low_rb = &ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + + /* update rb tail */ + rb_tail = ioread32(psp->io_regs + psp->vdata->sev->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (cmd_queue_tail(&hi_rb->cmd_ptr) << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + if (psp_rb_oc_supported) + rb_tail |= cmd_queue_overcommit_tail(&low_rb->cmd_ptr); + else + rb_tail |= cmd_queue_tail(&low_rb->cmd_ptr); + iowrite32(rb_tail, psp->io_regs + psp->vdata->sev->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(psp->io_regs + psp->vdata->sev->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (cmd_queue_head(&hi_rb->cmd_ptr) << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= cmd_queue_head(&low_rb->cmd_ptr); + iowrite32(rb_head, psp->io_regs + psp->vdata->sev->cmdbuff_addr_lo_reg); + + pr_debug("ringbuffer launch rb_head %x, rb_tail %x\n", rb_head, rb_tail); + + if (psp_worker_notify) + psp_in_nowait_mode = true; + + /* update rb ctl to trigger psp irq */ + psp_int_rcvd = 0; + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, psp->io_regs + psp->vdata->sev->cmdresp_reg); + + if (!psp_in_nowait_mode) { + /* wait for all commands in ring buffer completed */ + ret = psp_wait_cmd_ioc_ringbuffer(psp, ®, *hygon_psp_hooks.psp_cmd_timeout*10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + dev_err(psp->dev, + "psp command in ringbuffer mode timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + return ret; + } + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + } + + return ret; +} + +int psp_do_ringbuffer_cmds_locked(struct csv_ringbuffer_queue *ring_buffer, int *psp_ret) +{ + int rc = 0; + + if (psp_generic_rb_supported) { + rc = __psp_do_generic_ringbuf_cmds_locked(ring_buffer, psp_ret); + } else { + rc = __psp_ringbuffer_enter_locked(ring_buffer, psp_ret); + if (rc) + goto end; + + rc = __psp_do_ringbuffer_cmds_locked(ring_buffer, psp_ret); + } +end: + return rc; +} + +int psp_ringbuffer_get_newhead(uint32_t *hi_head, uint32_t *low_head) +{ + struct psp_device *psp = psp_master; + unsigned int reg; + unsigned int rb_head, rb_tail; + unsigned int psp_ret; + int ret = -EIO; + + if (psp_generic_rb_supported) { + reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); + psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + if (psp_ret) { + pr_debug("ringbuffer execve failed (%#010x)\n", psp_ret); + ret = -psp_ret; + goto end; + } + + *hi_head = psp_grb_cmdbuf->high.head; + *low_head = psp_grb_cmdbuf->low.head; + pr_debug("ringbuffer exit hi_head %x, low_head %x\n", *hi_head, *low_head); + } else { + reg = ioread32(psp->io_regs + psp->vdata->sev->cmdbuff_addr_lo_reg); + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) { + pr_debug("ringbuffer execve failed (%#010x)\n", reg); + goto end; + } + + 
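+	/*
+	 * Legacy mode: the PSP reports progress by packing the updated queue
+	 * heads into the low command-buffer register (high-priority head in
+	 * the PSP_RBHEAD_QHI_HEAD field, low-priority head in the low bits),
+	 * mirroring the rb_head layout programmed in
+	 * __psp_do_ringbuffer_cmds_locked().
+	 */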
rb_head = reg; + rb_tail = ioread32(psp->io_regs + psp->vdata->sev->cmdbuff_addr_hi_reg); + + *hi_head = (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT; + *low_head = reg & PSP_RBHEAD_QLO_HEAD_MASK; + pr_debug("ringbuffer exit rb_head %x, rb_tail %x\n", rb_head, rb_tail); + } + + ret = 0; +end: + return ret; +} + #ifdef CONFIG_HYGON_PSP2CPU_CMD static DEFINE_SPINLOCK(p2c_notifier_lock); @@ -521,8 +1059,8 @@ static irqreturn_t psp_irq_handler_hygon(int irq, void *data) /* Check if it is SEV command completion: */ reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); if (reg & PSP_CMDRESP_RESP) { - if (vpsp_in_ringbuffer_mode) { - queue_work(vpsp_wq, &vpsp_work); + if (psp_in_nowait_mode) { + queue_work(psp_wq, &psp_work); } else { psp_int_rcvd = 1; wake_up(&psp_int_queue); diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index d87d51f67837b66e75668fdd0a532d3d12675e9d..b76608219b84a41bcf575c5f8c1f01b0679ace56 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -13,7 +13,9 @@ #include #include #include +#include +#include "ring-buffer.h" #include "sp-dev.h" #include "../psp-dev.h" @@ -47,12 +49,60 @@ extern struct hygon_psp_hooks_table { } hygon_psp_hooks; extern struct wait_queue_head psp_int_queue; +extern uint8_t psp_legacy_rb_supported; // support legacy ringbuffer +extern uint8_t psp_rb_oc_supported; // support overcommit +extern uint8_t psp_generic_rb_supported; // support generic ringbuffer #define PSP_MUTEX_TIMEOUT 60000 struct psp_mutex { volatile uint64_t locked; }; +struct tkm_cmdresp_head { + uint32_t buf_size; //including this header + uint32_t cmdresp_size; //including this header + uint32_t cmdresp_code; +} __packed; + +struct tkm_device_info { + uint32_t api_version; + uint32_t fw_version; + uint32_t kek_sm4_total; + uint32_t isk_sm2_sign_total; + uint32_t isk_sm2_enc_total; + uint8_t chip_id[32]; +} __packed; +struct tkm_cmdresp_device_info_get { + struct tkm_cmdresp_head head; + struct tkm_device_info dev_info; +} __packed; + +struct queue_info { + uint32_t head; /* In|Out */ + uint32_t tail; /* In */ + uint32_t mask; /* In */ + uint64_t cmdptr_address; /* In */ + uint64_t statval_address; /* In */ + uint8_t reserved[36]; +} __packed; // total 64 bytes +struct psp_ringbuffer_cmd_buf { + struct queue_info high; + struct queue_info low; + uint8_t reserved[128]; +} __packed; // total 256 bytes + +#define PSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913 && boot_cpu_has(X86_FEATURE_SEV)) +#define PSP_RB_OC_IS_SUPPORTED(buildid) (buildid >= 2167) +#define PSP_GRB_IS_SUPPORTED(buildid) (buildid >= 2270) +#define PSP_CMD_STATUS_RUNNING 0xffff +#define PSP_RB_OVERCOMMIT_SIZE 1024 +#define TKM_DEVICE_INFO_GET 0x1001 + +#define PSP_CMD_RING_BUFFER 0x304 +#define PSP_DO_CMD_OP_PHYADDR BIT(0) // Input data as physical address +#define PSP_DO_CMD_OP_NOWAIT BIT(1) // No need to wait ioc +int psp_do_cmd_locked(int cmd, void *data, int *psp_ret, uint32_t op); + struct psp_dev_data { struct psp_mutex mb_mutex; }; @@ -60,7 +110,8 @@ struct psp_dev_data { struct psp_misc_dev { struct kref refcount; struct psp_dev_data *data_pg_aligned; - struct miscdevice misc; + struct miscdevice dev_misc; + struct miscdevice resource2_misc; }; extern int psp_mutex_trylock(struct psp_mutex *mutex); @@ -72,5 +123,31 @@ int psp_mutex_unlock(struct psp_mutex *mutex); int fixup_hygon_psp_caps(struct psp_device *psp); int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); 
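A minimal caller sketch of the no-wait ring-buffer flow declared below (illustration only, modeled on the vpsp usage later in this patch): the my_* names are hypothetical, my_rb is assumed to have been set up with psp_ringbuffer_queue_init(), and the caller is assumed to already hold the PSP command mutex, which the PSP worker releases after running the registered notify.

static struct csv_ringbuffer_queue my_rb[CSV_COMMAND_PRIORITY_NUM];

static void my_rb_done(struct work_struct *unused)
{
	uint32_t hi_head = 0, low_head = 0;

	/* a real caller derives the completion count from the new heads */
	if (!psp_ringbuffer_get_newhead(&hi_head, &low_head))
		psp_ringbuffer_dequeue(&my_rb[CSV_COMMAND_PRIORITY_LOW],
				       NULL, NULL, 1);
}

static int my_submit(uint32_t cmd, phys_addr_t buf_pa, int *psp_ret)
{
	uint32_t index;

	index = psp_ringbuffer_enqueue(&my_rb[CSV_COMMAND_PRIORITY_LOW],
				       cmd, buf_pa, 0);
	if (index == (uint32_t)-1)
		return -ENOSPC;

	/* must be registered before the no-wait submission below */
	psp_worker_register_notify(my_rb_done);
	return psp_do_ringbuffer_cmds_locked(my_rb, psp_ret);
}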
+/** + * When PSP_DO_CMD_OP_NOWAIT is used with psp_do_cmd_locked, + * psp_worker_register_notify must be called first to register async notify + * for PSP worker bottom-half execution. Note: triggering worker bottom-half + * always clears previous notify. + */ +void psp_worker_register_notify(work_func_t notify); + +/** + * psp generic ringbuffer implement. + **/ +uint32_t psp_ringbuffer_enqueue(struct csv_ringbuffer_queue *ringbuffer, + uint32_t cmd, phys_addr_t phy_addr, uint16_t flags); +void psp_ringbuffer_dequeue(struct csv_ringbuffer_queue *ringbuffer, + struct csv_cmdptr_entry *cmdptr, struct csv_statval_entry *statval, + uint32_t num); +int psp_ringbuffer_queue_init(struct csv_ringbuffer_queue *ring_buffer); +void psp_ringbuffer_queue_free(struct csv_ringbuffer_queue *ring_buffer); +int psp_ringbuffer_get_newhead(uint32_t *hi_head, uint32_t *low_head); +void psp_ringbuffer_check_support(void); + +/** + * psp_do_ringbuffer_cmds_locked is a no-wait PSP operation. + * Must register notify via psp_worker_register_notify before use. + */ +int psp_do_ringbuffer_cmds_locked(struct csv_ringbuffer_queue *ring_buffer, int *psp_ret); #endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c index 0c9ea0217b2ea1a39e2b02030af0e293e8f13790..7476902f69012daed996ed59bf1986fb182fa6db 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.c +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -102,7 +102,21 @@ void csv_queue_cleanup(struct csv_queue *queue) memset((void *)queue, 0, sizeof(struct csv_queue)); } -unsigned int csv_enqueue_cmd(struct csv_queue *queue, +unsigned int enqueue_stat(struct csv_queue *ring_buf, + const void *buf, unsigned int len) +{ + unsigned int size; + + size = queue_avail_size(ring_buf); + if (len > size) + len = size; + + enqueue_data(ring_buf, buf, len, ring_buf->tail); + ring_buf->tail += len; + return len; +} + +unsigned int enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len) { unsigned int size; @@ -116,7 +130,7 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, return len; } -unsigned int csv_dequeue_stat(struct csv_queue *queue, +unsigned int dequeue_stat(struct csv_queue *queue, void *buf, unsigned int len) { unsigned int size; @@ -130,7 +144,7 @@ unsigned int csv_dequeue_stat(struct csv_queue *queue, return len; } -unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, +unsigned int dequeue_cmd(struct csv_queue *ring_buf, void *buf, unsigned int len) { unsigned int size; @@ -144,10 +158,47 @@ unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, return len; } -unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf) +unsigned int cmd_queue_size(struct csv_queue *ring_buf) { unsigned int free_size; free_size = queue_avail_size(ring_buf); return ring_buf->mask - free_size; } + +unsigned int cmd_queue_tail(struct csv_queue *ring_buf) +{ + return ring_buf->tail & ring_buf->mask; +} + +unsigned int cmd_queue_overcommit_tail(struct csv_queue *ring_buf) +{ + unsigned int que_size = cmd_queue_size(ring_buf); + + if (que_size >= PSP_RING_BUFFER_OVERCOMMIT_SIZE || que_size == 0) + return cmd_queue_tail(ring_buf); + return (ring_buf->head + PSP_RING_BUFFER_OVERCOMMIT_SIZE) & ring_buf->mask; +} + +unsigned int cmd_queue_head(struct csv_queue *ring_buf) +{ + return ring_buf->head & ring_buf->mask; +} + +void ringbuffer_set_status(struct csv_ringbuffer_queue *ringbuffer, + unsigned int index, unsigned int status) +{ + struct csv_queue *queue = &ringbuffer->stat_val; + struct 
csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; + + statval[index & queue->mask].status = status; +} + +unsigned int ringbuffer_get_status(struct csv_ringbuffer_queue *ringbuffer, unsigned int index) +{ + struct csv_queue *ringbuf = &ringbuffer->stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index & ringbuf->mask].status; + +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h index bf97aa6df36a2e66b4e306dc1bf253b73a07ce70..8545fc13e281f20117995182a1d51758f3c2a11c 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.h +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -15,13 +15,26 @@ int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize); void csv_queue_cleanup(struct csv_queue *queue); -unsigned int csv_enqueue_cmd(struct csv_queue *queue, +unsigned int enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len); -unsigned int csv_dequeue_stat(struct csv_queue *queue, +unsigned int dequeue_stat(struct csv_queue *queue, void *buf, unsigned int len); -unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, +unsigned int dequeue_cmd(struct csv_queue *ring_buf, void *buf, unsigned int len); -unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf); +unsigned int cmd_queue_size(struct csv_queue *ring_buf); + +unsigned int enqueue_stat(struct csv_queue *ring_buf, + const void *buf, unsigned int len); +unsigned int cmd_queue_tail(struct csv_queue *ring_buf); + +unsigned int cmd_queue_overcommit_tail(struct csv_queue *ring_buf); + +unsigned int cmd_queue_head(struct csv_queue *ring_buf); + +void ringbuffer_set_status(struct csv_ringbuffer_queue *ringbuffer, + unsigned int index, unsigned int status); + +unsigned int ringbuffer_get_status(struct csv_ringbuffer_queue *ringbuffer, unsigned int index); #endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index 4fd989e9c55d440ded9fd9780950ef67e20756d7..8d2b2b3bcd8b562af7586ea63617009c823cbe3d 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -20,7 +20,6 @@ #include #include -#include "ring-buffer.h" #include "psp-dev.h" #include "csv-dev.h" #include "vpsp.h" @@ -63,7 +62,6 @@ */ DEFINE_HASHTABLE(vpsp_cmd_ctx_table, 11); DEFINE_RWLOCK(table_rwlock); -bool vpsp_in_ringbuffer_mode; static struct vpsp_cmd_ctx *vpsp_cmd_ctx_array[CSV_COMMAND_PRIORITY_NUM] [CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE]; @@ -132,7 +130,7 @@ static struct vpsp_cmd_ctx *vpsp_cmd_ctx_create(gpa_t key1, pid_t key2) * the initial value must be greater than 0. */ refcount_set(&cmd_ctx->ref, 1); - cmd_ctx->statval = VPSP_CMD_STATUS_RUNNING; + cmd_ctx->statval = PSP_CMD_STATUS_RUNNING; cmd_ctx->key1 = key1; cmd_ctx->key2 = key2; vpsp_hashtable_add_cmd_ctx(cmd_ctx); @@ -148,7 +146,7 @@ static struct vpsp_cmd_ctx *vpsp_cmd_ctx_create(gpa_t key1, pid_t key2) * only additional to perform decrement refcount. * * Actual memory release occurs when the refcount drops to 0, - * which may happen during the vpsp_worker_handler or + * which may happen during the psp_worker_handler or * vpsp_cmd_ctx_destroy process. 
* * @cmd_ctx: the vpsp_cmd_ctx object @@ -863,162 +861,35 @@ int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) return ret; } - -static DEFINE_MUTEX(vpsp_rb_mutex); struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; - -static unsigned int vpsp_queue_cmd_size(int prio) -{ - return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); -} - -static int get_queue_tail(int prio) -{ - struct csv_ringbuffer_queue *ringbuffer = &vpsp_ring_buffer[prio]; - return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; -} - -static int get_queue_overcommit_tail(int prio) -{ - uint32_t que_size = vpsp_queue_cmd_size(prio); - struct csv_ringbuffer_queue *ringbuffer = &vpsp_ring_buffer[prio]; - - if (que_size >= VPSP_RB_OVERCOMMIT_SIZE || que_size == 0 || !vpsp_rb_oc_supported) - return get_queue_tail(prio); - return (ringbuffer->cmd_ptr.head + VPSP_RB_OVERCOMMIT_SIZE) & ringbuffer->cmd_ptr.mask; -} - -static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) -{ - return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; -} - -static void vpsp_set_cmd_status(int prio, int index, int status) -{ - struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; - struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; - - statval[index].status = status; -} - -static int vpsp_get_cmd_status(int prio, int index) -{ - struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; - struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; - - return statval[index].status; -} - -static int vpsp_dequeue_and_notify(int prio, struct csv_cmdptr_entry *cmd_ptr) -{ - struct vpsp_cmd_ctx *ctx = NULL; - int mask = vpsp_ring_buffer[prio].cmd_ptr.mask; - int index = vpsp_ring_buffer[prio].cmd_ptr.head & mask; - - ctx = vpsp_cmd_ctx_array[prio][index]; - if (ctx) { - /** - * Write the result back to the cmd ctx, - * after which we can safely perform - * the ringbuffer dequeue operation without - * waiting for the Guest to retrieve the result. - */ - ctx->statval = vpsp_get_cmd_status(prio, index); - vpsp_cmd_ctx_obj_put(ctx, false); - } - - /* The status update must be before the head update */ - vpsp_set_cmd_status(prio, index, 0); - - mutex_lock(&vpsp_rb_mutex); - csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); - mutex_unlock(&vpsp_rb_mutex); - return 0; -} - -/** - * Ensure that the 'status' field of cmd statval - * in the range from tail to overcommit tail in the queue is 0. - */ -static void vpsp_queue_overcommit_entry_inactive(int prio) -{ - int tail = 0, overcommit_tail = 0, i = 0; - - mutex_lock(&vpsp_rb_mutex); - - tail = get_queue_tail(prio); - overcommit_tail = get_queue_overcommit_tail(prio); - for (i = tail; i < overcommit_tail; ++i) - vpsp_set_cmd_status(prio, i, 0); - - mutex_unlock(&vpsp_rb_mutex); -} - -/* - * Populate the command from the virtual machine to the queue to - * support execution in ringbuffer mode - */ -static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t flags) -{ - struct csv_cmdptr_entry cmdptr = { }; - int index = -1; - - cmdptr.cmd_buf_ptr = phy_addr; - cmdptr.cmd_id = cmd; - cmdptr.cmd_flags = flags; - - mutex_lock(&vpsp_rb_mutex); - index = get_queue_tail(prio); - - /** - * If the firmware does not support the overcommit function: - * the firmware may not check the 'status' before executing cmd. - * Therefore, the 'status' must be written before the cmd be enqueued, - * otherwise, X86 may overwrite the result written by the firmware. 
- * - * If the firmware support the overcommit function: - * The firmware will forcefully check the 'status' - * before executing cmd until the 'status' becomes 0xffff. - * In order to prevent the firmware from getting the cmd to be valid, - * the 'status' must be written after waiting for the cmd to be queued. - */ - if (vpsp_rb_oc_supported) { - if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { - vpsp_set_cmd_status(prio, index, 0); - index = -1; - goto out; - } - vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); - } else { - vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); - if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { - vpsp_set_cmd_status(prio, index, 0); - index = -1; - goto out; - } - } - -out: - mutex_unlock(&vpsp_rb_mutex); - return index; -} - static void vpsp_ring_update_head(int prio, uint32_t new_head) { struct csv_ringbuffer_queue *ring_buffer = &vpsp_ring_buffer[prio]; - uint32_t orig_head = get_queue_head(ring_buffer); - struct csv_cmdptr_entry entry; - uint32_t comple_num = 0; - int i; + uint32_t orig_head = cmd_queue_head(&ring_buffer->cmd_ptr); + uint32_t comple_num = 0, index = orig_head; + struct vpsp_cmd_ctx *ctx = NULL; + int i, mask = ring_buffer->cmd_ptr.mask; if (new_head >= orig_head) comple_num = new_head - orig_head; else - comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) - + 1; - - for (i = 0; i < comple_num; ++i) - vpsp_dequeue_and_notify(prio, &entry); + comple_num = mask - (orig_head - new_head) + 1; + + for (i = 0; i < comple_num; ++i) { + index = (orig_head + i) & mask; + ctx = vpsp_cmd_ctx_array[prio][index]; + if (ctx) { + /** + * Write the result back to the cmd ctx, + * after which we can safely perform + * the ringbuffer dequeue operation without + * waiting for the Guest to retrieve the result. 
+ */ + ctx->statval = ringbuffer_get_status(ring_buffer, index); + vpsp_cmd_ctx_obj_put(ctx, false); + } + } + psp_ringbuffer_dequeue(ring_buffer, NULL, NULL, comple_num); } static int vpsp_psp_mutex_trylock(void) @@ -1049,217 +920,21 @@ static int vpsp_psp_mutex_unlock(void) return 0; } -static int __vpsp_ring_buffer_enter_locked(int *error) -{ - int ret; - struct csv_data_ring_buffer *data; - struct csv_ringbuffer_queue *low_queue; - struct csv_ringbuffer_queue *hi_queue; - struct sev_device *sev = psp_master->sev_data; - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) - return -EEXIST; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; - hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; - - data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); - data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); - data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); - data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); - data->queue_lo_size = 1; - data->queue_hi_size = 1; - data->int_on_empty = 1; - - ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); - if (!ret) { - iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - csv_comm_mode = CSV_COMM_RINGBUFFER_ON; - } - - kfree(data); - return ret; -} - -void vpsp_worker_handler(struct work_struct *unused) +static void vpsp_ringbuffer_wakeup_locked(struct work_struct *unused) { struct sev_user_data_status data; - struct sev_device *sev = psp_master->sev_data; - unsigned int reg; - unsigned int rb_head, rb_tail; - - reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - /* cmd error happends */ - if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) - goto end; + unsigned int hi_head = 0, low_head = 0; - rb_head = reg; - rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + psp_ringbuffer_get_newhead(&hi_head, &low_head); - pr_debug("ringbuffer exit rb_head %x, rb_tail %x\n", rb_head, rb_tail); /* update head */ - vpsp_ring_update_head(CSV_COMMAND_PRIORITY_HIGH, - (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); - vpsp_ring_update_head(CSV_COMMAND_PRIORITY_LOW, - reg & PSP_RBHEAD_QLO_HEAD_MASK); - -end: - /** - * Before send new mailbox command, set vpsp_in_ringbuffer_mode - * to false to avoid nested triggering of the workqueue. 
- */ - vpsp_in_ringbuffer_mode = false; - - /* exit ringbuf mode by send CMD in mailbox mode */ - hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, - &data, NULL); - csv_comm_mode = CSV_COMM_MAILBOX_ON; - vpsp_psp_mutex_unlock(); -} - -static int __vpsp_do_ringbuf_cmds_locked(void) -{ - struct psp_device *psp = psp_master; - unsigned int rb_tail, rb_head; - unsigned int rb_ctl; - struct sev_device *sev; - struct csv_queue *queue; - struct csv_cmdptr_entry *first_cmd; - - if (!psp || !hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (*hygon_psp_hooks.psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - /* update rb tail */ - vpsp_queue_overcommit_entry_inactive(CSV_COMMAND_PRIORITY_LOW); - rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); - rb_tail |= (get_queue_tail(CSV_COMMAND_PRIORITY_HIGH) - << PSP_RBTAIL_QHI_TAIL_SHIFT); - rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); - rb_tail |= get_queue_overcommit_tail(CSV_COMMAND_PRIORITY_LOW); - iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - /* update rb head */ - rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); - rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) - << PSP_RBHEAD_QHI_HEAD_SHIFT); - rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); - rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); - iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - - /** - * In some PSP firmware, even if the high priority queue is empty, - * it will still try to read the element at the head of the queue and try to process it. - * When the element at the head of the queue happens to be an illegal cmd id, - * PSP returns the PSP_RBHEAD_QPAUSE_INT_STAT error. - * - * Therefore, now we need to manually set the head element of the queue to - * the default tkm cmd id before sending the ringbuffer each time when - * the high priority queue is empty. - * - * The low priority queue has no such bug, and future PSP firmware should fix it. - */ - if (vpsp_queue_cmd_size(CSV_COMMAND_PRIORITY_HIGH) == 0) { - queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr; - first_cmd = (struct csv_cmdptr_entry *)queue->data_align; - first_cmd[queue->head & queue->mask].cmd_id = TKM_PSP_CMDID; - } - - pr_debug("ringbuffer launch rb_head %x, rb_tail %x\n", rb_head, rb_tail); - - /* update rb ctl to trigger psp irq */ - sev->int_rcvd = 0; - /* PSP response to x86 only when all queue is empty or error happends */ - rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); - iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); - - vpsp_in_ringbuffer_mode = true; - return 0; -} - -static int vpsp_do_ringbuf_cmds_locked(int *psp_ret) -{ - int rc; - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - rc = __vpsp_ring_buffer_enter_locked(psp_ret); - if (rc) - goto end; - - rc = __vpsp_do_ringbuf_cmds_locked(); -end: - return rc; -} - -static int __vpsp_do_cmd_locked(int cmd, phys_addr_t phy_addr, int *psp_ret) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - unsigned int phys_lsb, phys_msb; - unsigned int reg, ret = 0; - - if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (*hygon_psp_hooks.psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - /* Get the physical address of the command buffer */ - phys_lsb = phy_addr ? 
lower_32_bits(phy_addr) : 0; - phys_msb = phy_addr ? upper_32_bits(phy_addr) : 0; - - dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", - cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); - - iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - sev->int_rcvd = 0; - - reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; - iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for command completion */ - ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); - if (ret) { - if (psp_ret) - *psp_ret = 0; + vpsp_ring_update_head(CSV_COMMAND_PRIORITY_HIGH, hi_head); + vpsp_ring_update_head(CSV_COMMAND_PRIORITY_LOW, low_head); - dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); - *hygon_psp_hooks.psp_dead = true; - - return ret; - } - - *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; - - if (psp_ret) - *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); - - if (FIELD_GET(PSP_CMDRESP_STS, reg)) { - dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", - cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); - ret = -EIO; + if (!psp_generic_rb_supported) { + /* exit ringbuf mode by send CMD in mailbox mode */ + psp_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL, 0); } - - return ret; } int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) @@ -1279,7 +954,7 @@ int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) mutex_lock(hygon_psp_hooks.sev_cmd_mutex); } - rc = __vpsp_do_cmd_locked(cmd, phy_addr, psp_ret); + rc = psp_do_cmd_locked(cmd, (void *)phy_addr, psp_ret, PSP_DO_CMD_OP_PHYADDR); if (mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); @@ -1297,7 +972,7 @@ int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret) { int ret = 0; uint8_t prio = cmd_ctx->rb_prio; - uint16_t statval = VPSP_CMD_STATUS_RUNNING; + uint16_t statval = PSP_CMD_STATUS_RUNNING; uint32_t index = cmd_ctx->rb_index; phys_addr_t phy_addr = cmd_ctx->psp_cmdbuf_paddr; struct csv_cmdptr_entry cmd = {0}; @@ -1307,11 +982,11 @@ int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret) /* Get the retult directly if the command has been executed */ if (index >= 0) { - if (cmd_ctx->statval != VPSP_CMD_STATUS_RUNNING) + if (cmd_ctx->statval != PSP_CMD_STATUS_RUNNING) statval = cmd_ctx->statval; else - statval = vpsp_get_cmd_status(prio, index); - if (statval != VPSP_CMD_STATUS_RUNNING) { + statval = ringbuffer_get_status(&vpsp_ring_buffer[prio], index); + if (statval != PSP_CMD_STATUS_RUNNING) { psp_ret->pret = statval; psp_ret->status = VPSP_FINISH; return 0; @@ -1320,11 +995,11 @@ int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret) if (vpsp_psp_mutex_trylock()) { /* Use mailbox mode to execute a command if there is only one command */ - if (vpsp_queue_cmd_size(prio) == 1) { + if (cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr) == 1) { /* dequeue command from queue*/ - vpsp_dequeue_and_notify(prio, &cmd); - - ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret); + psp_ringbuffer_dequeue(&vpsp_ring_buffer[prio], &cmd, NULL, 1); + ret = psp_do_cmd_locked(cmd.cmd_id, (void *)phy_addr, + (int *)psp_ret, PSP_DO_CMD_OP_PHYADDR); psp_ret->status = VPSP_FINISH; vpsp_psp_mutex_unlock(); if (unlikely(ret)) { @@ -1338,9 +1013,10 @@ int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret) } } } else { - ret = 
vpsp_do_ringbuf_cmds_locked((int *)psp_ret); + psp_worker_register_notify(vpsp_ringbuffer_wakeup_locked); + ret = psp_do_ringbuffer_cmds_locked(vpsp_ring_buffer, (int *)psp_ret); if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + pr_err("[%s]: psp ringbuf execute failed %d\n", __func__, ret); psp_ret->status = VPSP_FINISH; vpsp_psp_mutex_unlock(); @@ -1377,11 +1053,11 @@ int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, return -ENODEV; /* ringbuffer mode check and parse command prio*/ - rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + rb_supported = vpsp_parse_ringbuffer_cmd_prio(&prio, (struct vpsp_cmd *)&cmd); if (rb_supported) { /* fill command in ringbuffer's queue and get index */ - index = vpsp_fill_cmd_queue(prio, cmd, phy_addr, 0); + index = psp_ringbuffer_enqueue(&vpsp_ring_buffer[prio], cmd, phy_addr, 0); if (unlikely(index < 0)) { /* do mailbox command if queuing failed*/ ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); @@ -1430,3 +1106,25 @@ int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, end: return ret; } + +/* + * parse commands from the virtual machine + */ +int vpsp_parse_ringbuffer_cmd_prio(uint8_t *prio, + struct vpsp_cmd *vcmd) +{ + int rb_supported; + + psp_ringbuffer_check_support(); + rb_supported = (psp_legacy_rb_supported || psp_generic_rb_supported); + + /* parse prio by vcmd */ + if (rb_supported && vcmd->is_high_rb) + *prio = CSV_COMMAND_PRIORITY_HIGH; + else + *prio = CSV_COMMAND_PRIORITY_LOW; + /* clear rb level bit in vcmd */ + vcmd->is_high_rb = 0; + + return rb_supported; +} diff --git a/drivers/crypto/ccp/hygon/vpsp.h b/drivers/crypto/ccp/hygon/vpsp.h index 5432c12a23a35b86add0ad57a23cec593525e9fc..ccfb68e1d523720ac05bc97b6c609a071ab5ca95 100644 --- a/drivers/crypto/ccp/hygon/vpsp.h +++ b/drivers/crypto/ccp/hygon/vpsp.h @@ -128,25 +128,11 @@ struct vpsp_dev_ctrl { } __packed data; }; -/* definition of variables used by virtual psp */ -enum VPSP_RB_CHECK_STATUS { - RB_NOT_CHECK = 0, - RB_CHECKING, - RB_CHECKED, - RB_CHECK_MAX -}; -#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) -#define VPSP_RB_OC_IS_SUPPORTED(buildid) (buildid >= 2167) -#define VPSP_CMD_STATUS_RUNNING 0xffff -#define VPSP_RB_OVERCOMMIT_SIZE 1024 - -extern struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; extern struct hygon_psp_hooks_table hygon_psp_hooks; -extern bool vpsp_in_ringbuffer_mode; extern struct kmem_cache *vpsp_cmd_ctx_slab; -extern uint8_t vpsp_rb_oc_supported; +extern int is_hygon_psp; +extern struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; -void vpsp_worker_handler(struct work_struct *unused); int vpsp_try_get_result(struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret); int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_cmd_ctx *cmd_ctx, struct vpsp_ret *psp_ret); @@ -156,7 +142,7 @@ void vpsp_cmd_ctx_obj_put(struct vpsp_cmd_ctx *cmd_ctx, bool force); int vpsp_get_dev_ctx(struct vpsp_dev_ctx **ctx, pid_t pid); int vpsp_get_default_vid_permission(void); int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl); -int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, +int vpsp_parse_ringbuffer_cmd_prio(uint8_t *prio, struct vpsp_cmd *vcmd); #endif /* __CCP_HYGON_VPSP_H__ */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 1588ffb3dc3e225c2584b25e37e0a1319d362b97..181c046699f2bc6a1ceeaf800b97403a65286216 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -83,6 +83,8 @@ enum csv_comm_state { #define CSV_RING_BUFFER_ALIGN (4 * 1024) 
#define CSV_RING_BUFFER_LEN (CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN) #define CSV_RING_BUFFER_ESIZE 16 +#define PSP_RING_BUFFER_OVERCOMMIT_SIZE 1024 +#define CSV_RING_BUFFER_ELEMENT_NUM (CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE) /** * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters