diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 583c11664c63b661e208c7e91aed45a430c1d791..67afcf8773cd06cf0dc23d32d0e24accecdce8a9 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -108,6 +108,7 @@ ifdef CONFIG_X86_64
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
 	vmlinux-objs-y += $(obj)/pgtable_64.o
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
+	vmlinux-objs-$(CONFIG_HYGON_CSV) += $(obj)/csv.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c
new file mode 100644
index 0000000000000000000000000000000000000000..18e0bde5bca220ef62f8bdbcb6ee95f5ddc3eef2
--- /dev/null
+++ b/arch/x86/boot/compressed/csv.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hygon CSV Support
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
+ */
+
+#include "misc.h"
+
+#undef __init
+#undef __initdata
+#undef __pa
+#define __init
+#define __initdata
+#define __pa(x)	((unsigned long)(x))
+
+#include
+#include
+
+/* Include code for early secure calls */
+#include "../../kernel/csv-shared.c"
+
+static unsigned int csv3_enabled __section(".data");
+static unsigned int csv3_secure_call_init;
+
+void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr)
+{
+	if (!csv3_enabled)
+		return;
+
+	if ((set | clr) & _PAGE_ENC) {
+		if (set & _PAGE_ENC)
+			csv3_early_secure_call_ident_map(__pa(address), 1,
+							 CSV3_SECURE_CMD_ENC);
+
+		if (clr & _PAGE_ENC)
+			csv3_early_secure_call_ident_map(__pa(address), 1,
+							 CSV3_SECURE_CMD_DEC);
+	}
+}
+
+/*
+ * Invoke this before jumping to the real kernel, in case the secure call
+ * pages are not yet mapped in the identity page table.
+ *
+ * If no #VC has happened, there is no identity mapping for the secure call
+ * pages in the page table, and page faults cannot be handled in the early
+ * stage when the real kernel starts running. As a result, a CSV3 guest
+ * would shut down when it accessed the secure call pages at that point.
+ */
+void csv_init_secure_call_pages(void *boot_params)
+{
+	if (!csv3_enabled || csv3_secure_call_init)
+		return;
+
+	/*
+	 * boot_params may not be sanitized, but it's OK to access the
+	 * e820_table field.
+	 */
+	csv3_scan_secure_call_pages(boot_params);
+	csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET);
+	csv3_secure_call_init = 1;
+}
+
+void csv_set_status(void)
+{
+	unsigned int eax;
+	unsigned int ebx;
+	unsigned int ecx;
+	unsigned int edx;
+
+	eax = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	/* HygonGenuine */
+	if (ebx == CPUID_VENDOR_HygonGenuine_ebx &&
+	    ecx == CPUID_VENDOR_HygonGenuine_ecx &&
+	    edx == CPUID_VENDOR_HygonGenuine_edx &&
+	    sme_me_mask) {
+		unsigned long low, high;
+
+		asm volatile("rdmsr\n" : "=a" (low), "=d" (high) :
+				"c" (MSR_AMD64_SEV));
+
+		if (low & MSR_CSV3_ENABLED)
+			csv3_enabled = 1;
+	}
+}
diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b8a33551895c2c87281ac993a7deb92e5ead653
--- /dev/null
+++ b/arch/x86/boot/compressed/csv.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hygon CSV header for early boot related functions.
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
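+ *
+ * Editor's note (an inference from csv_set_status() and the head_64.S
+ * comments in this patch, not text from the original header): the guest
+ * protection level is read from the SEV status MSR 0xc0010131, where
+ * bit 0 (SEV) indicates CSV, bit 1 (SEV-ES) indicates CSV2, and bit 30
+ * (MSR_CSV3_ENABLED_BIT) indicates CSV3.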
+ *
+ * Author: Liyang Han
+ */
+
+#ifndef BOOT_COMPRESSED_CSV_H
+#define BOOT_COMPRESSED_CSV_H
+
+#ifdef CONFIG_HYGON_CSV
+
+void csv_set_status(void);
+void csv_init_secure_call_pages(void *boot_params);
+
+void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr);
+
+#else
+
+static inline void csv_set_status(void) { }
+static inline void csv_init_secure_call_pages(void *boot_params) { }
+
+static inline void csv_update_page_attr(unsigned long address,
+					pteval_t set, pteval_t clr) { }
+
+#endif
+
+#endif /* BOOT_COMPRESSED_CSV_H */
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index bf4a10a5794f1c84d793f03e00ecd430fbead8c2..d01f9431cee14bce2403919ea27e28fc9ac28898 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -397,6 +397,16 @@ SYM_CODE_START(startup_64)
 	movq	%r15, %rdi
 	call	sev_enable
 #endif
+#ifdef CONFIG_HYGON_CSV
+	/*
+	 * Check the CSV active status. CSV and CSV2 guests are indicated by
+	 * MSR_AMD64_SEV_ENABLED_BIT and MSR_AMD64_SEV_ES_ENABLED_BIT in MSR
+	 * register 0xc0010131, respectively.
+	 * A CSV3 guest is indicated by MSR_CSV3_ENABLED in the same MSR
+	 * register 0xc0010131.
+	 */
+	call	csv_set_status
+#endif
 
 	/*
 	 * configure_5level_paging() updates the number of paging levels using
@@ -463,6 +473,16 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	movq	%r15, %rdi
 	call	initialize_identity_maps
 
+#ifdef CONFIG_HYGON_CSV
+	/*
+	 * If running as a CSV3 guest, the secure call pages must be mapped in
+	 * the identity page table before jumping to the decompressed kernel.
+	 * It is safe to scan the secure call pages here.
+	 */
+	movq	%r15, %rdi
+	call	csv_init_secure_call_pages
+#endif
+
 	/*
 	 * Do the extraction, and jump to the new kernel..
 	 */
diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c
index aead80ec70a0bf0ecc9f9f4b5e1a7121b22ae215..a7b4148a943f656eb61f97b409b369eb23d11c76 100644
--- a/arch/x86/boot/compressed/ident_map_64.c
+++ b/arch/x86/boot/compressed/ident_map_64.c
@@ -298,6 +298,9 @@ static int set_clr_page_flags(struct x86_mapping_info *info,
 	if ((set | clr) & _PAGE_ENC) {
 		clflush_page(address);
 
+		/* On CSV3, notify the secure processor to manage page attr changes */
+		csv_update_page_attr(address, set, clr);
+
 		/*
 		 * If the encryption attribute is being cleared, change the page state
 		 * to shared in the RMP table.
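For orientation, here is a minimal sketch (editor's illustration, not part of the patch) of how the boot-stub hooks above combine when a page's encryption attribute is cleared. example_make_page_shared() is hypothetical; clflush_page() and csv_update_page_attr() are the helpers shown in the hunks above.

static void example_make_page_shared(unsigned long address)
{
	/* Flush the page before its encryption attribute changes. */
	clflush_page(address);

	/*
	 * Clearing _PAGE_ENC makes csv_update_page_attr() issue
	 * CSV3_SECURE_CMD_DEC through the identity-mapped secure call
	 * pages, moving the page out of the isolated memory region.
	 */
	csv_update_page_attr(address, 0, _PAGE_ENC);
}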
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index aae1a2db4251037ef88a2e2277acc1d95d92f479..674433c522ed51f801363b0ff36becfaf7268de0 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -37,6 +37,7 @@
 #include
 
 #include "tdx.h"
+#include "csv.h"
 
 #define BOOT_CTYPE_H
 #include
diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h
index fc575d2f00cf08d0d5ee68f114276448204a5473..e2fcaf4ded5f7ceea8dd18d56068f8bbba05ca07 100644
--- a/arch/x86/include/asm/csv.h
+++ b/arch/x86/include/asm/csv.h
@@ -48,6 +48,40 @@ static inline uint32_t csv_get_smr_entry_shift(void) { return 0; }
 
 #endif /* CONFIG_HYGON_CSV */
 
+#define CPUID_VENDOR_HygonGenuine_ebx	0x6f677948
+#define CPUID_VENDOR_HygonGenuine_ecx	0x656e6975
+#define CPUID_VENDOR_HygonGenuine_edx	0x6e65476e
+
+#define MSR_CSV3_ENABLED_BIT		30
+#define MSR_CSV3_ENABLED		BIT_ULL(MSR_CSV3_ENABLED_BIT)
+
+#ifdef CONFIG_HYGON_CSV
+
+bool csv3_active(void);
+
+void __init csv_early_reset_memory(struct boot_params *bp);
+void __init csv_early_update_memory_enc(u64 vaddr, u64 pages);
+void __init csv_early_update_memory_dec(u64 vaddr, u64 pages);
+
+void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc);
+
+void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc);
+
+#else	/* !CONFIG_HYGON_CSV */
+
+static inline bool csv3_active(void) { return false; }
+
+static inline void __init csv_early_reset_memory(struct boot_params *bp) { }
+static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { }
+static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { }
+
+static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size,
+						   bool enc) { }
+
+static inline void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) { }
+
+#endif /* CONFIG_HYGON_CSV */
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_X86_CSV_H__ */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4894535cbd0bd4eb843962f4335934504e470490..4caef8c0858a05ffa0245b6d2113332721b47d8d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2150,6 +2150,9 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 		    unsigned long ipi_bitmap_high, u32 min,
 		    unsigned long icr, int op_64_bit);
 
+void kvm_arch_hypercall_init(void *func);
+void kvm_arch_hypercall_exit(void);
+
 int kvm_add_user_return_msr(u32 msr);
 int kvm_find_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0000325ab98f4dc7408d0f338aab42bf6850c367..c25d40cbbdbe0ed4264d34d14d3cf493fdd92c57 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -160,3 +160,5 @@ ifeq ($(CONFIG_X86_64),y)
 	obj-$(CONFIG_MMCONF_FAM10H)	+= mmconf-fam10h_64.o
 	obj-y				+= vsmp_64.o
 endif
+
+obj-$(CONFIG_HYGON_CSV)			+= csv.o
diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c
new file mode 100644
index 0000000000000000000000000000000000000000..0763195764daff072ef91c4553dd24c91bd5da53
--- /dev/null
+++ b/arch/x86/kernel/csv-shared.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hygon CSV support
+ *
+ * This file is shared between the decompression boot code and the
+ * running Linux kernel.
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
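+ *
+ * Editor's note: "shared" means textually included rather than linked;
+ * both arch/x86/boot/compressed/csv.c and arch/x86/kernel/csv.c pull
+ * this file in via #include "csv-shared.c".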
+ */
+
+#include
+
+/*
+ ****************************** CSV3 secure call *******************************
+ *
+ * A CSV3 guest is based on the Hygon secure isolated virtualization feature.
+ * A secure processor residing in the Hygon SOC manages the guest's private
+ * memory. The secure processor allocates or frees private memory for the
+ * CSV3 guest and manages the CSV3 guest's nested page table.
+ *
+ * As the secure processor is presented as a PCI device to the host, a CSV3
+ * guest cannot communicate with it directly. However, a CSV3 guest must
+ * request the secure processor to change its physical memory between private
+ * memory and shared memory. The CSV3 secure call command is a way of
+ * communicating with the secure processor such that the host cannot tamper
+ * with the data in the CSV3 guest. The host can only issue an external
+ * command to notify the secure processor to handle the pending guest
+ * command.
+ *
+ * CSV3 secure call pages:
+ * The secure call pages are two dedicated pages reserved by the BIOS. We
+ * refer to them as page A and page B. During the guest launch stage, the
+ * secure processor parses the address of the secure call pages. The secure
+ * processor maps both pages to the same private memory page in the NPT, and
+ * always keeps one page present and the other non-present in the NPT.
+ *
+ * CSV3 secure call main work flow:
+ * If we write the guest's commands to one page and then read them back from
+ * the other page, a nested page fault happens and the guest exits to the
+ * host. The host then issues an external command with the gpa (page A or
+ * page B) to the secure processor. The secure processor checks that the gpa
+ * in the NPF belongs to the secure call pages, reads and handles the guest's
+ * command, then switches the present bit between the two pages.
+ *
+ *          guest page A          guest page B
+ *               |                     |
+ *           ____|_____________________|____
+ *          |                               |
+ *          |       nested page table       |
+ *          |_______________________________|
+ *                \                   /
+ *                 \                 /
+ *                  \               /
+ *                   \             /
+ *                    \           /
+ *                  secure memory page
+ *
+ * CSV3_SECURE_CMD_ENC:
+ *	The CSV3 guest declares a specified memory range as secure. By
+ *	default, all of the CSV3 guest's memory is mapped as secure.
+ *	The secure processor allocates a block of secure memory and maps it
+ *	in the CSV3 guest's NPT at the guest physical memory range specified
+ *	in the CSV3 secure call.
+ *
+ * CSV3_SECURE_CMD_DEC:
+ *	The CSV3 guest declares a specified memory range as shared.
+ *	The secure processor saves the guest physical memory range in its own
+ *	RAM and frees the range in the CSV3 guest's NPT. When the CSV3 guest
+ *	accesses the memory, a new nested page fault happens.
+ *
+ * CSV3_SECURE_CMD_RESET:
+ *	The CSV3 guest switches all of the shared memory back to secure.
+ *	The secure processor resets all the shared memory in the CSV3 guest's
+ *	NPT and clears the saved shared memory ranges. Then the secure
+ *	processor allocates secure memory to map into the CSV3 guest's NPT.
+ *
+ * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE:
+ *	The CSV3 guest wants to change the secure call pages.
+ *	The secure processor re-initializes the secure call context.
+ */
+enum csv3_secure_command_type {
+	CSV3_SECURE_CMD_ENC = 1,
+	CSV3_SECURE_CMD_DEC,
+	CSV3_SECURE_CMD_RESET,
+	CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE,
+};
+
+/*
+ * Secure call page fields.
+ * The secure call page size is always 4KB. We define the CSV3 secure call
+ * page structure as below.
+ * guid:	Must be in the first 128 bytes of the page. Its value must
+ *		always be (0xceba2fa59a5d926ful, 0xa556555d276b21abul).
+ * cmd_type:	Command to be issued to the secure processor.
+ * nums:	Number of entries in the command.
+ * base_address: Start address of the memory range.
+ * size:	Size of the memory range.
+ */
+#define SECURE_CALL_ENTRY_MAX	(254)
+
+/* The size of the secure call cmd is 4KB. */
+struct csv3_secure_call_cmd {
+	union {
+		u8 guid[16];
+		u64 guid_64[2];
+	};
+	u32 cmd_type;
+	u32 nums;
+	u64 unused;
+	struct {
+		u64 base_address;
+		u64 size;
+	} entry[SECURE_CALL_ENTRY_MAX];
+};
+
+/* CSV3 secure call guid, do not change the value. */
+#define CSV3_SECURE_CALL_GUID_LOW	0xceba2fa59a5d926ful
+#define CSV3_SECURE_CALL_GUID_HIGH	0xa556555d276b21abul
+
+static u64 csv3_boot_sc_page_a __initdata = -1ul;
+static u64 csv3_boot_sc_page_b __initdata = -1ul;
+static u32 early_page_idx __initdata;
+
+/**
+ * csv3_scan_secure_call_pages - try to find the secure call pages.
+ * @boot_params:	boot parameters where e820_table resides.
+ *
+ * The secure call pages are reserved by the BIOS. Scan all the reserved
+ * pages and check for the CSV3 secure call guid bytes.
+ */
+void __init csv3_scan_secure_call_pages(struct boot_params *boot_params)
+{
+	struct boot_e820_entry *entry;
+	struct csv3_secure_call_cmd *sc_page;
+	u64 offset;
+	u64 addr;
+	u8 i;
+	u8 table_num;
+	int count = 0;
+
+	if (!boot_params)
+		return;
+
+	if (csv3_boot_sc_page_a != -1ul && csv3_boot_sc_page_b != -1ul)
+		return;
+
+	table_num = min_t(u8, boot_params->e820_entries,
+			  E820_MAX_ENTRIES_ZEROPAGE);
+	entry = &boot_params->e820_table[0];
+	for (i = 0; i < table_num; i++) {
+		if (entry[i].type != E820_TYPE_RESERVED)
+			continue;
+
+		addr = entry[i].addr & PAGE_MASK;
+		for (offset = 0; offset < entry[i].size; offset += PAGE_SIZE) {
+			sc_page = (void *)(addr + offset);
+			if (sc_page->guid_64[0] == CSV3_SECURE_CALL_GUID_LOW &&
+			    sc_page->guid_64[1] == CSV3_SECURE_CALL_GUID_HIGH) {
+				if (count == 0)
+					csv3_boot_sc_page_a = addr + offset;
+				else if (count == 1)
+					csv3_boot_sc_page_b = addr + offset;
+				count++;
+			}
+			if (count >= 2)
+				return;
+		}
+	}
+}
+
+/**
+ * csv3_early_secure_call_ident_map - issue an early secure call command at
+ * the stage where the identity page table is in use.
+ * @base_address:	start address of the specified memory range.
+ * @num_pages:		number of pages in the range.
+ * @cmd_type:		secure call cmd type.
+ */
+void __init csv3_early_secure_call_ident_map(u64 base_address, u64 num_pages,
+					     enum csv3_secure_command_type cmd_type)
+{
+	struct csv3_secure_call_cmd *page_rd;
+	struct csv3_secure_call_cmd *page_wr;
+	u32 cmd_ack;
+
+	if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul)
+		return;
+
+	/* The mapping is an identity mapping at this stage. */
+	page_rd = (void *)(early_page_idx ? csv3_boot_sc_page_a : csv3_boot_sc_page_b);
+	page_wr = (void *)(early_page_idx ? csv3_boot_sc_page_b : csv3_boot_sc_page_a);
+
+	while (1) {
+		page_wr->cmd_type = (u32)cmd_type;
+		page_wr->nums = 1;
+		page_wr->entry[0].base_address = base_address;
+		page_wr->entry[0].size = num_pages << PAGE_SHIFT;
+
+		/*
+		 * The command write to page_wr must be done before retrieving
+		 * the cmd ack from page_rd, which is ensured by the mb below.
+		 */
+		mb();
+
+		cmd_ack = page_rd->cmd_type;
+		if (cmd_ack != cmd_type)
+			break;
+	}
+	early_page_idx ^= 1;
+}
diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c
new file mode 100644
index 0000000000000000000000000000000000000000..4f80c97798deba1c3146d01a266fb672012e63c1
--- /dev/null
+++ b/arch/x86/kernel/csv.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HYGON CSV support
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
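+ *
+ * Editor's sketch (an inference from the helpers below, not text from the
+ * original file): every secure call variant in this file follows the same
+ * write/ack handshake on a page pair, roughly:
+ *
+ *	for (;;) {
+ *		write cmd and entries into page_wr;
+ *		barrier();			// order write vs. ack read
+ *		if (page_rd->cmd_type != cmd)	// ack: NPF was handled
+ *			break;
+ *	}
+ *	swap the roles of page_wr/page_rd for the next call;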
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include "../mm/mm_internal.h"
+#include "csv-shared.c"
+
+struct secure_call_pages {
+	struct csv3_secure_call_cmd page_a;
+	struct csv3_secure_call_cmd page_b;
+};
+
+static u32 csv3_percpu_secure_call_init __initdata;
+static u32 early_secure_call_page_idx __initdata;
+
+static DEFINE_PER_CPU(struct secure_call_pages*, secure_call_data);
+static DEFINE_PER_CPU(int, secure_call_page_idx);
+
+typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages,
+				      enum csv3_secure_command_type cmd_type);
+
+void __init csv_early_reset_memory(struct boot_params *bp)
+{
+	if (!csv3_active())
+		return;
+
+	csv3_scan_secure_call_pages(bp);
+	csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET);
+}
+
+void __init csv_early_update_memory_dec(u64 vaddr, u64 pages)
+{
+	if (!csv3_active())
+		return;
+
+	if (pages)
+		csv3_early_secure_call_ident_map(__pa(vaddr), pages,
+						 CSV3_SECURE_CMD_DEC);
+}
+
+void __init csv_early_update_memory_enc(u64 vaddr, u64 pages)
+{
+	if (!csv3_active())
+		return;
+
+	if (pages)
+		csv3_early_secure_call_ident_map(__pa(vaddr), pages,
+						 CSV3_SECURE_CMD_ENC);
+}
+
+static void __init csv3_alloc_secure_call_data(int cpu)
+{
+	struct secure_call_pages *data;
+
+	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
+	if (!data)
+		panic("Can't allocate CSV3 secure call data");
+
+	per_cpu(secure_call_data, cpu) = data;
+}
+
+static void __init csv3_secure_call_update_table(void)
+{
+	int cpu;
+	struct secure_call_pages *data;
+	struct csv3_secure_call_cmd *page_rd;
+	struct csv3_secure_call_cmd *page_wr;
+	u32 cmd_ack;
+
+	if (!csv3_active())
+		return;
+
+	page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, PAGE_SIZE);
+	page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, PAGE_SIZE);
+
+	while (1) {
+		page_wr->cmd_type = CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE;
+		page_wr->nums = 0;
+
+		/* initialize per-cpu secure call pages */
+		for_each_possible_cpu(cpu) {
+			if (cpu >= SECURE_CALL_ENTRY_MAX)
+				panic("csv does not support cpus > %d\n",
+				      SECURE_CALL_ENTRY_MAX);
+			csv3_alloc_secure_call_data(cpu);
+			data = per_cpu(secure_call_data, cpu);
+			per_cpu(secure_call_page_idx, cpu) = 0;
+			page_wr->entry[cpu].base_address = __pa(data);
+			page_wr->entry[cpu].size = PAGE_SIZE * 2;
+			page_wr->nums++;
+		}
+
+		/*
+		 * The command write to page_wr must be done before retrieving
+		 * the cmd ack from page_rd, which is ensured by the mb below.
+		 */
+		mb();
+
+		cmd_ack = page_rd->cmd_type;
+		if (cmd_ack != CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE)
+			break;
+	}
+
+	early_memunmap(page_rd, PAGE_SIZE);
+	early_memunmap(page_wr, PAGE_SIZE);
+}
+
+/**
+ * __csv3_early_secure_call - issue a secure call command at the stage where
+ * the new kernel page table has been created and the early identity page
+ * table is deprecated.
+ * @base_address:	start address of the specified memory range.
+ * @num_pages:		number of pages in the range.
+ * @cmd_type:		secure call cmd type.
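+ *
+ * (Editor's note: unlike csv3_early_secure_call_ident_map(), the identity
+ * mapping is no longer available here, so the helper below temporarily
+ * maps the page pair with early_memremap_encrypted() and unmaps it again
+ * after the handshake.)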
+ */
+static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages,
+					    enum csv3_secure_command_type cmd_type)
+{
+	struct csv3_secure_call_cmd *page_rd;
+	struct csv3_secure_call_cmd *page_wr;
+	u32 cmd_ack;
+
+	if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul)
+		return;
+
+	if (!csv3_percpu_secure_call_init) {
+		csv3_secure_call_update_table();
+		csv3_percpu_secure_call_init = 1;
+	}
+
+	if (early_secure_call_page_idx == 0) {
+		page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a,
+							   PAGE_SIZE);
+		page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b,
+							   PAGE_SIZE);
+	} else {
+		page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_a,
+							   PAGE_SIZE);
+		page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_b,
+							   PAGE_SIZE);
+	}
+
+	while (1) {
+		page_wr->cmd_type = (u32)cmd_type;
+		page_wr->nums = 1;
+		page_wr->entry[0].base_address = base_address;
+		page_wr->entry[0].size = num_pages << PAGE_SHIFT;
+
+		/*
+		 * The command write to page_wr must be done before retrieving
+		 * the cmd ack from page_rd, which is ensured by the mb below.
+		 */
+		mb();
+
+		cmd_ack = page_rd->cmd_type;
+		if (cmd_ack != cmd_type)
+			break;
+	}
+
+	early_memunmap(page_rd, PAGE_SIZE);
+	early_memunmap(page_wr, PAGE_SIZE);
+
+	early_secure_call_page_idx ^= 1;
+}
+
+static void csv3_secure_call(u64 base_address, u64 num_pages,
+			     enum csv3_secure_command_type cmd_type)
+{
+	u32 cmd_ack;
+	struct secure_call_pages *data;
+	struct csv3_secure_call_cmd *page_rd;
+	struct csv3_secure_call_cmd *page_wr;
+	int page_idx;
+	int cpu;
+
+	preempt_disable();
+
+	cpu = smp_processor_id();
+	data = per_cpu(secure_call_data, cpu);
+	page_idx = per_cpu(secure_call_page_idx, cpu);
+
+	if (page_idx == 0) {
+		page_rd = &data->page_a;
+		page_wr = &data->page_b;
+	} else {
+		page_rd = &data->page_b;
+		page_wr = &data->page_a;
+	}
+
+	while (1) {
+		page_wr->cmd_type = (u32)cmd_type;
+		page_wr->nums = 1;
+		page_wr->entry[0].base_address = base_address;
+		page_wr->entry[0].size = num_pages << PAGE_SHIFT;
+
+		/*
+		 * The command write to page_wr must be done before retrieving
+		 * the cmd ack from page_rd, which is ensured by the smp_mb
+		 * below.
+		 */
+		smp_mb();
+
+		cmd_ack = page_rd->cmd_type;
+		if (cmd_ack != cmd_type)
+			break;
+	}
+
+	per_cpu(secure_call_page_idx, cpu) ^= 1;
+	preempt_enable();
+}
+
+static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr,
+				  u64 pages, bool enc)
+{
+	u64 vaddr_end, vaddr_next;
+	u64 psize, pmask;
+	u64 last_paddr, paddr;
+	u64 last_psize = 0;
+	pte_t *kpte;
+	int level;
+	enum csv3_secure_command_type cmd_type;
+
+	cmd_type = enc ? CSV3_SECURE_CMD_ENC : CSV3_SECURE_CMD_DEC;
+	vaddr_next = vaddr;
+	vaddr_end = vaddr + (pages << PAGE_SHIFT);
+	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+		kpte = lookup_address(vaddr, &level);
+		if (!kpte || pte_none(*kpte)) {
+			panic("invalid pte, vaddr 0x%llx\n", vaddr);
+			goto out;
+		}
+
+		psize = page_level_size(level);
+		pmask = page_level_mask(level);
+
+		vaddr_next = (vaddr & pmask) + psize;
+		paddr = ((pte_pfn(*kpte) << PAGE_SHIFT) & pmask) +
+			(vaddr & ~pmask);
+		psize -= (vaddr & ~pmask);
+
+		if (vaddr_end - vaddr < psize)
+			psize = vaddr_end - vaddr;
+		if (last_psize == 0 || (last_paddr + last_psize) == paddr) {
+			last_paddr = (last_psize == 0 ? paddr : last_paddr);
+			last_psize += psize;
+		} else {
+			secure_call(last_paddr, last_psize >> PAGE_SHIFT,
+				    cmd_type);
+			last_paddr = paddr;
+			last_psize = psize;
+		}
+	}
+
+	if (last_psize)
+		secure_call(last_paddr, last_psize >> PAGE_SHIFT, cmd_type);
+
+out:
+	return;
+}
+
+void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc)
+{
+	u64 npages;
+
+	if (!csv3_active())
+		return;
+
+	npages = (size + (vaddr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	__csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK,
+			      npages, enc);
+}
+
+void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc)
+{
+	if (!csv3_active())
+		return;
+
+	__csv3_memory_enc_dec(csv3_secure_call, vaddr & PAGE_MASK, pages, enc);
+}
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 360dcd0d8454b2df1d4b123f90e04a176d195aa6..1defe865de67ec8fafa3149817b4da57050a74b6 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * Manage page tables very early on.
@@ -160,6 +161,14 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
 		i = pmd_index(vaddr);
 		pmd[i] -= sme_get_me_mask();
 	}
+
+	/* On CSV3, move the shared pages out of the isolated memory region. */
+	if (csv3_active()) {
+		vaddr = (unsigned long)__start_bss_decrypted;
+		csv_early_reset_memory(bp);
+		csv_early_update_memory_dec((unsigned long)vaddr,
+					    (vaddr_end - vaddr) >> PAGE_SHIFT);
+	}
 }
 
 /*
diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c
index 562bbf9d95bef5611fc8188dec2d5d27177c3830..cbaf28b5c079dc2ef6f8964cbb7beb9af0bc13e0 100644
--- a/arch/x86/kvm/svm/csv.c
+++ b/arch/x86/kvm/svm/csv.c
@@ -14,6 +14,8 @@
 #include
 #include
 #include
+#include
+#include
 #include "kvm_cache_regs.h"
 #include "svm.h"
 #include "csv.h"
@@ -25,6 +27,9 @@
 /* Function and variable pointers for hooks */
 struct hygon_kvm_hooks_table hygon_kvm_hooks;
 
+/* enable/disable CSV3 support */
+static bool csv3_enabled = true;
+
 static struct kvm_x86_ops csv_x86_ops;
 static const char csv_vm_mnonce[] = "VM_ATTESTATION";
 static DEFINE_MUTEX(csv_cmd_batch_mutex);
@@ -134,7 +139,19 @@ static void csv_reset_mempool_offset(void)
 	g_mempool_offset = 0;
 }
 
-int csv_alloc_trans_mempool(void)
+static void csv_free_trans_mempool(void)
+{
+	int i;
+
+	for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) {
+		kfree(g_trans_mempool[i]);
+		g_trans_mempool[i] = NULL;
+	}
+
+	csv_reset_mempool_offset();
+}
+
+static int csv_alloc_trans_mempool(void)
 {
 	int i;
 
@@ -157,18 +174,6 @@ int csv_alloc_trans_mempool(void)
 	return -ENOMEM;
 }
 
-void csv_free_trans_mempool(void)
-{
-	int i;
-
-	for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) {
-		kfree(g_trans_mempool[i]);
-		g_trans_mempool[i] = NULL;
-	}
-
-	csv_reset_mempool_offset();
-}
-
 static void __maybe_unused *get_trans_data_from_mempool(size_t size)
 {
 	void *trans = NULL;
@@ -795,6 +800,864 @@ static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+struct encrypt_data_block {
+	struct {
+		u64 npages:	12;
+		u64 pfn:	52;
+	} entry[512];
+};
+
+union csv3_page_attr {
+	struct {
+		u64 reserved:	1;
+		u64 rw:		1;
+		u64 reserved1:	49;
+		u64 mmio:	1;
+		u64 reserved2:	12;
+	};
+	u64 val;
+};
+
+enum csv3_pg_level {
+	CSV3_PG_LEVEL_NONE,
+	CSV3_PG_LEVEL_4K,
+	CSV3_PG_LEVEL_2M,
+	CSV3_PG_LEVEL_NUM
+};
+
+struct shared_page_block {
+	struct list_head list;
+	struct page **pages;
+	u64 count;
+};
+
+struct kvm_csv_info {
+	struct kvm_sev_info *sev;
+
+	bool csv3_active;	/* CSV3 enabled guest */
+
+	/* List of shared pages */
+	u64 total_shared_page_count;
+	struct list_head shared_pages_list;
+	void *cached_shared_page_block;
+	struct mutex shared_page_block_lock;
+
+	struct list_head smr_list; /* List of guest secure memory regions */
+	unsigned long nodemask;	/* Nodemask where CSV3 guest's memory resides */
+};
+
+struct kvm_svm_csv {
+	struct kvm_svm kvm_svm;
+	struct kvm_csv_info csv_info;
+};
+
+struct secure_memory_region {
+	struct list_head list;
+	u64 npages;
+	u64 hpa;
+};
+
+static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm)
+{
+	return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm);
+}
+
+static int to_csv3_pg_level(int level)
+{
+	int ret;
+
+	switch (level) {
+	case PG_LEVEL_4K:
+		ret = CSV3_PG_LEVEL_4K;
+		break;
+	case PG_LEVEL_2M:
+		ret = CSV3_PG_LEVEL_2M;
+		break;
+	default:
+		ret = CSV3_PG_LEVEL_NONE;
+	}
+
+	return ret;
+}
+
+static bool csv3_guest(struct kvm *kvm)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+
+	return sev_es_guest(kvm) && csv->csv3_active;
+}
+
+static inline void csv3_init_update_npt(struct csv3_data_update_npt *update_npt,
+					gpa_t gpa, u32 error, u32 handle)
+{
+	memset(update_npt, 0x00, sizeof(*update_npt));
+
+	update_npt->gpa = gpa & PAGE_MASK;
+	update_npt->error_code = error;
+	update_npt->handle = handle;
+}
+
+static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct kvm_csv3_init_data params;
+
+	if (unlikely(csv->csv3_active))
+		return -EINVAL;
+
+	if (unlikely(!sev->es_active))
+		return -EINVAL;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params)))
+		return -EFAULT;
+
+	csv->csv3_active = true;
+	csv->sev = sev;
+	csv->nodemask = (unsigned long)params.nodemask;
+
+	INIT_LIST_HEAD(&csv->shared_pages_list);
+	INIT_LIST_HEAD(&csv->smr_list);
+	mutex_init(&csv->shared_page_block_lock);
+
+	return 0;
+}
+
+static bool csv3_is_mmio_pfn(kvm_pfn_t pfn)
+{
+	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
+				     pfn_to_hpa(pfn + 1) - 1,
+				     E820_TYPE_RAM);
+}
+
+static int csv3_set_guest_private_memory(struct kvm *kvm)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot;
+	struct secure_memory_region *smr;
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct csv3_data_set_guest_private_memory *set_guest_private_memory;
+	struct csv3_data_memory_region *regions;
+	nodemask_t nodemask;
+	nodemask_t *nodemask_ptr;
+
+	LIST_HEAD(tmp_list);
+	struct list_head *pos, *q;
+	u32 i = 0, count = 0, remainder;
+	int ret = 0, error;
+	u64 size = 0, nr_smr = 0, nr_pages = 0;
+	u32 smr_entry_shift;
+	int bkt;
+
+	unsigned int flags = FOLL_HWPOISON;
+	int npages;
+	struct page *page;
+
+	if (!csv3_guest(kvm))
+		return -ENOTTY;
+
+	nodes_clear(nodemask);
+	for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG)
+		if (i < MAX_NUMNODES)
+			node_set(i, nodemask);
+
+	nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map;
+
+	set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory),
+					   GFP_KERNEL_ACCOUNT);
+	if (!set_guest_private_memory)
+		return -ENOMEM;
+
+	regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+	if (!regions) {
+		kfree(set_guest_private_memory);
+		return -ENOMEM;
+	}
+
+	/* Get the guest secure memory size */
+	kvm_for_each_memslot(memslot, bkt, slots) {
+		npages = get_user_pages_unlocked(memslot->userspace_addr, 1,
+						 &page, flags);
+		if (npages != 1)
+			continue;
+
+		nr_pages += memslot->npages;
+
+		put_page(page);
+	}
+
+	/*
+	 * NPT secure memory size
+	 *
+	 * PTEs_entries = nr_pages
+	 * PDEs_entries = nr_pages / 512
+	 * PDPEs_entries = nr_pages / (512 * 512)
+	 * PML4Es_entries = nr_pages / (512 * 512 * 512)
+	 *
+	 * Totals_entries = nr_pages + nr_pages / 512 + nr_pages / (512 * 512) +
+	 *	nr_pages / (512 * 512 * 512) <= nr_pages + nr_pages / 256
+	 *
+	 * Total_NPT_size = (Totals_entries / 512) * PAGE_SIZE = ((nr_pages +
+	 *	nr_pages / 256) / 512) * PAGE_SIZE = nr_pages * 8 + nr_pages / 32
+	 *	<= nr_pages * 9
+	 *
+	 * For example, a 4 GiB guest (nr_pages = 2^20) needs at most
+	 * 9 * 2^20 bytes = 9 MiB of NPT backing store.
+	 */
+	smr_entry_shift = csv_get_smr_entry_shift();
+	size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) +
+	       ALIGN(nr_pages * 9, 1UL << smr_entry_shift);
+	nr_smr = size >> smr_entry_shift;
+	remainder = nr_smr;
+	for (i = 0; i < nr_smr; i++) {
+		smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT);
+		if (!smr) {
+			ret = -ENOMEM;
+			goto e_free_smr;
+		}
+
+		smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift),
+						     nodemask_ptr,
+						     get_order(1 << smr_entry_shift));
+		if (!smr->hpa) {
+			kfree(smr);
+			ret = -ENOMEM;
+			goto e_free_smr;
+		}
+
+		smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT);
+		list_add_tail(&smr->list, &tmp_list);
+
+		regions[count].size = (1UL << smr_entry_shift);
+		regions[count].base_address = smr->hpa;
+		count++;
+
+		if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) {
+			set_guest_private_memory->nregions = count;
+			set_guest_private_memory->handle = sev->handle;
+			set_guest_private_memory->regions_paddr = __sme_pa(regions);
+
+			/* set the secure memory region for launch encrypt data */
+			ret = hygon_kvm_hooks.sev_issue_cmd(kvm,
+					CSV3_CMD_SET_GUEST_PRIVATE_MEMORY,
+					set_guest_private_memory, &error);
+			if (ret)
+				goto e_free_smr;
+
+			memset(regions, 0, PAGE_SIZE);
+			remainder -= count;
+			count = 0;
+		}
+	}
+
+	list_splice(&tmp_list, &csv->smr_list);
+
+	goto done;
+
+e_free_smr:
+	if (!list_empty(&tmp_list)) {
+		list_for_each_safe(pos, q, &tmp_list) {
+			smr = list_entry(pos, struct secure_memory_region, list);
+			if (smr) {
+				csv_release_to_contiguous(smr->hpa,
+							  smr->npages << PAGE_SHIFT);
+				list_del(&smr->list);
+				kfree(smr);
+			}
+		}
+	}
+done:
+	kfree(set_guest_private_memory);
+	kfree(regions);
+	return ret;
+}
+
+static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct kvm_csv3_launch_encrypt_data params;
+	struct csv3_data_launch_encrypt_data *encrypt_data = NULL;
+	struct encrypt_data_block *blocks = NULL;
+	u8 *data = NULL;
+	u32 offset;
+	u32 num_entries, num_entries_in_block;
+	u32 num_blocks, num_blocks_max;
+	u32 i, n;
+	unsigned long pfn, pfn_sme_mask;
+	int ret = 0;
+
+	if (!csv3_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params))) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	if ((params.len & ~PAGE_MASK) || !params.len || !params.uaddr) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Allocate all the guest memory from CMA */
+	ret = csv3_set_guest_private_memory(kvm);
+	if (ret)
+		goto exit;
+
+	num_entries = params.len / PAGE_SIZE;
+	num_entries_in_block = ARRAY_SIZE(blocks->entry);
+	num_blocks = (num_entries + num_entries_in_block - 1) / num_entries_in_block;
+	num_blocks_max = ARRAY_SIZE(encrypt_data->data_blocks);
+
+	if (num_blocks >= num_blocks_max) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	data = vzalloc(params.len);
+	if (!data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	if (copy_from_user(data, (void __user *)params.uaddr, params.len)) {
+		ret = -EFAULT;
+		goto data_free;
+	}
+
+	blocks = vzalloc(num_blocks * sizeof(*blocks));
+	if (!blocks) {
+		ret = -ENOMEM;
+		goto data_free;
+	}
+
+	for (offset = 0, i = 0, n = 0; offset < params.len; offset += PAGE_SIZE) {
+		pfn = vmalloc_to_pfn(offset + data);
+		pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT;
+		if (offset && ((blocks[n].entry[i].pfn + 1) == pfn_sme_mask))
+			blocks[n].entry[i].npages += 1;
+		else {
+			if (offset) {
+				i = (i + 1) % num_entries_in_block;
+				n = (i == 0) ? (n + 1) : n;
+			}
+			blocks[n].entry[i].pfn = pfn_sme_mask;
+			blocks[n].entry[i].npages = 1;
+		}
+	}
+
+	encrypt_data = kzalloc(sizeof(*encrypt_data), GFP_KERNEL);
+	if (!encrypt_data) {
+		ret = -ENOMEM;
+		goto block_free;
+	}
+
+	encrypt_data->handle = csv->sev->handle;
+	encrypt_data->length = params.len;
+	encrypt_data->gpa = params.gpa;
+	for (i = 0; i <= n; i++) {
+		encrypt_data->data_blocks[i] =
+			__sme_set(vmalloc_to_pfn((void *)blocks + i * sizeof(*blocks)) << PAGE_SHIFT);
+	}
+
+	clflush_cache_range(data, params.len);
+	ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA,
+					    encrypt_data, &argp->error);
+
+	kfree(encrypt_data);
+block_free:
+	vfree(blocks);
+data_free:
+	vfree(data);
+exit:
+	return ret;
+}
+
+static int csv3_sync_vmsa(struct vcpu_svm *svm)
+{
+	struct sev_es_save_area *save = svm->sev_es.vmsa;
+
+	/* Check some debug related fields before encrypting the VMSA */
+	if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
+		return -EINVAL;
+
+	memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));
+
+	/* Sync registers per spec.
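+	 * (Editor's note: only RAX, RDX, RIP, XCR0 and XSS are refreshed from
+	 * the vCPU state below; everything else was taken from the VMCB by
+	 * the memcpy above.)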
*/ + save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + save->xcr0 = svm->vcpu.arch.xcr0; + save->xss = svm->vcpu.arch.ia32_xss; + + return 0; +} + +static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_vmcb *encrypt_vmcb = NULL; + struct kvm_vcpu *vcpu; + int ret = 0; + unsigned long i = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + encrypt_vmcb = kzalloc(sizeof(*encrypt_vmcb), GFP_KERNEL); + if (!encrypt_vmcb) { + ret = -ENOMEM; + goto exit; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = csv3_sync_vmsa(svm); + if (ret) + goto e_free; + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + clflush_cache_range(svm->vmcb, PAGE_SIZE); + encrypt_vmcb->handle = csv->sev->handle; + encrypt_vmcb->vcpu_id = i; + encrypt_vmcb->vmsa_addr = __sme_pa(svm->sev_es.vmsa); + encrypt_vmcb->vmsa_len = PAGE_SIZE; + encrypt_vmcb->shadow_vmcb_addr = __sme_pa(svm->vmcb); + encrypt_vmcb->shadow_vmcb_len = PAGE_SIZE; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB, + encrypt_vmcb, &argp->error); + if (ret) + goto e_free; + + svm->current_vmcb->pa = encrypt_vmcb->secure_vmcb_addr; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free: + kfree(encrypt_vmcb); +exit: + return ret; +} + +static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, + unsigned long npages) +{ + gfn_t gfn; + gfn_t gfn_end; + + gfn = gpa >> PAGE_SHIFT; + gfn_end = gfn + npages; +#ifdef KVM_HAVE_MMU_RWLOCK + write_lock(&vcpu->kvm->mmu_lock); +#else + spin_lock(&vcpu->kvm->mmu_lock); +#endif + for (; gfn < gfn_end; gfn++) + kvm_vcpu_mark_page_dirty(vcpu, gfn); +#ifdef KVM_HAVE_MMU_RWLOCK + write_unlock(&vcpu->kvm->mmu_lock); +#else + spin_unlock(&vcpu->kvm->mmu_lock); +#endif +} + +static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code) +{ + int r = 0; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + union csv3_page_attr page_attr = {.mmio = 1}; + union csv3_page_attr page_attr_mask = {.mmio = 1}; + struct csv3_data_update_npt *update_npt; + int psp_ret; + + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + update_npt->page_attr = page_attr.val; + update_npt->page_attr_mask = page_attr_mask.val; + update_npt->level = CSV3_PG_LEVEL_4K; + + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); + + if (psp_ret != SEV_RET_SUCCESS) + r = -EFAULT; + + kfree(update_npt); +exit: + return r; +} + +static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, + u32 error_code, struct kvm_memory_slot *slot, + int *psp_ret_ptr, kvm_pfn_t pfn, u32 level) +{ + int r = 0; + struct csv3_data_update_npt *update_npt; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + int psp_ret = 0; + + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + + update_npt->spa = pfn << PAGE_SHIFT; + update_npt->level = level; + + if (!csv3_is_mmio_pfn(pfn)) + update_npt->spa |= sme_me_mask; + + r = 
hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + kvm_flush_remote_tlbs(vcpu->kvm); + + csv3_mark_page_dirty(vcpu, update_npt->gpa, update_npt->npages); + + if (psp_ret_ptr) + *psp_ret_ptr = psp_ret; + + kfree(update_npt); +exit: + return r; +} + +static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t *pfn) +{ + struct page **pages, *page; + u64 hva; + int npinned; + kvm_pfn_t tmp_pfn; + struct kvm *kvm = vcpu->kvm; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct shared_page_block *shared_page_block = NULL; + u64 npages = PAGE_SIZE / sizeof(struct page *); + bool write = !(slot->flags & KVM_MEM_READONLY); + + tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write, + NULL, NULL); + if (unlikely(is_error_pfn(tmp_pfn))) + return -ENOMEM; + + if (csv3_is_mmio_pfn(tmp_pfn)) { + *pfn = tmp_pfn; + return 0; + } + + if (!page_maybe_dma_pinned(pfn_to_page(tmp_pfn))) { + kvm_release_pfn_clean(tmp_pfn); + if (csv->total_shared_page_count % npages == 0) { + shared_page_block = kzalloc(sizeof(*shared_page_block), + GFP_KERNEL_ACCOUNT); + if (!shared_page_block) + return -ENOMEM; + + pages = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!pages) { + kfree(shared_page_block); + return -ENOMEM; + } + + shared_page_block->pages = pages; + list_add_tail(&shared_page_block->list, + &csv->shared_pages_list); + csv->cached_shared_page_block = shared_page_block; + } else { + shared_page_block = csv->cached_shared_page_block; + pages = shared_page_block->pages; + } + + hva = __gfn_to_hva_memslot(slot, gfn); + npinned = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, + &page); + if (npinned != 1) { + if (shared_page_block->count == 0) { + list_del(&shared_page_block->list); + kfree(pages); + kfree(shared_page_block); + } + return -ENOMEM; + } + + pages[csv->total_shared_page_count % npages] = page; + shared_page_block->count++; + csv->total_shared_page_count++; + *pfn = page_to_pfn(page); + } else { + kvm_release_pfn_clean(tmp_pfn); + *pfn = tmp_pfn; + } + + return 0; +} + +static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = PG_LEVEL_4K; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeks, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). 
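+	 *
+	 * (Editor's note: this walker mirrors KVM's host_pfn_mapping_level().)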
+	 */
+	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+	if (pgd_none(pgd))
+		goto out;
+
+	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+	if (p4d_none(p4d) || !p4d_present(p4d))
+		goto out;
+
+	pud = READ_ONCE(*pud_offset(&p4d, hva));
+	if (pud_none(pud) || !pud_present(pud))
+		goto out;
+
+	if (pud_large(pud)) {
+		level = PG_LEVEL_1G;
+		goto out;
+	}
+
+	pmd = READ_ONCE(*pmd_offset(&pud, hva));
+	if (pmd_none(pmd) || !pmd_present(pmd))
+		goto out;
+
+	if (pmd_large(pmd))
+		level = PG_LEVEL_2M;
+
+out:
+	local_irq_restore(flags);
+	return level;
+}
+
+static int csv3_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn,
+			      struct kvm_memory_slot *slot)
+{
+	int level;
+	int page_num;
+	gfn_t gfn_base;
+
+	if (csv3_is_mmio_pfn(pfn)) {
+		level = PG_LEVEL_4K;
+		goto end;
+	}
+
+	if (!PageCompound(pfn_to_page(pfn))) {
+		level = PG_LEVEL_4K;
+		goto end;
+	}
+
+	level = PG_LEVEL_2M;
+	page_num = KVM_PAGES_PER_HPAGE(level);
+	gfn_base = gfn & ~(page_num - 1);
+
+	/*
+	 * The guest address must be 2M aligned within the memslot.
+	 */
+	if ((gfn_base < slot->base_gfn) ||
+	    (gfn_base + page_num > slot->base_gfn + slot->npages)) {
+		level = PG_LEVEL_4K;
+		goto end;
+	}
+
+	/*
+	 * The hva in the memslot must be 2M aligned.
+	 */
+	if (__gfn_to_hva_memslot(slot, gfn_base) & ~PMD_MASK) {
+		level = PG_LEVEL_4K;
+		goto end;
+	}
+
+	level = __pfn_mapping_level(vcpu->kvm, gfn, slot);
+
+	/*
+	 * The firmware supports the 2M/4K levels only.
+	 */
+	level = level > PG_LEVEL_2M ? PG_LEVEL_2M : level;
+
+end:
+	return to_csv3_pg_level(level);
+}
+
+static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+			   gfn_t gfn, u32 error_code)
+{
+	int ret = 0;
+	int psp_ret = 0;
+	int level;
+	kvm_pfn_t pfn = KVM_PFN_NOSLOT;
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(vcpu->kvm)->csv_info;
+
+	if (error_code & PFERR_PRESENT_MASK)
+		level = CSV3_PG_LEVEL_4K;
+	else {
+		mutex_lock(&csv->shared_page_block_lock);
+		ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn);
+		mutex_unlock(&csv->shared_page_block_lock);
+		if (ret)
+			goto exit;
+
+		level = csv3_mapping_level(vcpu, gfn, pfn, slot);
+	}
+
+	ret = __csv3_page_fault(vcpu, gfn << PAGE_SHIFT, error_code, slot,
+				&psp_ret, pfn, level);
+
+	if (psp_ret != SEV_RET_SUCCESS)
+		ret = -EFAULT;
+exit:
+	return ret;
+}
+
+static void csv_vm_destroy(struct kvm *kvm)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct list_head *head = &csv->shared_pages_list;
+	struct list_head *pos, *q;
+	struct shared_page_block *shared_page_block;
+	struct kvm_vcpu *vcpu;
+	unsigned long i = 0;
+
+	struct list_head *smr_head = &csv->smr_list;
+	struct secure_memory_region *smr;
+
+	if (csv3_guest(kvm)) {
+		mutex_lock(&csv->shared_page_block_lock);
+		if (!list_empty(head)) {
+			list_for_each_safe(pos, q, head) {
+				shared_page_block = list_entry(pos,
+						struct shared_page_block, list);
+				unpin_user_pages(shared_page_block->pages,
+						 shared_page_block->count);
+				kfree(shared_page_block->pages);
+				csv->total_shared_page_count -=
+					shared_page_block->count;
+				list_del(&shared_page_block->list);
+				kfree(shared_page_block);
+			}
+		}
+		mutex_unlock(&csv->shared_page_block_lock);
+
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			struct vcpu_svm *svm = to_svm(vcpu);
+
+			svm->current_vmcb->pa = __sme_pa(svm->vmcb);
+		}
+	}
+
+	if (likely(csv_x86_ops.vm_destroy))
+		csv_x86_ops.vm_destroy(kvm);
+
+	if (!csv3_guest(kvm))
+		return;
+
+	/* free the secure memory regions */
+	if (!list_empty(smr_head)) {
+		list_for_each_safe(pos, q, smr_head) {
+			smr = list_entry(pos, struct secure_memory_region, list);
+			if (smr) {
+				csv_release_to_contiguous(smr->hpa,
+							  smr->npages << PAGE_SHIFT);
+				list_del(&smr->list);
+				kfree(smr);
+			}
+		}
+	}
+}
+
+static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
+				  u32 error_code)
+{
+	gfn_t gfn = gpa_to_gfn(gpa);
+	struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn);
+	int ret;
+	int r = -EIO;
+
+	if (kvm_is_visible_memslot(slot))
+		ret = csv3_page_fault(vcpu, slot, gfn, error_code);
+	else
+		ret = csv3_mmio_page_fault(vcpu, gpa, error_code);
+
+	if (!ret)
+		r = 1;
+
+	return r;
+}
+
+static int csv_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u32 exit_code = svm->vmcb->control.exit_code;
+	int ret = -EIO;
+
+	/*
+	 * NPF handling for a CSV3 guest is dedicated.
+	 */
+	if (csv3_guest(vcpu->kvm) && exit_code == SVM_EXIT_NPF) {
+		gpa_t gpa = __sme_clr(svm->vmcb->control.exit_info_2);
+		u64 error_code = svm->vmcb->control.exit_info_1;
+
+		ret = csv3_handle_page_fault(vcpu, gpa, error_code);
+	} else {
+		if (likely(csv_x86_ops.handle_exit))
+			ret = csv_x86_ops.handle_exit(vcpu, exit_fastpath);
+	}
+
+	return ret;
+}
+
+static void csv_guest_memory_reclaimed(struct kvm *kvm)
+{
+	if (!csv3_guest(kvm)) {
+		if (likely(csv_x86_ops.guest_memory_reclaimed))
+			csv_x86_ops.guest_memory_reclaimed(kvm);
+	}
+}
+
 static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
@@ -836,6 +1699,19 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 		 */
 		r = csv_receive_update_vmsa(kvm, &sev_cmd);
 		break;
+	case KVM_CSV3_INIT:
+		if (!csv3_enabled) {
+			r = -ENOTTY;
+			goto out;
+		}
+		r = csv3_guest_init(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_LAUNCH_ENCRYPT_DATA:
+		r = csv3_launch_encrypt_data(kvm, &sev_cmd);
+		break;
+	case KVM_CSV3_LAUNCH_ENCRYPT_VMCB:
+		r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd);
+		break;
 	default:
 		/*
 		 * If the command is compatible between CSV and SEV, the
@@ -851,6 +1727,7 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
 		r = -EFAULT;
 
+out:
 	mutex_unlock(&kvm->lock);
 	return r;
 }
@@ -1062,7 +1939,7 @@ static int csv_control_post_system_reset(struct kvm *kvm)
 
 struct csv_asid_userid *csv_asid_userid_array;
 
-int csv_alloc_asid_userid_array(unsigned int nr_asids)
+static int csv_alloc_asid_userid_array(unsigned int nr_asids)
 {
 	int ret = 0;
 
@@ -1077,14 +1954,59 @@ int csv_alloc_asid_userid_array(unsigned int nr_asids)
 	return ret;
 }
 
-void csv_free_asid_userid_array(void)
+static void csv_free_asid_userid_array(void)
 {
 	kfree(csv_asid_userid_array);
 	csv_asid_userid_array = NULL;
 }
 
+#else	/* !CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */
+
+static int csv_alloc_asid_userid_array(unsigned int nr_asids)
+{
+	pr_warn("reuse ASID is unavailable\n");
+	return -EFAULT;
+}
+
+static void csv_free_asid_userid_array(void)
+{
+}
+
 #endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */
 
+void __init csv_hardware_setup(unsigned int max_csv_asid)
+{
+	unsigned int nr_asids = max_csv_asid + 1;
+
+	/*
+	 * Allocate a memory pool to speed up live migration of
+	 * the CSV/CSV2 guests. If the allocation fails, no
+	 * acceleration is performed at live migration.
+	 */
+	csv_alloc_trans_mempool();
+	/*
+	 * Allocate a buffer to support reuse ASID; reuse ASID
+	 * will not work if the allocation fails.
+	 */
+	csv_alloc_asid_userid_array(nr_asids);
+
+	/* CSV3 depends on X86_FEATURE_CSV3 */
+	if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3))
+		csv3_enabled = true;
+	else
+		csv3_enabled = false;
+
+	pr_info("CSV3 %s (ASIDs 1 - %u)\n",
+		csv3_enabled ? "enabled" : "disabled", max_csv_asid);
+}
+
+void csv_hardware_unsetup(void)
+{
+	/* Free the memory that was allocated in csv_hardware_setup(). */
+	csv_free_trans_mempool();
+	csv_free_asid_userid_array();
+}
+
 void csv_exit(void)
 {
 }
@@ -1104,4 +2026,12 @@ void __init csv_init(struct kvm_x86_ops *ops)
 	ops->vm_attestation = csv_vm_attestation;
 	ops->control_pre_system_reset = csv_control_pre_system_reset;
 	ops->control_post_system_reset = csv_control_post_system_reset;
+
+	if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) {
+		ops->vm_size = sizeof(struct kvm_svm_csv);
+
+		ops->vm_destroy = csv_vm_destroy;
+		ops->handle_exit = csv_handle_exit;
+		ops->guest_memory_reclaimed = csv_guest_memory_reclaimed;
+	}
 }
diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h
index 0d5d2b7191aab4cdcb5e90abf4dd147e0ea4c56b..9b0563062a941aad1e2e35b400706604f7a77e97 100644
--- a/arch/x86/kvm/svm/csv.h
+++ b/arch/x86/kvm/svm/csv.h
@@ -41,17 +41,8 @@ struct csv_asid_userid {
 	u32 userid_len;
 	char userid[ASID_USERID_LENGTH];
 };
-
 extern struct csv_asid_userid *csv_asid_userid_array;
 
-int csv_alloc_asid_userid_array(unsigned int nr_asids);
-void csv_free_asid_userid_array(void);
-
-#else
-
-static inline int csv_alloc_asid_userid_array(unsigned int nr_asids) { return -ENOMEM; }
-static inline void csv_free_asid_userid_array(void) { }
-
 #endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */
 
 #ifdef CONFIG_HYGON_CSV
@@ -79,8 +70,9 @@ extern struct hygon_kvm_hooks_table {
 void __init csv_init(struct kvm_x86_ops *ops);
 void csv_exit(void);
 
-int csv_alloc_trans_mempool(void);
-void csv_free_trans_mempool(void);
+void __init csv_hardware_setup(unsigned int max_csv_asid);
+void csv_hardware_unsetup(void);
+
 int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 bool csv_has_emulated_ghcb_msr(struct kvm *kvm);
@@ -98,8 +90,9 @@ static inline bool csv2_state_unstable(struct vcpu_svm *svm)
 static inline void __init csv_init(struct kvm_x86_ops *ops) { }
 static inline void csv_exit(void) { }
 
-static inline int csv_alloc_trans_mempool(void) { return 0; }
-static inline void csv_free_trans_mempool(void) { }
+static inline void __init csv_hardware_setup(unsigned int max_csv_asid) { }
+static inline void csv_hardware_unsetup(void) { }
+
 static inline int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; }
 static inline
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0c469487694f07fc9e040d7b05a89de37cbaef00..8f3b63627462fcdc47f3ecdc2189ed19b7021b2f 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2450,19 +2450,8 @@ void __init sev_hardware_setup(void)
 	 */
 	sev_install_hooks();
 
-	if (sev_enabled) {
-		/*
-		 * Allocate a memory pool to speed up live migration of
-		 * the CSV/CSV2 guests. If the allocation fails, no
-		 * acceleration is performed at live migration.
-		 */
-		csv_alloc_trans_mempool();
-		/*
-		 * Allocate a buffer to support reuse ASID, reuse ASID
-		 * will not work if the allocation fails.
-		 */
-		csv_alloc_asid_userid_array(nr_asids);
-	}
+	if (sev_enabled)
+		csv_hardware_setup(max_sev_asid);
 }
 #endif
 
@@ -2474,11 +2463,8 @@ void sev_hardware_unsetup(void)
 	if (!sev_enabled)
 		return;
 
-	/* Free the memory that allocated in sev_hardware_setup(). */
-	if (is_x86_vendor_hygon()) {
-		csv_free_trans_mempool();
-		csv_free_asid_userid_array();
-	}
+	if (is_x86_vendor_hygon())
+		csv_hardware_unsetup();
 
 	/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
 	sev_flush_asids(1, max_sev_asid);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8b7736d096c32d3dfb23dcdcfe5fd046c401b14a..2bbeaa220802eaf1db921e89b4228c3e241dae37 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1140,6 +1141,9 @@ static void svm_hardware_unsetup(void)
 	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
 		     get_order(IOPM_SIZE));
 	iopm_base = 0;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		kvm_arch_hypercall_exit();
 }
 
 static void init_seg(struct vmcb_seg *seg)
@@ -5194,6 +5198,26 @@ static __init void svm_set_cpu_caps(void)
 	sev_set_cpu_caps();
 }
 
+static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3)
+{
+	int ret = 0;
+	struct kvm_vpsp vpsp = {
+		.kvm = kvm,
+		.write_guest = kvm_write_guest,
+		.read_guest = kvm_read_guest
+	};
+
+	switch (nr) {
+	case KVM_HC_PSP_OP:
+		ret = kvm_pv_psp_op(&vpsp, a0, a1, a2, a3);
+		break;
+
+	default:
+		ret = -KVM_ENOSYS;
+		break;
+	}
+	return ret;
+}
+
 static __init int svm_hardware_setup(void)
 {
 	int cpu;
@@ -5362,6 +5386,9 @@ static __init int svm_hardware_setup(void)
 	 */
 	allow_smaller_maxphyaddr = !npt_enabled;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		kvm_arch_hypercall_init(kvm_hygon_arch_hypercall);
+
 	return 0;
 
 err:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7b78155b3610601f6d6f91b16776e39bab224f7d..77d2c6caa6e554277865f6967464e187492ac637 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -320,6 +320,8 @@ u64 __read_mostly host_xcr0;
 
 static struct kmem_cache *x86_emulator_cache;
 
+static int (*kvm_arch_hypercall)(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3);
+
 /*
  * When called, it means the previous get/set msr reached an invalid msr.
  * Return true if we want to ignore/silent this failed msr access.
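For context, a hedged sketch (editor's illustration, not part of the patch) of how a Hygon guest would reach the KVM_HC_PSP_OP handler wired up below: the guest issues an ordinary KVM hypercall, e.g. via kvm_hypercall4(). The meaning of a0..a3 is defined by kvm_pv_psp_op(), so the arguments here are placeholders.

static long example_guest_psp_call(unsigned long a0, unsigned long a1,
				   unsigned long a2, unsigned long a3)
{
	/*
	 * vmmcall with nr == KVM_HC_PSP_OP; on the host this lands in
	 * kvm_emulate_hypercall() -> kvm_arch_hypercall() ->
	 * kvm_hygon_arch_hypercall() -> kvm_pv_psp_op().
	 */
	return kvm_hypercall4(KVM_HC_PSP_OP, a0, a1, a2, a3);
}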
@@ -9891,7 +9893,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	}
 
 	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 &&
-	    !(is_x86_vendor_hygon() && nr == KVM_HC_VM_ATTESTATION)) {
+	    !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP))) {
 		ret = -KVM_EPERM;
 		goto out;
 	}
@@ -9928,6 +9930,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		kvm_sched_yield(vcpu, a0);
 		ret = 0;
 		break;
+	case KVM_HC_PSP_OP:
+		ret = -KVM_ENOSYS;
+		if (kvm_arch_hypercall)
+			ret = kvm_arch_hypercall(vcpu->kvm, nr, a0, a1, a2, a3);
+		break;
 	case KVM_HC_MAP_GPA_RANGE: {
 		u64 gpa = a0, npages = a1, attrs = a2;
 
@@ -13711,6 +13718,18 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
 
+void kvm_arch_hypercall_init(void *func)
+{
+	kvm_arch_hypercall = func;
+}
+EXPORT_SYMBOL_GPL(kvm_arch_hypercall_init);
+
+void kvm_arch_hypercall_exit(void)
+{
+	kvm_arch_hypercall = NULL;
+}
+EXPORT_SYMBOL_GPL(kvm_arch_hypercall_exit);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 1873a65b5655786e9b05fcfdfc06fd9d4a17e6ee..f7d88ad030b9b6a6b3e1be331eda06cc2bd3d916 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include
 
 #include "mm_internal.h"
 
@@ -344,6 +345,14 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
 	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
 
+	/*
+	 * On CSV3, the shared and private page attr changes should be managed
+	 * by the secure processor. Private pages live in the isolated memory
+	 * region, while shared pages live outside of it.
+	 */
+	if (csv3_active())
+		csv_memory_enc_dec(vaddr, npages, enc);
+
 	return true;
 }
 
@@ -377,6 +386,9 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 	 */
 	clflush_cache_range(__va(pa), size);
 
+	if (csv3_active())
+		goto skip_in_place_enc_dec;
+
 	/* Encrypt/decrypt the contents in-place */
 	if (enc) {
 		sme_early_encrypt(pa, size);
@@ -390,6 +402,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
 	}
 
+skip_in_place_enc_dec:
 	/* Change the page encryption mask. */
 	new_pte = pfn_pte(pfn, new_prot);
 	set_pte_atomic(kpte, new_pte);
@@ -469,6 +482,15 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 		early_set_mem_enc_dec_hypercall(start, size, enc);
 out:
 	__flush_tlb_all();
+
+	/*
+	 * On CSV3, the shared and private page attr changes should be managed
+	 * by the secure processor. Private pages live in the isolated memory
+	 * region, while shared pages live outside of it.
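+	 * (Editor's note: vaddr has been advanced by the loop above, so the
+	 * original start address is recomputed here as vaddr_end - size.)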
+ */ + if (csv3_active()) + csv_early_memory_enc_dec(vaddr_end - size, size, enc); + return ret; } diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index adcd970de30a73f742635d40fd847283f81bd836..1871850cbb6044af9e2ca3a8ab1f9c10b5527782 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -26,6 +26,10 @@ #include #include +u32 vendor_ebx __section(".data") = 0; +u32 vendor_ecx __section(".data") = 0; +u32 vendor_edx __section(".data") = 0; + void print_hygon_cc_feature_info(void) { /* Secure Memory Encryption */ @@ -45,6 +49,9 @@ void print_hygon_cc_feature_info(void) /* Encrypted Register State */ if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) pr_info(" HYGON CSV2"); + + if (csv3_active()) + pr_info(" HYGON CSV3"); } /* @@ -106,6 +113,24 @@ static bool __init __maybe_unused csv3_check_cpu_support(void) return !!me_mask && csv3_enabled; } +/* csv3_active() indicates whether the guest is protected by CSV3 */ +bool noinstr csv3_active(void) +{ + if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { + u32 eax = 0; + + native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + } + + /* HygonGenuine */ + if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx) + return !!(sev_status & MSR_CSV3_ENABLED); + else + return false; +} + /******************************************************************************/ /**************************** CSV3 CMA interfaces *****************************/ /******************************************************************************/ @@ -257,6 +282,7 @@ static void __init csv_cma_reserve_mem(void) 1 << CSV_CMA_SHIFT, node); break; } + cma_enable_concurrency(csv_cma->cma); if (start > cma_get_base(csv_cma->cma) || !start) start = cma_get_base(csv_cma->cma); diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 927088b2c3d3f2c60bd5c3fa9735ba90d2e0bdd3..301284e07603bb353c3c04d1128293f325b5d479 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -210,5 +210,29 @@ config TCG_FTPM_TEE help This driver proxies for firmware TPM running in TEE. +config TCG_HYGON + tristate "Hygon TPM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TPM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tpm_hygon. + +config TCM_HYGON + tristate "Hygon TCM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TCM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tcm_hygon.
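With the mem_encrypt hooks above in place, a CSV3 guest needs no new driver-side API to share memory with the host: the generic set_memory_decrypted()/set_memory_encrypted() entry points now funnel into csv_memory_enc_dec() (or csv_early_memory_enc_dec() during early boot), which asks the secure processor to move pages across the isolated-region boundary. A usage sketch of the caller-side pattern, using only standard kernel APIs:

    /*
     * Illustrative sketch, not part of this patch: allocate one page and
     * convert it to shared so the host can access it.  Under CSV3 the
     * conversion is forwarded to the secure processor via
     * csv_memory_enc_dec().
     */
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/set_memory.h>

    static void *alloc_shared_page(void)
    {
            struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

            if (!page)
                    return NULL;

            if (set_memory_decrypted((unsigned long)page_address(page), 1)) {
                    /* conversion state is unknown on failure; leak the page */
                    return NULL;
            }
            return page_address(page);
    }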
+ source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 0222b1ddb3105e6726fa93e2ea3500d404964cbc..8f868c9b9ce78f53ec54620bec947f099fd13624 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -42,3 +42,5 @@ obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o obj-$(CONFIG_TCG_CRB) += tpm_crb.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o +obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o +obj-$(CONFIG_TCM_HYGON) += tcm_hygon.o diff --git a/drivers/char/tpm/tcm_hygon.c b/drivers/char/tpm/tcm_hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..63f5e61d9b3e604130c09984635b9849a4246b9f --- /dev/null +++ b/drivers/char/tpm/tcm_hygon.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TCM2.0 device driver. + * + * Copyright (C) 2023 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TCM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TCM_BUF_LEN 4096 + +struct tcm_hygon_priv { + u8 priv_buf[MAX_TCM_BUF_LEN]; +}; + +struct tcm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tcm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tcm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tcm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = sizeof(priv->priv_buf); + u32 cmd_size = (u32)count; + u8 *p = priv->priv_buf; + + if (buf_size - sizeof(u32) - sizeof(u32) < count) { + ret = -E2BIG; + goto out; + } + + *(u32 *)p = cpu_to_be32(buf_size); + p += sizeof(buf_size); + *(u32 *)p = cpu_to_be32(cmd_size); + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TCM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, error); + ret = -EIO; + } + +out: + return ret; +} + +static const struct tpm_class_ops tcm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tcm_c_recv, + .send = tcm_c_send, +}; + +static void tcm_bios_log_teardown(struct tpm_chip *chip) +{ + int i; + struct inode *inode; + + /* securityfs_remove currently doesn't take care of handling sync + * between removal and opening of pseudo files. To handle this, a + * workaround is added by making i_private = NULL here during removal + * and by checking it during open(), both within inode_lock()/unlock(). + * This design ensures that open() either safely gets kref or fails.
+ */ + for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { + if (chip->bios_dir[i]) { + inode = d_inode(chip->bios_dir[i]); + inode_lock(inode); + inode->i_private = NULL; + inode_unlock(inode); + securityfs_remove(chip->bios_dir[i]); + } + } +} + +static void tcm_chip_unregister(struct tpm_chip *chip) +{ + if (IS_ENABLED(CONFIG_HW_RANDOM_TPM)) + hwrng_unregister(&chip->hwrng); + tcm_bios_log_teardown(chip); + cdev_del(&chip->cdevs); + put_device(&chip->devs); + cdev_device_del(&chip->cdev, &chip->dev); +} + +static int hygon_tcm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tcm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tcm_c_ops); + if (IS_ERR(chip)) { + pr_err("tpmm_chip_alloc failed\n"); + ret = PTR_ERR(chip); + goto err; + } + + ret = dev_set_name(&chip->dev, "tcm%d", chip->dev_num); + if (ret) { + pr_err("tcm device set name failed\n"); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tcm chip_register failed\n"); + goto err; + } + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + device_del(&chip->devs); + ret = dev_set_name(&chip->devs, "tcmrm%d", chip->dev_num); + if (ret) { + pr_err("tcmrm device set name failed\n"); + goto err_dev; + } + ret = device_add(&chip->devs); + if (ret) { + pr_err("devs add failed\n"); + goto err_dev; + } + } + + pr_info("Hygon TCM2 detected\n"); + + return 0; + +err_dev: + tcm_chip_unregister(chip); + +err: + return ret; +} + +static void hygon_tcm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TCM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tcm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tcm2_device_ids[] = { + {"HYGT0201", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tcm2_device_ids); + +static struct acpi_driver hygon_tcm2_acpi_driver = { + .name = "tcm_hygon", + .ids = hygon_tcm2_device_ids, + .ops = { + .add = hygon_tcm2_acpi_add, + .remove = hygon_tcm2_acpi_remove, + }, + .drv = { + .pm = &tcm_hygon_pm, + }, +}; + +static int __init hygon_tcm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tcm2_acpi_driver); } + +static void __exit hygon_tcm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tcm2_acpi_driver); +} + +/* + * hygon_tcm2_init must be done after ccp module init, but before + * ima module init. That's why we use device_initcall_sync, which is + * called after all device_initcalls (including ccp) but before the + * late_initcalls (including ima). + */ +device_initcall_sync(hygon_tcm2_init); +module_exit(hygon_tcm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TCM2 device driver for Hygon PSP"); diff --git a/drivers/char/tpm/tpm_hygon.c b/drivers/char/tpm/tpm_hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..8e509df90290e972ee57c7f13de5ac5a28788cba --- /dev/null +++ b/drivers/char/tpm/tpm_hygon.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TPM2.0 device driver. + * + * Copyright (C) 2020 Hygon Info Technologies Ltd.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TPM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TPM_BUF_LEN 4096 +#define MAX_CMD_BUF_LEN (MAX_TPM_BUF_LEN + sizeof(u32) + sizeof(u32)) + +struct tpm_hygon_priv { + u8 priv_buf[MAX_CMD_BUF_LEN]; +}; + +/* + * The TPM header struct name differs between kernel versions, + * so redefine it here for driver porting. + */ +struct tpm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tpm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tpm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tpm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = cpu_to_be32(sizeof(priv->priv_buf)); + u32 cmd_size = cpu_to_be32((u32)count); + u8 *p = priv->priv_buf; + + /* mirror tcm_c_send(): reject commands that would overflow priv_buf */ + if (sizeof(priv->priv_buf) - sizeof(u32) - sizeof(u32) < count) + return -E2BIG; + + *(u32 *)p = buf_size; + p += sizeof(buf_size); + *(u32 *)p = cmd_size; + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TPM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, error); + ret = -EIO; + } + + return ret; +} + +static const struct tpm_class_ops tpm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_c_recv, + .send = tpm_c_send, +}; + +static int hygon_tpm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tpm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tpm_c_ops); + if (IS_ERR(chip)) { + pr_err("tpmm_chip_alloc failed\n"); + ret = PTR_ERR(chip); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tpm_chip_register failed\n"); + goto err; + } + + pr_info("Hygon TPM2 detected\n"); + + return 0; + +err: + return ret; +} + +static void hygon_tpm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TPM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tpm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tpm2_device_ids[] = { + {"HYGT0101", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tpm2_device_ids); + +static struct acpi_driver hygon_tpm2_acpi_driver = { + .name = "tpm_hygon", + .ids = hygon_tpm2_device_ids, + .ops = { + .add = hygon_tpm2_acpi_add, + .remove = hygon_tpm2_acpi_remove, }, + .drv = { + .pm = &tpm_hygon_pm, + }, +}; + +static int __init hygon_tpm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tpm2_acpi_driver); +} + +static void __exit hygon_tpm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tpm2_acpi_driver); +} + +/* + * hygon_tpm2_init must be done after ccp
module init, but before + * ima module init. That's why we use device_initcall_sync, which is + * called after all device_initcalls (including ccp) but before the + * late_initcalls (including ima). + */ +device_initcall_sync(hygon_tpm2_init); +module_exit(hygon_tpm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TPM2 device driver for Hygon PSP"); diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 0e3c86d9427f18f8f0afa0baec59762f4a46004a..93b25e88c81b50de26ad24d3fb9969da45cfbaaf 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -80,3 +80,22 @@ config HYGON_PSP2CPU_CMD depends on CRYPTO_DEV_SP_PSP help Hygon PSP2CPU Command Support + +config TDM_DEV_HYGON + bool "Hygon TDM Interface" + default y + depends on CRYPTO_DEV_CCP_DD + depends on HYGON_PSP2CPU_CMD + help + Hygon TDM driver + +config TDM_KERNEL_GUARD + tristate "Hygon TDM kernel guard" + default y + depends on TDM_DEV_HYGON + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_SM3 + help + Key parts of the kernel are protected by TDM technology; the SCT and + IDT are protected by default, and others can be added later according + to the requirements. diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 5c338c72f8e6a0c1c76432d69f379c31e70b26b8..8bc2b02352832a633e72b01546498f55e1c53e52 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -17,7 +17,10 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ dbc.o \ hygon/psp-dev.o \ hygon/csv-dev.o \ - hygon/ring-buffer.o + hygon/ring-buffer.o \ + hygon/vpsp.o + +ccp-$(CONFIG_TDM_DEV_HYGON) += hygon/tdm-dev.o obj-$(CONFIG_CRYPTO_DEV_HCT) += hygon/hct.o @@ -30,6 +33,7 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o $(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 2bf6d1801650c5a9c8dd954b3c618a16240a663b..93c17faef3e78ad93cf7c2a5c9a1ca1921f850bf 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -30,6 +30,20 @@ u32 hygon_csv_build; int csv_comm_mode = CSV_COMM_MAILBOX_ON; +/* definition of variables used by the virtual PSP */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_CMD_STATUS_RUNNING 0xffff +static DEFINE_MUTEX(vpsp_rb_mutex); +struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +static uint8_t vpsp_rb_supported; +static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); + /* * csv_update_api_version used to update the api version of HYGON CSV * firmware at driver side.
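The tpm_hygon and tcm_hygon send paths above share one wire format toward the PSP: the driver-private buffer begins with two big-endian 32-bit words, the total buffer size and the command size, followed by the raw TPM/TCM command; the receive paths then reinterpret the bytes after those two words as a TPM header and trust its length field. A self-contained sketch of that framing (the struct and helper names are hypothetical; the drivers open-code this logic):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct psp_tpm_frame {
            __be32 buf_size;        /* size of the whole private buffer */
            __be32 cmd_size;        /* size of the command that follows */
            u8 payload[];           /* raw TPM/TCM command bytes */
    };

    /* Pack a TPM/TCM command into a PSP command buffer. */
    static int psp_tpm_frame_cmd(u8 *dst, size_t dst_size,
                                 const u8 *cmd, size_t cmd_len)
    {
            struct psp_tpm_frame *frame = (struct psp_tpm_frame *)dst;

            if (dst_size < sizeof(*frame) + cmd_len)
                    return -E2BIG;

            frame->buf_size = cpu_to_be32(dst_size);
            frame->cmd_size = cpu_to_be32(cmd_len);
            memcpy(frame->payload, cmd, cmd_len);
            return 0;
    }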
@@ -179,6 +193,7 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) void __user *argp = (void __user *)arg; struct sev_issue_cmd input; int ret = -EFAULT; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); if (!hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; @@ -195,7 +210,13 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (input.cmd > CSV_MAX) return -EINVAL; - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } switch (input.cmd) { case CSV_HGSC_CERT_IMPORT: @@ -217,14 +238,20 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) * Release the mutex before calling the native ioctl function * because it will acquire the mutex. */ - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); return hygon_psp_hooks.sev_ioctl(file, ioctl, arg); } if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); return ret; } @@ -379,11 +406,18 @@ static int csv_do_ringbuf_cmds(int *psp_ret) { struct sev_user_data_status data; int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); if (!hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } rc = __csv_ring_buffer_enter_locked(psp_ret); if (rc) @@ -396,7 +430,10 @@ static int csv_do_ringbuf_cmds(int *psp_ret) csv_comm_mode = CSV_COMM_MAILBOX_ON; cmd_unlock: - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); return rc; } @@ -442,7 +479,7 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer /* the command queue will point to @cmd_ptr_buffer */ csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, - CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); if (!stat_val_buffer) /* the status queue will point to @stat_val_buffer */ csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, - CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); return 0; } @@ -653,7 +690,6 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error if (ret) dev_warn(sev->dev, "CSV3: fail to set secure memory region, CSV3 support unavailable\n"); - return ret; } @@ -667,3 +703,471 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error } #endif /* CONFIG_HYGON_CSV */ + +static int
get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; +} + +static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; +} + +static void vpsp_set_cmd_status(int prio, int index, int status) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + statval[index].status = status; +} + +static int vpsp_get_cmd_status(int prio, int index) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index].status; +} + +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int vpsp_dequeue_cmd(int prio, int index, + struct csv_cmdptr_entry *cmd_ptr) +{ + mutex_lock(&vpsp_rb_mutex); + + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + + mutex_unlock(&vpsp_rb_mutex); + + return 0; +} + +/* + * Add a command from the virtual machine to the queue, to support + * execution in ringbuffer mode + */ +static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + int index = -1; + + cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + mutex_lock(&vpsp_rb_mutex); + index = get_queue_tail(&vpsp_ring_buffer[prio]); + + /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ + if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { + index = -1; + goto out; + } + + /* The status must be written first, and then the cmd can be enqueued */ + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + +out: + mutex_unlock(&vpsp_rb_mutex); + return index; +} + +static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, + uint32_t new_head) +{ + uint32_t orig_head = get_queue_head(ring_buffer); + uint32_t complete_num = 0; + + if (new_head >= orig_head) + complete_num = new_head - orig_head; + else + complete_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + + 1; + + ring_buffer->cmd_ptr.head += complete_num; +} + +static int vpsp_ring_buffer_queue_init(void) +{ + int i; + int ret; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); + if (ret) + return ret; + } + + return 0; +} + +static int vpsp_psp_mutex_trylock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) + return psp_mutex_trylock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + return mutex_trylock(hygon_psp_hooks.sev_cmd_mutex); +} + +static int vpsp_psp_mutex_unlock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return 0; +} + +static int __vpsp_ring_buffer_enter_locked(int *error) +{ + int ret; + struct csv_data_ring_buffer *data; +
struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct sev_device *sev = psp_master->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct psp_device *psp = psp_master; + unsigned int reg; + int ret = 0; + unsigned int rb_tail, rb_head; + unsigned int rb_ctl; + struct sev_device *sev; + + if (!psp) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + /* The PSP responds to x86 only when all queues are empty or an error happens */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in the ring buffer to complete */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, &reg, (*hygon_psp_hooks.psp_timeout)*10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "csv command in ringbuffer mode timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + return ret; + } + /* cmd error happens */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + /* update head */ + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], + reg & PSP_RBHEAD_QLO_HEAD_MASK); + + if (psp_ret) + *psp_ret = vpsp_get_cmd_status(prio, index); + + return ret; +} + +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct sev_user_data_status data; + int rc; + + rc = __vpsp_ring_buffer_enter_locked(psp_ret); + if (rc) + goto end; + + rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); + + /* exit ringbuffer mode by sending a
CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +end: + return rc; +} + +/** + * struct user_data_status - PLATFORM_STATUS command parameters + * + * @api_major: major API version + * @api_minor: minor API version + * @state: platform state + * @owner: self-owned or externally owned + * @chip_secure: ES or MP chip + * @fw_enc: whether this FW is encrypted + * @fw_sign: whether this FW is signed + * @reserved1: reserved, should be set to zero + * @config_es: platform config flags for csv-es + * @build: Firmware Build ID for this API version + * @guest_count: number of active guests + */ +struct user_data_status { + uint8_t api_major; /* Out */ + uint8_t api_minor; /* Out */ + uint8_t state; /* Out */ + uint8_t owner : 1, /* Out */ + chip_secure : 1, /* Out */ + fw_enc : 1, /* Out */ + fw_sign : 1, /* Out */ + reserved1 : 4; /*reserved*/ + uint32_t config_es : 1, /* Out */ + build : 31; /* Out */ + uint32_t guest_count; /* Out */ +} __packed; + +/* + * Check whether the firmware supports ringbuffer mode and parse + * commands from the virtual machine + */ +static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, + struct vpsp_cmd *vcmd) +{ + int ret, error; + int rb_supported; + int rb_check_old = RB_NOT_CHECK; + struct user_data_status *status = NULL; + + if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old, + RB_CHECKING)) { + /* get buildid to check if the firmware supports ringbuffer mode */ + status = kzalloc(sizeof(*status), GFP_KERNEL); + if (!status) { + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + ret = sev_platform_status((struct sev_user_data_status *)status, + &error); + if (ret) { + pr_warn("failed to get status[%#x], use default command mode.\n", error); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + kfree(status); + goto end; + } + + /* check if the firmware supports the ringbuffer mode */ + if (VPSP_RB_IS_SUPPORTED(status->build)) { + if (vpsp_ring_buffer_queue_init()) { + pr_warn("vpsp_ring_buffer_queue_init failed, use default command mode\n"); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + kfree(status); + goto end; + } + WRITE_ONCE(vpsp_rb_supported, 1); + } + + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + kfree(status); + } + +end: + rb_supported = READ_ONCE(vpsp_rb_supported); + /* parse prio by vcmd */ + if (rb_supported && vcmd->is_high_rb) + *prio = CSV_COMMAND_PRIORITY_HIGH; + else + *prio = CSV_COMMAND_PRIORITY_LOW; + /* clear rb level bit in vcmd */ + vcmd->is_high_rb = 0; + + return rb_supported; +} + +int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret); +/* + * Try to obtain the result again by the command index; this + * interface is used in ringbuffer mode + */ +int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, + struct vpsp_ret *psp_ret) +{ + int ret = 0; + struct csv_cmdptr_entry cmd = {0}; + + /* Get the result directly if the command has been executed */ + if (vpsp_get_cmd_status(prio, index) != + VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = vpsp_get_cmd_status(prio, index); + psp_ret->status = VPSP_FINISH; + return 0; + } + + if (vpsp_psp_mutex_trylock()) { + /* Use mailbox mode to execute a command if there is only one command */ + if (vpsp_queue_cmd_size(prio) == 1) { + /* dequeue the command from the queue */ + vpsp_dequeue_cmd(prio,
index, &cmd); + + ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data, + (int *)psp_ret); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + } else { + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, + index); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + __func__, ret); + goto end; + } + } + } else { + /* Change the command to the running state if getting the mutex fails */ + psp_ret->index = index; + psp_ret->status = VPSP_RUNNING; + return 0; + } +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_get_result); + +/* + * Send the virtual PSP command to the PSP device and try to get the + * execution result. This interface and the vpsp_try_get_result + * interface are executed asynchronously. If the command completes, + * the result is returned to the VM; otherwise, the vpsp_try_get_result + * interface must be used to obtain the result later. + */ +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) +{ + int ret = 0; + int rb_supported; + int index = -1; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + /* check ringbuffer mode and parse the command priority */ + rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + (struct vpsp_cmd *)&cmd); + if (rb_supported) { + /* fill command in ringbuffer's queue and get index */ + index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); + if (unlikely(index < 0)) { + /* fall back to a mailbox command if queuing failed */ + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + goto end; + } + + /* try to get result from the ringbuffer command */ + ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); + goto end; + } + } else { + /* mailbox mode */ + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + } + +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 7fb2b2e368eb9e1dd7c4f207dccd9b689354ca27..aff04b77477d8065800ae39989bd2c8a7538f8a2 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -14,12 +14,420 @@ #include #include #include +#include +#include +#include +#include #include "psp-dev.h" /* Function and variable pointers for hooks */ struct hygon_psp_hooks_table hygon_psp_hooks; +static struct psp_misc_dev *psp_misc; +#define HYGON_PSP_IOC_TYPE 'H' +enum HYGON_PSP_OPCODE { + HYGON_PSP_MUTEX_ENABLE = 1, + HYGON_PSP_MUTEX_DISABLE, + HYGON_VPSP_CTRL_OPT, + HYGON_PSP_OPCODE_MAX_NR, +}; + +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + union { + unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; + unsigned char reserved[128]; + }
data; +}; + +uint64_t atomic64_exchange(volatile uint64_t *dst, uint64_t val) +{ +#if 0 + asm volatile( + "lock ; " + "xchgq %0, %1;" + : "=r" (val), "=m" (*dst) + : "0" (val), "m" (*dst) + : "memory"); /* no-clobber list */ + return val; +#endif + return xchg(dst, val); +} + +int psp_mutex_init(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + mutex->locked = 0; + return 0; +} + +int psp_mutex_trylock(struct psp_mutex *mutex) +{ + if (atomic64_exchange(&mutex->locked, 1)) + return 0; + else + return 1; +} + +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) +{ + int ret = 0; + unsigned long je; + + je = jiffies + msecs_to_jiffies(ms); + do { + if (psp_mutex_trylock(mutex)) { + ret = 1; + break; + } + } while ((ms == 0) || time_before(jiffies, je)); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_mutex_lock_timeout); + +int psp_mutex_unlock(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + + atomic64_exchange(&mutex->locked, 0); + return 0; +} +EXPORT_SYMBOL_GPL(psp_mutex_unlock); + +static int mmap_psp(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long page; + + page = virt_to_phys((void *)psp_misc->data_pg_aligned) >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start), + vma->vm_page_prot)) { + printk(KERN_INFO "remap failed..."); + return -1; + } + vm_flags_mod(vma, VM_DONTDUMP|VM_DONTEXPAND, 0); + printk(KERN_INFO "remap_pfn_rang page:[%lu] ok.\n", page); + return 0; +} + +static ssize_t read_psp(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_INFO "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_to_user(buf, (char *)psp_misc->data_pg_aligned + *ppos, count); + if (remaining) + return -EFAULT; + *ppos += count; + + return count; +} + +static ssize_t write_psp(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining, written; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_INFO "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_from_user((char *)psp_misc->data_pg_aligned + *ppos, buf, count); + written = count - remaining; + if (!written) + return -EFAULT; + + *ppos += written; + + return written; +} +DEFINE_RWLOCK(vpsp_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. + * but, the performance of finding vid is determined by g_vpsp_vid_num, + * so VPSP_VID_MAX_ENTRIES can be set larger. + */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +struct vpsp_vid_entry { + uint32_t vid; + pid_t pid; +}; +static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_vid_entry entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. 
+ */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} +EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); + +/** + * When the virtual machine executes the 'tkm' command, + * it needs to retrieve the corresponding 'vid' + * by performing a binary search using 'kvm->userspace_pid'. + */ +int vpsp_get_vid(uint32_t *vid, pid_t pid) +{ + struct vpsp_vid_entry new_entry = {.pid = pid}; + struct vpsp_vid_entry *existing_entry = NULL; + + read_lock(&vpsp_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, + sizeof(struct vpsp_vid_entry), compare_vid_entries); + read_unlock(&vpsp_rwlock); + + if (!existing_entry) + return -ENOENT; + if (vid) { + *vid = existing_entry->vid; + pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); + } + return 0; +} +EXPORT_SYMBOL_GPL(vpsp_get_vid); + +/** + * Upon qemu startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. + * The insertion is done in ascending order of 'pid'. + */ +static int vpsp_add_vid(uint32_t vid) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid}; + + if (vpsp_get_vid(NULL, cur_pid) == 0) + return -EEXIST; + if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) + return -ENOMEM; + if (vid >= VPSP_VID_NUM_MAX) + return -EINVAL; + + write_lock(&vpsp_rwlock); + memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry)); + sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry), + compare_vid_entries, swap_vid_entries); + pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); + write_unlock(&vpsp_rwlock); + return 0; +} + +/** + * When the virtual machine is shut down, + * the 'vpsp_del_vid' function is used to remove + * the 'vid' associated with the current 'pid'.
+ */ +static int vpsp_del_vid(void) +{ + pid_t cur_pid = task_pid_nr(current); + int i, ret = -ENOENT; + + write_lock(&vpsp_rwlock); + for (i = 0; i < g_vpsp_vid_num; ++i) { + if (g_vpsp_vid_array[i].pid == cur_pid) { + --g_vpsp_vid_num; + pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", + g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); + memmove(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], + sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); + ret = 0; + goto end; + } + } + +end: + write_unlock(&vpsp_rwlock); + return ret; +} + +static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) +{ + int ret = 0; + unsigned char op = ctrl->op; + + switch (op) { + case VPSP_OP_VID_ADD: + ret = vpsp_add_vid(ctrl->data.vid); + break; + + case VPSP_OP_VID_DEL: + ret = vpsp_del_vid(); + break; + + case VPSP_OP_SET_DEFAULT_VID_PERMISSION: + vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); + break; + + case VPSP_OP_GET_DEFAULT_VID_PERMISSION: + ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + +static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) +{ + unsigned int opcode = 0; + struct vpsp_dev_ctrl vpsp_ctrl_op; + int ret = -EFAULT; + + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { + printk(KERN_INFO "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); + return -EINVAL; + } + opcode = _IOC_NR(ioctl); + switch (opcode) { + case HYGON_PSP_MUTEX_ENABLE: + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + // And get the sev lock to make sure no one is using it now. + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + hygon_psp_hooks.psp_mutex_enabled = 1; + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + // Wait 10ms just in case someone is right before getting the psp lock. + mdelay(10); + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + ret = 0; + break; + + case HYGON_PSP_MUTEX_DISABLE: + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + // And get the psp lock to make sure no one is using it now. + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + hygon_psp_hooks.psp_mutex_enabled = 0; + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + // Wait 10ms just in case someone is right before getting the sev lock.
+ mdelay(10); + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + ret = 0; + break; + + case HYGON_VPSP_CTRL_OPT: + if (copy_from_user(&vpsp_ctrl_op, (void __user *)arg, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); + if (!ret && copy_to_user((void __user *)arg, &vpsp_ctrl_op, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + break; + + default: + printk(KERN_INFO "%s: invalid ioctl number: %d\n", __func__, opcode); + return -EINVAL; + } + return ret; +} + +static const struct file_operations psp_fops = { + .owner = THIS_MODULE, + .mmap = mmap_psp, + .read = read_psp, + .write = write_psp, + .unlocked_ioctl = ioctl_psp, +}; + +int hygon_psp_additional_setup(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret = 0; + + if (!psp_misc) { + struct miscdevice *misc; + + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); + if (!psp_misc) + return -ENOMEM; + psp_misc->data_pg_aligned = (struct psp_dev_data *)get_zeroed_page(GFP_KERNEL); + if (!psp_misc->data_pg_aligned) { + dev_err(dev, "alloc psp data page failed\n"); + devm_kfree(dev, psp_misc); + psp_misc = NULL; + return -ENOMEM; + } + SetPageReserved(virt_to_page(psp_misc->data_pg_aligned)); + psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); + + *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; + misc = &psp_misc->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = "hygon_psp_config"; + misc->fops = &psp_fops; + + ret = misc_register(misc); + if (ret) + return ret; + kref_init(&psp_misc->refcount); + hygon_psp_hooks.psp_misc = psp_misc; + } else { + kref_get(&psp_misc->refcount); + } + + return ret; +} +EXPORT_SYMBOL_GPL(hygon_psp_additional_setup); + +void hygon_psp_exit(struct kref *ref) +{ + struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); + + misc_deregister(&misc_dev->misc); + ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); + free_page((unsigned long)misc_dev->data_pg_aligned); + psp_misc = NULL; + hygon_psp_hooks.psp_misc = NULL; +} +EXPORT_SYMBOL_GPL(hygon_psp_exit); + int fixup_hygon_psp_caps(struct psp_device *psp) { /* the hygon psp is unavailable if bit0 is cleared in feature reg */ @@ -93,13 +501,114 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } -int psp_do_cmd(int cmd, void *data, int *psp_ret) +int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + phys_addr_t phys_addr; + unsigned int phys_lsb, phys_msb; + unsigned int reg; + int ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + if (data && WARN_ON_ONCE(!virt_addr_valid(data))) + return -EINVAL; + + /* Get the physical address of the command buffer */ + phys_addr = PUT_PSP_VID(__psp_pa(data), vid); + phys_lsb = data ? lower_32_bits(phys_addr) : 0; + phys_msb = data ?
upper_32_bits(phys_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + return ret; +} + +int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + +int psp_do_cmd(int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } rc = __psp_do_cmd_locked(cmd, data, psp_ret); - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); return rc; } diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 480b3c36a002ea561a5110f4ae6a3dcc786abc4e..abfc8fcb29c7b0d591ce07b37743a777283c8dfc 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -12,6 +12,7 @@ #include #include +#include #include "sp-dev.h" @@ -30,6 +31,8 @@ extern struct hygon_psp_hooks_table { bool sev_dev_hooks_installed; struct mutex *sev_cmd_mutex; + struct psp_misc_dev *psp_misc; + bool psp_mutex_enabled; bool *psp_dead; int *psp_timeout; int *psp_cmd_timeout; @@ -43,6 +46,27 @@ extern struct hygon_psp_hooks_table { long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; +#define PSP_MUTEX_TIMEOUT 60000 +struct psp_mutex { + volatile uint64_t locked; +}; + +struct psp_dev_data { + struct psp_mutex mb_mutex; +}; + +struct psp_misc_dev { + struct kref refcount; + struct psp_dev_data *data_pg_aligned; + struct miscdevice misc; +}; + +extern int
psp_mutex_trylock(struct psp_mutex *mutex); + +int hygon_psp_additional_setup(struct sp_device *sp); +void hygon_psp_exit(struct kref *ref); +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +int psp_mutex_unlock(struct psp_mutex *mutex); int fixup_hygon_psp_caps(struct psp_device *psp); int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c index 93402b13b93a3d6b75f5d0dd519f26f31b4699cb..0c9ea0217b2ea1a39e2b02030af0e293e8f13790 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.c +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -28,6 +28,7 @@ static void enqueue_data(struct csv_queue *queue, unsigned int l; void *data; + off &= queue->mask; if (esize != 1) { off *= esize; size *= esize; @@ -128,3 +129,25 @@ unsigned int csv_dequeue_stat(struct csv_queue *queue, queue->head += len; return len; } + +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int len) +{ + unsigned int size; + + size = ring_buf->tail - ring_buf->head; + if (len > size) + len = size; + + dequeue_data(ring_buf, buf, len, ring_buf->head); + ring_buf->head += len; + return len; +} + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf) +{ + unsigned int free_size; + + free_size = queue_avail_size(ring_buf); + return ring_buf->mask - free_size; +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h index 2c99ade0251291ef017c28640a255cb92d64b55b..bf97aa6df36a2e66b4e306dc1bf253b73a07ce70 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.h +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -19,5 +19,9 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len); unsigned int csv_dequeue_stat(struct csv_queue *queue, void *buf, unsigned int len); +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int len); + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf); #endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/hygon/tdm-dev.c b/drivers/crypto/ccp/hygon/tdm-dev.c new file mode 100644 index 0000000000000000000000000000000000000000..71ab3f6caaaba0eba54288e3ef482b93bd2b61aa --- /dev/null +++ b/drivers/crypto/ccp/hygon/tdm-dev.c @@ -0,0 +1,1594 @@ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "tdm: " fmt + +#define TDM_CMD_ID_MAX 16 +#define TDM2PSP_CMD(id) (0x110 | (id)) +#define TDM_P2C_CMD_ID 1 +#define TDM_C2P_CMD_SIZE (3*PAGE_SIZE) +#define TDM_KFIFO_SIZE 1024 + +#define TDM_IOC_TYPE 'D' +#define TDM_CMD_LEN_LIMIT (1U << 12) + +struct context_message { + uint32_t flag; + uint32_t pid; + uint8_t comm[16]; + uint8_t module_name[64]; +}; + +struct tdm_task_head { + struct list_head head; + rwlock_t lock; +}; + +struct tdm_task_ctx { + uint32_t task_id; + uint32_t cmd_ctx_flag; + measure_exception_handler_t handler; + struct list_head list; +}; + +static struct tdm_task_head dyn_head; +static unsigned int p2c_cmd_id = TDM_P2C_CMD_ID; +static struct task_struct *kthread; +static DECLARE_KFIFO(kfifo_error_task, unsigned char, TDM_KFIFO_SIZE); +static spinlock_t kfifo_lock; +static int tdm_support; +static int tdm_init_flag; +static int tdm_destroy_flag; + +static int list_check_exist(uint32_t task_id) +{ + int found = 0; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + found = 1; + break; + } + } + read_unlock(lock); + + return found; +} + +static int list_enqueue(void *entry) +{ + int ret = 0; + struct list_head *head, *entry_list = NULL; + rwlock_t *lock = NULL; + + if (!entry) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + entry_list = &(((struct tdm_task_ctx *)entry)->list); + + write_lock(lock); + list_add_tail(entry_list, head); + write_unlock(lock); + +end: + return ret; +} + +static __maybe_unused int list_print(void) +{ + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + pr_info("id: %d ", task_node->task_id); + } + read_unlock(lock); + pr_info("\n"); + + return 0; +} + +static int measure_exception_handling_thread(void *data) +{ + int ret = 0; + int copied = 0; + uint32_t error_task_id = 0xffffffff; + struct measure_status task_measure_status; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + pr_info("Thread started for measurement exception handler dispatching...\n"); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + + while (!kfifo_is_empty(&kfifo_error_task)) { + copied = kfifo_out_spinlocked(&kfifo_error_task, + (unsigned char *)&error_task_id, sizeof(uint32_t), &kfifo_lock); + if (copied != sizeof(uint32_t)) { + ret = -DYN_ERR_API; + pr_err("kfifo_out exception, return\n"); + goto end; + } + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == error_task_id) + break; + } + read_unlock(lock); + + if (!task_node) { + ret = -DYN_NULL_POINTER; + pr_err("task_node is null, return\n"); + goto end; + } + + if (task_node->task_id == error_task_id) { + if (task_node->handler) {
pr_info("-----Measurement exception handler dispatching " + "thread------\n"); + pr_info("Measurement exception received for task %d\n", + error_task_id); + pr_info("Step1: Query PSP for task %d status to confirm " + "the error.\n", error_task_id); + pr_info("Step2: Error confirmed, CALL measurement " + "exception handler.\n"); + ret = psp_query_measure_status(error_task_id, + &task_measure_status); + if (ret) { + pr_err("task_id %d status query failed\n", + error_task_id); + goto end; + } + + if (task_measure_status.error == MER_ERR) { + /*error--1 normal--0 */ + pr_info("Error detected for task %d, " + "action TODO!\n", error_task_id); + pr_info("----Measurement exception handler----\n"); + task_node->handler(error_task_id); + pr_info("Exit measurement exception handler.\n"); + } else { + pr_info("No error detected for task %d, please " + "check it again!\n", error_task_id); + } + } else { + pr_err("task %d's callback function is not registered, " + "please check it\n", error_task_id); + } + } + } + } +end: + return ret; +} + +static int tdm_interrupt_handler(uint32_t id, uint64_t data) +{ + if (kthread) { + kfifo_in_spinlocked(&kfifo_error_task, (unsigned char *)&data, sizeof(uint32_t), + &kfifo_lock); + wake_up_process(kthread); + } + + return 0; +} + +static int tdm_do_cmd(unsigned int cmd_id, void *cmd_data, int *error) +{ + if (cmd_id >= TDM_CMD_ID_MAX) { + pr_err("%s cmd_id %u beyond limit\n", __func__, cmd_id); + return -DYN_BEYOND_MAX; + } + + return psp_do_cmd(TDM2PSP_CMD(cmd_id), cmd_data, error); +} + +static int calc_task_context_hash(struct context_message context_msg, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + shash = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(shash)) { + pr_err("can't alloc hash\n"); + return -DYN_ERR_API; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + ret = crypto_shash_init(sdesc); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_init failed\n"); + goto end; + } + + if (context_msg.flag & CONTEXT_CHECK_PID) { + ret = crypto_shash_update(sdesc, (uint8_t *)&context_msg.pid, + sizeof(context_msg.pid)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_COMM) { + ret = crypto_shash_update(sdesc, context_msg.comm, + strlen(context_msg.comm)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_MODNAME) { + ret = crypto_shash_update(sdesc, context_msg.module_name, + strlen(context_msg.module_name)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_final failed\n"); + goto free_shash; + } + } + +free_shash: + crypto_free_shash(shash); +end: + return ret; +} + +static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) +{ + int ret = 0; + struct context_message ctx_msg = {0}; + unsigned long return_address = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + struct module *p_module = NULL; +#elif IS_ENABLED(CONFIG_KALLSYMS) + char symbol_buf[128] = {0}; + int symbol_len = 0; + char *symbol_begin = NULL; + char *symbol_end = NULL; +#endif + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + ctx_msg.flag = flag; + ctx_msg.pid = 
static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) +{ + int ret = 0; + struct context_message ctx_msg = {0}; + unsigned long return_address = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + struct module *p_module = NULL; +#elif IS_ENABLED(CONFIG_KALLSYMS) + char symbol_buf[128] = {0}; + int symbol_len = 0; + char *symbol_begin = NULL; + char *symbol_end = NULL; +#endif + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + ctx_msg.flag = flag; + ctx_msg.pid = current->pid; + memcpy(ctx_msg.comm, current->comm, sizeof(current->comm)); + + return_address = CALLER_ADDR1; + if (return_address) { +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + p_module = __module_address(return_address); + // caller is a module + if (p_module) + memcpy(ctx_msg.module_name, p_module->name, sizeof(p_module->name)); + // caller is built-in + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#elif IS_ENABLED(CONFIG_KALLSYMS) + symbol_len = sprint_symbol((char *)symbol_buf, return_address); + if (!symbol_len) { + ret = -DYN_ERR_API; + pr_err("sprint_symbol failed\n"); + goto end; + } + symbol_begin = strchr((char *)symbol_buf, '['); + if (!symbol_begin) { + ret = -DYN_NULL_POINTER; + pr_err("module name does not exist\n"); + goto end; + } + symbol_end = strchr((char *)symbol_buf, ']'); + if (!symbol_end) { + ret = -DYN_NULL_POINTER; + pr_err("module name does not exist\n"); + goto end; + } + symbol_begin++; + if (symbol_end - symbol_begin) + memcpy(ctx_msg.module_name, symbol_begin, symbol_end - symbol_begin); + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#endif + } else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); + + ret = calc_task_context_hash(ctx_msg, hash); + if (ret) { + pr_err("calc_task_context_hash failed\n"); + goto end; + } + +end: + return ret; +} + +static int tdm_verify_phy_addr_valid(struct addr_range_info *range) +{ + int ret = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + int i; + uint64_t phy_addr_start, phy_addr_end; + + for (i = 0; i < range->count; i++) { + phy_addr_start = __sme_clr(range->addr[i].addr_start); + phy_addr_end = __sme_clr(range->addr[i].addr_start + range->addr[i].length); + + if ((PHYS_PFN(phy_addr_start) >= max_pfn) || (PHYS_PFN(phy_addr_end) >= max_pfn)) { + pr_err("phy_addr or length beyond max_pfn\n"); + ret = -DYN_ERR_MEM; + break; + } + } +#else + pr_warn("TDM: Can't get max_pfn, skip physical address check\n"); +#endif + + return ret; +} + +/* Convert the virtual address to a physical address, then check whether the + * backing physical memory is contiguous + */ +static int ptable_virt_to_phy(uint64_t vaddr, struct addr_info *p_addr_info, uint64_t *left_convert) +{ + int ret = 0; + unsigned int level = 0; + pte_t *pte; + uint64_t local_page_mask = 0; + uint64_t local_page_size = 0; + uint64_t now_base = vaddr; + uint64_t last_phy_addr = 0; + uint64_t last_phy_len = 0; + uint64_t now_phy_addr = 0; + + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + local_page_size = page_level_size(level); + local_page_mask = page_level_mask(level); + + switch (level) { + case PG_LEVEL_4K: + p_addr_info->addr_start = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + p_addr_info->addr_start = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + p_addr_info->addr_start = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + if ((p_addr_info->addr_start & ~local_page_mask) == 0) { + /*|--------------page_size-------------------|*/ + /*|-------*left_convert-------|*/ + if (*left_convert < local_page_size) { + p_addr_info->length = *left_convert; +
*left_convert = 0; + } + /*|--------------page_size-------------------|-----*/ + /*|---------------------*left_convert-----------------------|*/ + else { + p_addr_info->length = local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + } + } else { + /*|--------------page_size-------------------|------*/ + /* |-------*left_convert---------|*/ + if ((p_addr_info->addr_start + *left_convert) < + ((p_addr_info->addr_start & local_page_mask) + local_page_size)) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + /*|--------------page_size-------------------|........*/ + /* |-----------------*left_convert-----------------|*/ + else { + p_addr_info->length = (p_addr_info->addr_start & local_page_mask) + + local_page_size - p_addr_info->addr_start; + now_base += p_addr_info->length; + *left_convert -= p_addr_info->length; + } + } + + last_phy_len = p_addr_info->length; + last_phy_addr = p_addr_info->addr_start; + + while (*left_convert) { + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + /* the page level may differ from the previous iteration */ + local_page_size = page_level_size(level); + local_page_mask = page_level_mask(level); + + switch (level) { + case PG_LEVEL_4K: + now_phy_addr = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + now_phy_addr = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + now_phy_addr = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + /* not contiguous memory */ + if ((last_phy_addr + last_phy_len) != now_phy_addr) + break; + + if (*left_convert < local_page_size) { + p_addr_info->length += *left_convert; + *left_convert = 0; + } else { + p_addr_info->length += local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + last_phy_addr = now_phy_addr; + last_phy_len = local_page_size; + } + } + +end: + return ret; +} + +int psp_check_tdm_support(void) +{ + int ret = 0; + struct tdm_version version; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (tdm_support) + goto end; + + ret = psp_get_fw_info(&version); + if (ret) { + tdm_support = 0; + goto end; + } + + tdm_support = 1; + } + +end: + return tdm_support; +} +EXPORT_SYMBOL_GPL(psp_check_tdm_support); + +int psp_get_fw_info(struct tdm_version *version) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_fw_cmd *fw_cmd = NULL; + struct tdm_fw_resp *fw_resp = NULL; + + if (!version) { + ret = -DYN_NULL_POINTER; + pr_err("version is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + fw_cmd = (struct tdm_fw_cmd *)tdm_cmdresp_data; + fw_cmd->cmd_type = TDM_FW_VERSION; + + ret = tdm_do_cmd(0, (void *)fw_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + + if (error) { + ret = -error; + pr_warn("get_fw_info exception: 0x%x\n", error); + goto free_cmdresp; + } + + fw_resp = (struct tdm_fw_resp *)tdm_cmdresp_data; + memcpy(version, &fw_resp->version, sizeof(struct tdm_version)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_get_fw_info);
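[Editor's note] A minimal in-kernel caller sketch for the two query entry points above (hypothetical example, not part of the patch; it only relies on the psp_check_tdm_support()/psp_get_fw_info() signatures shown here):

static int tdm_probe_example(void)
{
	struct tdm_version ver;
	int ret;

	/* nonzero only on Hygon parts with reachable TDM firmware */
	if (!psp_check_tdm_support())
		return -ENODEV;

	ret = psp_get_fw_info(&ver);
	if (ret)
		return ret;

	pr_info("TDM api %u.%u, build %u, max %u tasks\n",
		ver.api_major, ver.api_minor, ver.buildId, ver.task_max);
	return 0;
}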
int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_create_cmd *create_cmd = NULL; + struct tdm_create_resp *create_resp = NULL; + uint32_t addr_range_info_len = 0; + struct addr_range_info *paddr_range_info = NULL; + uint32_t info_index = 0; + uint64_t now_base_vaddr = 0; + uint64_t tf_left_size = 0; + uint32_t count = 0; + + if (!range) { + ret = -DYN_NULL_POINTER; + pr_err("range is null pointer\n"); + goto end; + } + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (range->count > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("range->count %d is beyond RANGE_CNT_MAX %d\n", range->count, RANGE_CNT_MAX); + goto end; + } + if (range->count == 0) { + ret = -DYN_ERR_SIZE_SMALL; + pr_err("range->count is zero!\n"); + goto end; + } + + /* create task by vaddr */ + if (flag & TASK_CREATE_VADDR) { + paddr_range_info = kzalloc(sizeof(struct addr_range_info) + + RANGE_CNT_MAX * sizeof(struct addr_info), GFP_KERNEL); + if (!paddr_range_info) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for paddr_range_info failed\n"); + goto end; + } + + now_base_vaddr = range->addr[0].addr_start; + tf_left_size = range->addr[0].length; + while (tf_left_size && (count++ < RANGE_CNT_MAX + 1)) { + ret = ptable_virt_to_phy(now_base_vaddr, + &paddr_range_info->addr[info_index], &tf_left_size); + if (ret) { + pr_err("address convert failed!\n"); + goto free_paddr_range_info; + } + + now_base_vaddr = now_base_vaddr + + paddr_range_info->addr[info_index++].length; + if (info_index > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("info_index: %d is beyond %d\n", info_index, RANGE_CNT_MAX); + goto free_paddr_range_info; + } + } + + paddr_range_info->count = info_index; + addr_range_info_len = paddr_range_info->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } else { + /* check that the physical addresses are valid */ + ret = tdm_verify_phy_addr_valid(range); + if (ret) { + pr_err("range address is abnormal!\n"); + goto end; + } + addr_range_info_len = range->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto free_paddr_range_info; + } + + create_cmd = (struct tdm_create_cmd *)tdm_cmdresp_data; + create_cmd->cmd_type = TDM_TASK_CREATE; + create_cmd->cmd_ctx_flag = flag; + + memcpy(&create_cmd->m_data, data, sizeof(struct measure_data)); + create_cmd->authcode_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : code->len;
ret = tdm_get_cmd_context_hash(flag, create_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + if (flag & TASK_CREATE_VADDR) + memcpy(&create_cmd->range_info, paddr_range_info, addr_range_info_len); + else + memcpy(&create_cmd->range_info, range, addr_range_info_len); + + ret = tdm_do_cmd(0, (void *)create_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("create_measure_task exception error: 0x%x\n", error); + goto free_cmdresp; + } + + create_resp = (struct tdm_create_resp *)tdm_cmdresp_data; + code->len = create_resp->authcode_len; + code->len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : code->len; + memcpy(&code->val[0], &create_resp->authcode_val[0], code->len); + + head = &dyn_head.head; + task_node = kzalloc(sizeof(struct tdm_task_ctx), GFP_KERNEL); + if (!task_node) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", sizeof(struct tdm_task_ctx)); + goto free_cmdresp; + } + + task_node->task_id = create_resp->task_id; + task_node->handler = NULL; + task_node->cmd_ctx_flag = flag; + + ret = list_enqueue(task_node); + if (ret) { + pr_err("task %d enqueue failed!\n", task_node->task_id); + goto free_task_node; + } + + kfree(tdm_cmdresp_data); + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); + + return task_node->task_id; + +free_task_node: + kfree(task_node); +free_cmdresp: + kfree(tdm_cmdresp_data); +free_paddr_range_info: + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_create_measure_task); + +int psp_query_measure_status(uint32_t task_id, struct measure_status *status) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_query_cmd *query_cmd = NULL; + struct tdm_query_resp *query_resp = NULL; + + if (!status) { + ret = -DYN_NULL_POINTER; + pr_err("status is null pointer\n"); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + query_cmd = (struct tdm_query_cmd *)tdm_cmdresp_data; + query_cmd->cmd_type = TDM_TASK_QUERY; + query_cmd->task_id = task_id; + + ret = tdm_do_cmd(0, query_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + query_resp = (struct tdm_query_resp *)tdm_cmdresp_data; + memcpy(status, &query_resp->m_status, sizeof(struct measure_status)); +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_query_measure_status);
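[Editor's note] psp_query_measure_status() is the polling companion to the exception-callback path above; a hedged caller sketch (illustrative only, using just the structures declared in tdm-dev.h):

static void tdm_poll_example(uint32_t task_id)
{
	struct measure_status st;

	if (psp_query_measure_status(task_id, &st))
		return;

	/* st.status follows enum TDM_TASK_STATUS, st.error follows enum
	 * TDM_MEASURE_STATUS; st.count is how many measurements have run.
	 */
	pr_info("task %u: status %u error %u measured %llu times\n",
		task_id, st.status, st.error, st.count);
}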
int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_register_cmd *register_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + /* check if task_id is registered already */ + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + if ((handler && task_node->handler)) { + pr_err("task %d is registered already\n", task_id); + read_unlock(lock); + return -DYN_EEXIST; + } + break; + /* task_node will be used in the following context */ + } + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + register_cmd = (struct tdm_register_cmd *)tdm_cmdresp_data; + temp_cmd = &register_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_VERIFY_AUTH; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, register_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + write_lock(lock); + task_node->handler = handler; + write_unlock(lock); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_measure_exception_handler); + +int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_destroy_cmd *destroy_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to be destroyed!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + destroy_cmd = (struct tdm_destroy_cmd *)tdm_cmdresp_data; + temp_cmd = &destroy_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_DESTROY; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len;
memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, destroy_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + if (task_node->handler) { + write_lock(lock); + task_node->handler = NULL; + write_unlock(lock); + } + + write_lock(lock); + list_del(&task_node->list); + write_unlock(lock); + + kfree(task_node); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_destroy_measure_task);
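[Editor's note] The authcode handed back by psp_create_measure_task() acts as the capability for every later mutation of a task: update, stop, start and destroy all present it again, and the PSP answers DYN_AUTH_FAIL otherwise. A hedged teardown sketch (illustrative, function name hypothetical):

static void tdm_teardown_example(uint32_t task_id, struct authcode_2b *code)
{
	/* stop first; tasks created with TASK_ATTR_NO_UPDATE refuse both
	 * the stop and the destroy below
	 */
	psp_startstop_measure_task(task_id, code, false);
	psp_destroy_measure_task(task_id, code);
}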
int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_update_cmd *update_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to be updated!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + update_cmd = (struct tdm_update_cmd *)tdm_cmdresp_data; + temp_cmd = &update_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_UPDATE; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + memcpy(&update_cmd->update_data, data, sizeof(struct measure_update_data)); + + ret = tdm_do_cmd(0, tdm_cmdresp_data, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_update_measure_task); + +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_startstop_cmd *startstop_cmd = NULL; + struct tdm_startstop_resp *startstop_resp = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + startstop_cmd = (struct tdm_startstop_cmd *)tdm_cmdresp_data; + temp_cmd = &startstop_cmd->cmd; + temp_cmd->cmd_type = start ? TDM_TASK_START : TDM_TASK_STOP; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len;
memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + if ((temp_cmd->cmd_type == TDM_TASK_STOP) && (task_node->cmd_ctx_flag & + TASK_ATTR_NO_UPDATE)) { + pr_warn("Task %d is not allowed to be stopped!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto free_cmdresp; + } + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, startstop_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + startstop_resp = (struct tdm_startstop_resp *)tdm_cmdresp_data; + /* read the status out before freeing the buffer it lives in */ + ret = startstop_resp->m_status.status; + + kfree(tdm_cmdresp_data); + + return ret; + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_startstop_measure_task); + +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_export_cert_cmd *cert_cmd = NULL; + struct tdm_export_cert_resp *cert_resp = NULL; + + if (!cert) { + ret = -DYN_NULL_POINTER; + pr_err("cert is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + cert_cmd = (struct tdm_export_cert_cmd *)tdm_cmdresp_data; + cert_cmd->cmd_type = TDM_EXPORT_CERT; + cert_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)cert_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + cert_resp = (struct tdm_export_cert_resp *)tdm_cmdresp_data; + memcpy(cert, &cert_resp->cert, sizeof(struct tdm_cert)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_export_cert); + +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_report *report_resp = NULL; + uint32_t needed_length = 0; + + if (!user_supplied_data) { + ret = -DYN_NULL_POINTER; + pr_err("user_supplied_data is null pointer\n"); + goto end; + } + if (!report_buffer) { + ret = -DYN_NULL_POINTER; + pr_err("report_buffer is null pointer\n"); + goto end; + } + if (!length) { + ret = -DYN_NULL_POINTER; + pr_err("length is null pointer\n"); + goto end; + } + if ((report_type != TDM_REPORT_SUMMARY) && (report_type != TDM_REPORT_DETAIL)) { + ret = -DYN_ERR_REPORT_TYPE; + pr_err("invalid report_type\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + report_cmd = (struct tdm_get_report_cmd *)tdm_cmdresp_data; + + report_cmd->cmd_type = TDM_GET_REPORT; + report_cmd->task_id = task_id; + if (task_id == TDM_TASK_ALL) { + if (!selection) { + ret = -DYN_NULL_POINTER; +
pr_err("selection is null pointer\n"); + goto end; + } + report_cmd->selection_len = selection->len; + report_cmd->selection_len = (report_cmd->selection_len > TDM_MAX_TASK_BITMAP) ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + memcpy(&report_cmd->selection_bitmap[0], &selection->bitmap[0], + report_cmd->selection_len); + } + + report_cmd->user_data_len = (user_supplied_data->len > TDM_MAX_NONCE_SIZE) ? + TDM_MAX_NONCE_SIZE : user_supplied_data->len; + memcpy(&report_cmd->user_data_val[0], &user_supplied_data->val[0], + report_cmd->user_data_len); + report_cmd->report_type = report_type; + report_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)report_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + report_resp = (struct tdm_report *)tdm_cmdresp_data; + if (report_type == TDM_REPORT_SUMMARY) + needed_length = sizeof(struct tdm_report) + sizeof(struct tdm_report_sig); + else + needed_length = sizeof(struct tdm_report) + + report_resp->task_nums * sizeof(struct tdm_detail_task_status) + + sizeof(struct tdm_report_sig); + + if (needed_length > *length) { + pr_warn("needed_length %d is beyond length %d\n", needed_length, *length); + *length = needed_length; + ret = -DYN_ERR_SIZE_SMALL; + } else { + memcpy(report_buffer, report_resp, needed_length); + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_report); + +int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + + if (!digest) { + ret = -DYN_NULL_POINTER; + pr_err("digest is null pointer\n"); + goto end; + } + if (!pcr_values) { + ret = -DYN_NULL_POINTER; + pr_err("pcr_values is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)tdm_cmdresp_data; + + vpcr_cmd->cmd_type = TDM_VPCR_AUDIT; + memcpy(&vpcr_cmd->pcr, &pcr, sizeof(struct pcr_select)); + + ret = tdm_do_cmd(0, (void *)vpcr_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + vpcr_resp = (struct tdm_get_vpcr_resp *)tdm_cmdresp_data; + memcpy(digest, &vpcr_resp->digest, sizeof(struct tpm2b_digest)); + pcr_values->task_nums = vpcr_resp->pcr_values.task_nums; + memcpy(&pcr_values->task_data[0], &vpcr_resp->pcr_values.task_data[0], + pcr_values->task_nums * sizeof(struct tdm_task_data)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_vpcr_audit); + +static long tdm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + int ret = 0; + void __user *argp = (void __user *)arg; + unsigned int tdm_cmd = 0; + unsigned char *temp_cmd_data = NULL; + struct task_selection_2b *selection = NULL; + struct data_2b *data = NULL; + uint32_t data_to_user_len = 0; + uint16_t selection_len = 0; + uint16_t user_data_len = 0; + 
static long tdm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + int ret = 0; + void __user *argp = (void __user *)arg; + unsigned int tdm_cmd = 0; + unsigned char *temp_cmd_data = NULL; + struct task_selection_2b *selection = NULL; + struct data_2b *data = NULL; + uint32_t data_to_user_len = 0; + uint16_t selection_len = 0; + uint16_t user_data_len = 0; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_user_report_cmd *user_report_cmd = NULL; + uint32_t needed_length = 0; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + uint32_t pcr_num = 0; + + if (_IOC_TYPE(ioctl) != TDM_IOC_TYPE) { + ret = -EINVAL; + pr_err("ioctl 0x%08x is invalid\n", ioctl); + goto end; + } + + temp_cmd_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!temp_cmd_data) { + ret = -ENOMEM; + pr_err("kzalloc for size 0x%lx failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + tdm_cmd = _IOC_NR(ioctl); + + switch (tdm_cmd) { + case USER_EXPORT_CERT: + ret = tdm_export_cert(TDM_AK_USAGE_ID, (struct tdm_cert *)temp_cmd_data); + if (ret) { + pr_err("Execute tdm export cert command failed!\n"); + goto free_mem; + } + data_to_user_len = sizeof(struct tdm_cert); + break; + + case USER_GET_REPORT: + if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_user_report_cmd))) { + pr_err("%s copy from user failed\n", __func__); + ret = -EFAULT; + goto free_mem; + } + + user_report_cmd = (struct tdm_user_report_cmd *)temp_cmd_data; + needed_length = user_report_cmd->needed_length; + report_cmd = &user_report_cmd->report_cmd; + selection_len = report_cmd->selection_len > TDM_MAX_TASK_BITMAP ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + + selection = kzalloc(sizeof(struct task_selection_2b) + + selection_len * sizeof(uint8_t), GFP_KERNEL); + if (!selection) { + ret = -ENOMEM; + pr_err("kzalloc failed\n"); + goto free_mem; + } + + selection->len = selection_len; + memcpy(&selection->bitmap[0], &report_cmd->selection_bitmap[0], selection->len); + + user_data_len = report_cmd->user_data_len > TDM_MAX_NONCE_SIZE ? + TDM_MAX_NONCE_SIZE : report_cmd->user_data_len; + data = kzalloc(sizeof(struct data_2b) + + user_data_len * sizeof(uint8_t), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + pr_err("kzalloc failed\n"); + goto free_mem; + } + + data->len = user_data_len; + memcpy(&data->val[0], &report_cmd->user_data_val[0], data->len); + + ret = tdm_get_report(report_cmd->task_id, selection, data, report_cmd->report_type, + report_cmd->key_usage_id, temp_cmd_data, &needed_length); + if (ret) { + pr_err("Execute tdm report command failed!\n"); + goto free_mem; + } + + data_to_user_len = needed_length; + break; + + case USER_VPCR_AUDIT: + if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_get_vpcr_cmd))) { + pr_err("%s copy from user failed\n", __func__); + ret = -EFAULT; + goto free_mem; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)temp_cmd_data; + vpcr_resp = (struct tdm_get_vpcr_resp *)temp_cmd_data; + pcr_num = vpcr_cmd->pcr.pcr; + + ret = tdm_get_vpcr_audit(vpcr_cmd->pcr, &vpcr_resp->digest, &vpcr_resp->pcr_values); + if (ret) { + pr_err("Execute tdm vpcr audit command failed!\n"); + goto free_mem; + } + + vpcr_resp->pcr = pcr_num; + data_to_user_len = sizeof(struct tdm_get_vpcr_resp) + + vpcr_resp->pcr_values.task_nums * sizeof(struct tdm_task_data); + break; + + case USER_SHOW_DEVICE: + ret = psp_get_fw_info(&((struct tdm_show_device *)temp_cmd_data)->version); + if (ret) { + pr_err("firmware version get failed!\n"); + goto free_mem; + } + + data_to_user_len = sizeof(struct tdm_show_device); + break; + + default: + pr_err("invalid tdm_cmd: %d from user\n", tdm_cmd); + ret = -EINVAL; + goto free_mem; + } + + if (copy_to_user(argp, temp_cmd_data, data_to_user_len)) { + pr_err("%s copy to user failed\n", __func__); + ret = -EFAULT; + goto free_mem; + } + +free_mem: + kfree(temp_cmd_data); + kfree(selection); + kfree(data); +end: +
return ret; +} + +static const struct file_operations tdm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = tdm_ioctl, +}; + +static struct miscdevice misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tdm", + .fops = &tdm_fops, +}; + +int tdm_dev_init(void) +{ + int ret = 0; + + if (tdm_init_flag) + return 0; + + INIT_KFIFO(kfifo_error_task); + INIT_LIST_HEAD(&dyn_head.head); + rwlock_init(&dyn_head.lock); + spin_lock_init(&kfifo_lock); + + ret = psp_register_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + if (ret) { + pr_err("notifier function registration failed\n"); + return ret; + } + + kthread = kthread_create(measure_exception_handling_thread, NULL, + "measure_exception_handling_thread"); + if (IS_ERR(kthread)) { + pr_err("kthread_create failed\n"); + ret = PTR_ERR(kthread); + goto unreg; + } + + wake_up_process(kthread); + + ret = misc_register(&misc); + if (ret) { + pr_err("misc_register for tdm failed\n"); + goto stop_kthread; + } + + tdm_init_flag = 1; + pr_info("TDM driver loaded successfully!\n"); + + return ret; + +stop_kthread: + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } +unreg: + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + return ret; +} + +int tdm_dev_destroy(void) +{ + if (tdm_destroy_flag) + goto end; + + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } + + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + misc_deregister(&misc); + tdm_destroy_flag = 1; +end: + return 0; +} + diff --git a/drivers/crypto/ccp/hygon/tdm-dev.h b/drivers/crypto/ccp/hygon/tdm-dev.h new file mode 100644 index 0000000000000000000000000000000000000000..afc4761a7e81778bb8e0bb9c4984504a540766bc --- /dev/null +++ b/drivers/crypto/ccp/hygon/tdm-dev.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* Change log: + * Version: 0.7 (fw version 1.4) + * 1. Adjust the TDM driver to accommodate multiple versions of the kernel. + * Version: 0.6 (fw version 1.4) + * 1. Remove psp_get_fw_info from hygon_tdm_init, add tdm show device support to ioctl for hag. + * Version: 0.5 (fw version 1.4) + * 1. Add support for halting the machine when a task with the special attribute reports an exception. + * Version: 0.4 (fw version 1.3) + * 1. Add vpcr support. + * 2. Add task creation by vaddr. + * Version: 0.3 (fw version 1.2) + * 1. Add remote authentication support. 
+ */ +#ifndef __TDM_DEV_H__ +#define __TDM_DEV_H__ + +#include +#include + +#define MIN_VPCR 10 +#define MAX_VPCR 16 + +/*Macro definition for measurement*/ +#define TDM_MAX_TASK_BITMAP 16 +#define TDM_MAX_NONCE_SIZE 32 + +#define RANGE_CNT_MAX 0x80 +#define MEASURE_TASK_MAX 100 +#define AUTHCODE_MAX 16 +#define AUTH_TRY_DELAY 1 + +#define HASH_ALGO_SM3 0 +#define HASH_ALGO_SHA1 1 +#define HASH_ALGO_SHA256 2 +#define HASH_ALGO_SHA384 3 +#define HASH_ALGO_SHA512 4 + +#define SM3_256_DIGEST_SIZE 32 +#define SHA1_DIGEST_SIZE 20 +#define SHA256_DIGEST_SIZE 32 +#define SHA384_DIGEST_SIZE 48 +#define SHA512_DIGEST_SIZE 64 + +#define CONTEXT_CHECK_PID 0x1 +#define CONTEXT_CHECK_COMM 0x2 +#define CONTEXT_CHECK_MODNAME 0x4 +#define TASK_ATTR_NO_UPDATE 0x10000 +#define TASK_SUPPORT_VPCR 0x20000 +#define TASK_CREATE_VADDR 0x40000 +#define TASK_EXCEPTION_CRASH 0x80000 + +#define MEASURE_UPDATE_ALGO 0x1 +#define MEASURE_UPDATE_EXPECTED_MEASUREMENT 0x2 + +/*Macro definition for tdm certificate*/ +#define TDM_MAX_CHIP_ID_LEN 40 +#define TDM_CURVE_SM2_ID 0x3 +#define TDM_PUBKEY_LEN 32 +#define TDM_MAX_USER_ID_LEN 126 +#define TDM_SIG_LEN 32 +#define TDM_HEADER_AND_PUBKEY_LEN 284 + +/*Macro definition for tdm report*/ +#define TDM_TASK_ALL 0xffffffff +#define TDM_REPORT_SUMMARY 0 +#define TDM_REPORT_DETAIL 1 + +/* CPU to psp command declaration */ +enum C2P_CMD_TYPE { + TDM_TASK_CREATE = 0x0, + TDM_TASK_VERIFY_AUTH, + TDM_TASK_QUERY, + TDM_TASK_DESTROY, + TDM_TASK_UPDATE, + TDM_TASK_STOP, + TDM_TASK_START, + TDM_FW_VERSION, + TDM_EXPORT_CERT, + TDM_GET_REPORT, + TDM_VPCR_AUDIT, + TDM_MAX_CMD +}; + +/* User interaction command declaration */ +enum USER_CMD_TYPE { + USER_EXPORT_CERT = 0x80, + USER_GET_REPORT, + USER_VPCR_AUDIT, + USER_SHOW_DEVICE, + USER_MAX_CMD +}; + +/*Public usage id definition for tdm certificate*/ +enum _tdm_key_usage_id { + TDM_INVALID_USAGE_ID = 0x1000, + TDM_CEK_USAGE_ID = 0x1004, + TDM_AK_USAGE_ID = 0x2001, + TDM_MAX_USAGE_ID +}; + +/*Public status and type declaration*/ +enum TDM_TASK_STATUS { + DYN_INIT = 0x0, + DYN_TO_RUN, + DYN_RUN, + DYN_TO_STOP, + DYN_STOP +}; + +enum TDM_MEASURE_STATUS { + MER_NORMAL = 0x0, + MER_ERR +}; + +enum DYN_ERROR_TYPE { + DYN_NORMAL = 0x0, + DYN_NOT_EXIST, + DYN_AUTH_FAIL, + DYN_STATUS_NOT_SUIT, + DYN_BEYOND_MAX, + DYN_DA_PERIOD, + DYN_NULL_POINTER, + DYN_ERR_API, + DYN_EEXIST, + DYN_ERR_MEM, + DYN_ERR_AUTH_LEN, + DYN_ERR_KEY_ID, + DYN_NO_ALLOW_UPDATE, + DYN_ERR_HASH_ALGO, + DYN_ERR_REPORT_TYPE, + DYN_ERR_SIZE_SMALL, + DYN_ERR_ADDR_MAPPING, + DYN_ERR_PCR_NUM, + DYN_ERR_ORIG_TPM_PCR, + DYN_MAX_ERR_TYPE +}; + +/*Data structure declaration for measurement*/ +struct addr_info { + uint64_t addr_start; + uint64_t length; +} __packed; + +struct addr_range_info { + uint32_t count; + struct addr_info addr[]; +} __packed; + +struct measure_data { + uint32_t hash_algo; + uint8_t expected_measurement[32]; + uint32_t period_ms; + uint32_t pcr; +} __packed; + +struct authcode_2b { + uint16_t len; + uint8_t val[]; +} __packed; + +struct measure_status { + uint8_t status; + uint8_t error; + uint64_t count; +} __packed; + +struct measure_update_data { + uint32_t update_flag; + uint32_t algo; + uint8_t expected_measurement[32]; +} __packed; + +struct da_status { + uint64_t err_time; + uint16_t interval_time; + uint16_t err_cnt; +} __packed; + +struct tdm_version { + uint8_t api_major; + uint8_t api_minor; + uint32_t buildId; + uint32_t task_max; + uint32_t range_max_per_task; +} __packed; + +struct task_selection_2b { + uint16_t len; + uint8_t bitmap[]; };
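[Editor's note] The *_2b containers above end in C flexible array members, so a caller allocates header and payload together; a hedged sketch (illustrative code, not part of the patch):

	/* select tasks for a TDM_TASK_ALL report; one bit per task id */
	struct task_selection_2b *sel;

	sel = kzalloc(sizeof(*sel) + TDM_MAX_TASK_BITMAP, GFP_KERNEL);
	if (sel) {
		sel->len = TDM_MAX_TASK_BITMAP;
		sel->bitmap[0] = 0x03;	/* tasks 0 and 1 */
	}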
struct data_2b { + uint16_t len; + uint8_t val[]; +}; + +/*Data structure declaration for vpcr*/ +struct pcr_select { + uint16_t hash; + uint32_t pcr; +} __packed; + +union tpmu_ha { + uint8_t sha1[SHA1_DIGEST_SIZE]; + uint8_t sha256[SHA256_DIGEST_SIZE]; + uint8_t sha384[SHA384_DIGEST_SIZE]; + uint8_t sha512[SHA512_DIGEST_SIZE]; + uint8_t sm3_256[SM3_256_DIGEST_SIZE]; +}; + +struct tpm2b_digest { + uint16_t size; + uint8_t buffer[sizeof(union tpmu_ha)]; +} __packed; + +struct tdm_task_data { + uint32_t task_id; + uint8_t hash[32]; +} __packed; + +struct tdm_pcr_value_2b { + uint32_t task_nums; + struct tdm_task_data task_data[]; +} __packed; + +/*Data structure declaration for tdm certificate*/ +struct _tdm_ecc_pubkey { + uint32_t curve_id; + uint8_t pubkey_qx[TDM_PUBKEY_LEN]; + uint8_t pubkey_qy[TDM_PUBKEY_LEN]; + uint16_t user_id_len; + uint8_t user_id[TDM_MAX_USER_ID_LEN]; +} __packed; + +struct _tdm_ecc_signature { + uint8_t sig_r[TDM_SIG_LEN]; + uint8_t sig_s[TDM_SIG_LEN]; +} __packed; + +/* + ************************ Hygon TDM Certificate - ECC256*************************** + *|00h |31:0 |VERSION |Certificate version. 0... | + *|04h |7:0 |- |Reserved. Set to zero | + *|06h |7:0 |CHIP_ID_LEN | | + *|08h |319:0 |CHIP_ID |Unique ID of every chip. | + *|30h |31:0 |KEY_USAGE_ID |Usage id of the key. | + *|34h |63:0 |- |Reserved. Set to zero. | + *|3Ch |31:0 |CURVE_ID |ECC curve id | + *|40h |255:0 |Qx |Public key Qx | + *|60h |255:0 |Qy |Public key Qy | + *|80h |7:0 |USER_ID_LEN |GM user id len | + *|82h |1007:0 |USER_ID |GM user id | + *|100h|223:0 |- |Reserved. Set to zero. | + *|11Ch|31:0 |SIG1_KEY_USAGE_ID|Key type for sig1. | + *|120h|255:0 |SIG1_R |Signature R of key1. | + *|140h|255:0 |SIG1_S |Signature S of key1. | + *|160h|223:0 |- |Reserved. Set to zero | + *|17Ch|31:0 |SIG2_KEY_USAGE_ID|Key type for sig2. | + *|180h|255:0 |SIG2_R |Signature R of key2. | + *|1A0h|255:0 |SIG2_S |Signature S of key2. | + ************************************************************************************* + */ +struct tdm_cert { + uint32_t version; + uint8_t reserved_0[2]; + uint16_t chip_id_len; + uint8_t chip_id[TDM_MAX_CHIP_ID_LEN]; + uint32_t key_usage_id; + uint8_t reserved_1[8]; + struct _tdm_ecc_pubkey ecc_pubkey; + uint8_t reserved_2[28]; + uint32_t sig1_key_usage_id; + struct _tdm_ecc_signature ecc_sig1; + uint8_t reserved_3[28]; + uint32_t sig2_key_usage_id; + struct _tdm_ecc_signature ecc_sig2; +} __packed; + +/*Data structure declaration for tdm measurement report*/ +/* + ******************** Hygon TDM Report for Single Task - ECC256*********************** + *|+(00h) |31:0 |TASK_ID |Measured task ID | + *|+(04h) |31:0 |PERIOD_MS |Measured period time for the related task | + *|+(08h) |63:0 |MEASURED_COUNT |Measured count for the related task | + *|+(10h) |31:0 |LAST_MEASURE_ELAPSED_MS|Measured time for the last measurement. | + *|+(14h) |95:0 |- |Reserved. Set to zero | + *|+(20h) |255:0 |MEASURED_HASH |Measured hash for the related task. | + ************************************************************************************* + */ +struct tdm_detail_task_status { + uint32_t task_id; + uint32_t period_ms; + uint64_t measured_count; + uint32_t last_measure_elapsed_ms; + uint8_t reserved[12]; + uint8_t measured_hash[32]; +} __packed; + +/* + ************************ Hygon TDM Report - ECC256*************************** + *|00h |31:0 |VERSION |Report version. 0... 
| + *|04h |31:0 |FW_VERSION |Firmware version, BUILD_ID | + *|08h |7:0 |REPORT_TYPE |Summary report:0, Detailed report:1 | + *|09h |39:0 |- |Reserved. Set to zero. | + *|0Eh |15:0 |TASK_NUMS |ALL task numbers. | + *|10h |127:0 |TASK_BITMAP |ALL task bitmap. | + *|20h |127:0 |TASK_ERROR_BITMAP |Bitmap for error tasks | + *|30h |127:0 |TASK_RUNNING_BITMAP|Bitmap for running tasks | + *|40h |239:0 |- |Reserved. Set to zero. | + *|5Eh |15:0 |USER_DATA_LEN |User supplied data length. | + *|60h |255:0 |USER_DATA |User supplied data. | + *|80h |255:0 |AGGREGATE_HASH |Aggregate hash for tasks | + ************************************************************************************* + */ +struct tdm_report { + uint32_t version; + uint32_t fw_version; + uint8_t report_type; + uint8_t reserved_0[5]; + uint16_t task_nums; + uint8_t task_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t task_error_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t task_running_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t reserved_1[30]; + uint16_t user_supplied_data_len; + uint8_t user_supplied_data[TDM_MAX_NONCE_SIZE]; + uint8_t aggregate_hash[32]; + struct tdm_detail_task_status detailed_task_status[]; +} __packed; + +/* + ************************ Hygon TDM Report Signature - ECC256************************* + *|A0h |223:0 |- |Reserved. Set to zero | + *|BCh |31:0 |SIG_KEY_USAGE_ID |Key type for sig. | + *|C0h |255:0 |SIG_R |Signature R of key. | + *|E0h |255:0 |SIG_S |Signature S of key. | + ************************************************************************************* + */ +struct tdm_report_sig { + uint8_t reserved[28]; + uint32_t sig_key_usage_id; + uint8_t sig_r[TDM_SIG_LEN]; + uint8_t sig_s[TDM_SIG_LEN]; +} __packed; + +/*Data structure declaration for tdm command/response interface*/ +/* + * The following commands use this structure: + * psp_register_measure_exception_handler + * psp_destroy_measure_task + * psp_update_measure_task + * psp_startstop_measure_task + */ +struct tdm_common_cmd { + uint32_t cmd_type; + uint32_t task_id; + uint16_t code_len; + uint8_t code_val[AUTHCODE_MAX]; + uint8_t context_hash[32]; +} __packed; + +/*TASK_CREATE*/ +struct tdm_create_cmd { + uint32_t cmd_type; + uint32_t cmd_ctx_flag; + struct measure_data m_data; + uint16_t authcode_len; + uint8_t context_hash[32]; + struct addr_range_info range_info; +} __packed; + +struct tdm_create_resp { + uint32_t task_id; + uint16_t authcode_len; + uint8_t authcode_val[AUTHCODE_MAX]; +} __packed; + +/*TASK_VERIFY_AUTH*/ +struct tdm_register_cmd { + struct tdm_common_cmd cmd; +} __packed; + +/*TASK_QUERY*/ +struct tdm_query_cmd { + uint32_t cmd_type; + uint32_t task_id; +} __packed; + +struct tdm_query_resp { + struct measure_status m_status; +} __packed; + +/*TASK_DESTROY*/ +struct tdm_destroy_cmd { + struct tdm_common_cmd cmd; +} __packed; + +/*TASK_UPDATE*/ +struct tdm_update_cmd { + struct tdm_common_cmd cmd; + struct measure_update_data update_data; +} __packed; + +/*TASK_STOP,TASK_START*/ +struct tdm_startstop_cmd { + struct tdm_common_cmd cmd; +} __packed; + +struct tdm_startstop_resp { + struct measure_status m_status; +} __packed; + +/*TDM_VERSION*/ +struct tdm_fw_cmd { + uint32_t cmd_type; +} __packed; + +struct tdm_fw_resp { + struct tdm_version version; +} __packed; + +/*TDM_EXPORT_CERT*/ +struct tdm_export_cert_cmd { + uint32_t cmd_type; + uint32_t key_usage_id; +} __packed; + +struct tdm_export_cert_resp { + struct tdm_cert cert; +} __packed; + +/*TDM_GET_REPORT*/ +struct tdm_get_report_cmd { + uint32_t cmd_type; + uint32_t task_id; + uint16_t
selection_len; + uint8_t selection_bitmap[TDM_MAX_TASK_BITMAP]; + uint16_t user_data_len; + uint8_t user_data_val[TDM_MAX_NONCE_SIZE]; + uint8_t report_type; + uint32_t key_usage_id; +} __packed; + +/* Response: + * struct tdm_report measure_report; + * struct tdm_report_sig measure_report_sig; + */ + +struct tdm_user_report_cmd { + struct tdm_get_report_cmd report_cmd; + uint32_t needed_length; +} __packed; + +/*TDM_VPCR_AUDIT*/ +struct tdm_get_vpcr_cmd { + uint32_t cmd_type; + struct pcr_select pcr; +} __packed; + +struct tdm_get_vpcr_resp { + uint32_t pcr; + struct tpm2b_digest digest; + struct tdm_pcr_value_2b pcr_values; +} __packed; + +struct tdm_show_device { + struct tdm_version version; +} __packed; + +/*Public api definition for tdm*/ +typedef int (*measure_exception_handler_t)(uint32_t task_id); + +int psp_check_tdm_support(void); +int psp_get_fw_info(struct tdm_version *version); +int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code); +int psp_query_measure_status(uint32_t task_id, struct measure_status *status); +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler); +int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code); +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data); +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start); +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert); +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length); +int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values); + +int tdm_dev_init(void); +int tdm_dev_destroy(void); +#endif /* __TDM_DEV_H__ */
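[Editor's note] A hedged usage sketch for the vPCR audit entry point declared above (illustrative only; it assumes, unverified, that pcr_select.hash takes the HASH_ALGO_* ids from this header and that MEASURE_TASK_MAX bounds the returned task list):

static void tdm_vpcr_example(void)
{
	struct pcr_select sel = { .hash = HASH_ALGO_SM3, .pcr = MIN_VPCR };
	struct tpm2b_digest digest;
	struct tdm_pcr_value_2b *vals;

	vals = kzalloc(sizeof(*vals) +
		       MEASURE_TASK_MAX * sizeof(struct tdm_task_data),
		       GFP_KERNEL);
	if (!vals)
		return;

	if (!tdm_get_vpcr_audit(sel, &digest, vals))
		pr_info("vPCR %u aggregates %u tasks\n", MIN_VPCR,
			vals->task_nums);

	kfree(vals);
}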
diff --git a/drivers/crypto/ccp/hygon/tdm-kernel-guard.c b/drivers/crypto/ccp/hygon/tdm-kernel-guard.c new file mode 100644 index 0000000000000000000000000000000000000000..c3afe888ea04d77ed2eb53d67364423915e9b300 --- /dev/null +++ b/drivers/crypto/ccp/hygon/tdm-kernel-guard.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The Hygon TDM KERNEL GUARD module driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +static int eh_obj = -1; +module_param(eh_obj, int, 0644); +MODULE_PARM_DESC(eh_obj, "security enhance object for TDM"); + +/* Objects protected by TDM for now + * SCT: 0 + * IDT: 1 + */ +enum ENHANCE_OBJS { + SCT = 0, + IDT, + MAX_OBJ +}; + +static char *obj_names[MAX_OBJ] = { + "SCT", + "IDT", +}; + +struct tdm_security_enhance { + uint64_t vaddr; + uint32_t size; + struct addr_range_info *mem_range; + struct authcode_2b *authcode; + struct measure_data mdata; + uint32_t context; + uint32_t task_id; + char *obj_name; +} __packed; + +static struct tdm_security_enhance eh_objs[MAX_OBJ]; + +static int tdm_regi_callback_handler(uint32_t task_id) +{ + int i = 0; + int ret = 0; + + for (i = 0; i < MAX_OBJ; i++) { + if (task_id == eh_objs[i].task_id) { + pr_warn("Obj: %s, Task:%d, corruption detected!\n", eh_objs[i].obj_name, + task_id); + pr_warn("Please check if it's intended, or your machine may be in danger!\n"); + break; + } + } + return ret; +} + +static int calc_expected_hash(uint8_t *base_addr, uint32_t size, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + return ret; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret) { + pr_err("crypto_shash_init failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_update(sdesc, base_addr, size); + if (ret) { + pr_err("crypto_shash_update failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + pr_err("crypto_shash_final failed\n"); + ret = -1; + goto out; + } + } + +out: + crypto_free_shash(shash); + return ret; +} + +static int tdm_task_create_and_run(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + int task_id; + + /* psp_create_measure_task() returns a negative DYN_* error or the new + * task id; keep it signed before storing it in data->task_id + */ + task_id = psp_create_measure_task(data->mem_range, &data->mdata, data->context, + data->authcode); + if (task_id < 0) { + ret = task_id; + pr_err("create measurement task failed with 0x%x!\n", task_id); + goto end; + } + data->task_id = task_id; + + ret = psp_register_measure_exception_handler(data->task_id, data->authcode, + tdm_regi_callback_handler); + if (ret < 0) { + pr_err("task_id %d callback function register failed with 0x%x\n", data->task_id, + ret); + goto release_task; + } + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, true); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d start failed with 0x%x\n", data->task_id, ret); + goto release_task; + } + + return ret; + +release_task: + psp_destroy_measure_task(data->task_id, data->authcode); +end: + return ret; +} + +int tdm_service_run(struct tdm_security_enhance *data) +{ + int ret = 0; + struct addr_range_info *addr_range = NULL; + + // Allocate memory for addr_range + addr_range = kzalloc(sizeof(struct addr_range_info) + sizeof(struct addr_info), GFP_KERNEL); + if (!addr_range) { + ret = -DYN_ERR_MEM; + pr_err("addr_range kzalloc memory failed\n"); + goto end; + } + + // Fill in addr_range + addr_range->count = 1; + addr_range->addr[0].addr_start = data->vaddr; + addr_range->addr[0].length = data->size; + data->mem_range = addr_range; + + // Context configuration + data->context |= TASK_CREATE_VADDR; + + // Allocate memory for authcode + data->authcode = kzalloc(sizeof(struct authcode_2b) + AUTHCODE_MAX,
GFP_KERNEL); + if (!data->authcode) { + ret = -DYN_ERR_MEM; + pr_err("authcode_2b kzalloc memory failed\n"); + goto free_addr_range_info; + } + + data->authcode->len = AUTHCODE_MAX; + + // Measurement data configuration + data->mdata.hash_algo = HASH_ALGO_SM3; + data->mdata.period_ms = 0; + ret = calc_expected_hash((uint8_t *)data->vaddr, data->size, + data->mdata.expected_measurement); + if (ret) { + pr_err("calculate expected hash failed!\n"); + goto free_authcode; + } + + // Create and start tdm task + ret = tdm_task_create_and_run(data); + if (ret) { + pr_err("tdm_task_create_and_run failed!\n"); + goto free_authcode; + } + + return ret; + +free_authcode: + kfree(data->authcode); + data->authcode = NULL; +free_addr_range_info: + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +int tdm_service_exit(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, false); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d stop failed with 0x%x\n", data->task_id, ret); + goto end; + } + + // Wait for the task to end + msleep(40); + + psp_destroy_measure_task(data->task_id, data->authcode); + + kfree(data->authcode); + data->authcode = NULL; + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) +static int p_tmp_kprobe_handler(struct kprobe *p_ri, struct pt_regs *p_regs) +{ + return 0; +} + +unsigned long kprobe_symbol_address_byname(const char *name) +{ + int p_ret; + struct kprobe p_kprobe; + unsigned long addr = 0; + + memset(&p_kprobe, 0, sizeof(p_kprobe)); + + p_kprobe.pre_handler = p_tmp_kprobe_handler; + p_kprobe.symbol_name = name; + + p_ret = register_kprobe(&p_kprobe); + if (p_ret < 0) { + pr_err("register_kprobe error [%d]\n", p_ret); + return 0; + } + + addr = (unsigned long)p_kprobe.addr; + unregister_kprobe(&p_kprobe); + + return addr; +} +#endif + +static int __init kernel_security_enhance_init(void) +{ + int i = 0; + int ret = 0; + unsigned long *sct_addr; + struct desc_ptr idtr; +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) + unsigned long (*f_kallsyms_lookup_name)(const char *); + + f_kallsyms_lookup_name = (unsigned long (*)(const char *))kprobe_symbol_address_byname( + "kallsyms_lookup_name"); + if (!f_kallsyms_lookup_name) { + ret = -DYN_ERR_API; + pr_err("kprobe_symbol_address_byname failed!\n"); + goto end; + } + + sct_addr = (unsigned long *)f_kallsyms_lookup_name("sys_call_table"); +#else + + sct_addr = (unsigned long *)kallsyms_lookup_name("sys_call_table"); +#endif + if (!sct_addr) { + ret = -DYN_ERR_API; + pr_err("kallsyms_lookup_name for sys_call_table failed!\n"); + goto end; + } + + asm("sidt %0":"=m"(idtr)); + + if (!psp_check_tdm_support()) + return 0; + + for (i = 0; i < MAX_OBJ; i++) { + memset(&eh_objs[i], 0, sizeof(eh_objs[i])); + eh_objs[i].context = CONTEXT_CHECK_MODNAME; + eh_objs[i].obj_name = obj_names[i]; + } + + if ((eh_obj == -1) || (eh_obj & (1 << SCT))) { + eh_objs[SCT].vaddr = (uint64_t)sct_addr; + eh_objs[SCT].size = NR_syscalls * sizeof(char *); + } + if ((eh_obj == -1) || (eh_obj & (1 << IDT))) { + eh_objs[IDT].vaddr = idtr.address; + eh_objs[IDT].size = idtr.size; + } + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_run(&eh_objs[i]); + } + + pr_info("Hygon TDM guard loaded successfully!\n"); + +end: + return ret; +}
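[Editor's note] For reference, eh_obj is a bit mask over enum ENHANCE_OBJS, so loading the module with, say, eh_obj=0x1 (hypothetical command line: insmod tdm-kernel-guard.ko eh_obj=0x1) arms only the syscall-table watch, while the default -1 arms every object. A comment-style illustration of the effect on the table built above:

/* eh_obj == -1   -> eh_objs[SCT] and eh_objs[IDT] both get a vaddr
 * eh_obj == 0x1  -> only eh_objs[SCT] is armed; eh_objs[IDT].vaddr
 *                   stays 0, so the tdm_service_run() loop skips it
 */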
static void __exit kernel_security_enhance_exit(void) +{ + int i = 0; + + if (!psp_check_tdm_support()) + return; + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_exit(&eh_objs[i]); + } + pr_info("Hygon TDM guard unloaded successfully!\n"); +} + +MODULE_AUTHOR("niuyongwen@hygon.cn"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("Kernel security enhancement module by TDM"); + +/* + * kernel_security_enhance_init must run after the ccp module init. + * That's why we use device_initcall_sync, which is + * called after all device_initcall()s (including ccp) but before the + * late_initcall()s (including ima). + */ +device_initcall_sync(kernel_security_enhance_init); +module_exit(kernel_security_enhance_exit); diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c new file mode 100644 index 0000000000000000000000000000000000000000..13208fe2c4b3d37acb4af0cf06d5ddb27cdbc86e --- /dev/null +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PSP virtualization + * + * Copyright (c) 2023, HYGON CORPORATION. All rights reserved. + * Author: Ge Yang + * + */ + +#include +#include +#include +#include +#include +#include + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "vpsp: " fmt + +/* + * This file implements the base execution logic of the virtual PSP + * in kernel mode, which mainly includes: + * (1) Obtain the VM command and preprocess the pointer + * mapping table information in the command buffer + * (2) Send the converted command to the PSP through the driver + * channel and try to obtain the execution result + * (3) Recover the executed command data according to the multilevel + * pointers of the mapping table, and return it to the VM + * + * Call trace of the primary implementation logic of the virtual PSP + * in kernel mode: + * guest command(vmmcall) + * | + * | |-> kvm_pv_psp_cmd_pre_op + * | | + * | | -> guest_addr_map_table_op + * | | + * | | -> guest_multiple_level_gpa_replace + * | + * kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver + * | + * | + * |-> kvm_pv_psp_cmd_post_op + * | + * | -> guest_addr_map_table_op + * | + * | -> guest_multiple_level_gpa_restore + */ + +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f + +struct psp_cmdresp_head { + uint32_t buf_size; + uint32_t cmdresp_size; + uint32_t cmdresp_code; +} __packed; + +/** + * struct map_tbl - multilevel pointer address mapping table + * + * @parent_pa: parent address block's physical address + * @offset: offset in parent address block + * @size: submemory size + * @align: submemory alignment; the hva must keep this alignment in the kernel + * @hva: kernel virtual address of the submemory copy block + */ +struct map_tbl { + uint64_t parent_pa; + uint32_t offset; + uint32_t size; + uint32_t align; + uint64_t hva; +} __packed; + +struct addr_map_tbls { + uint32_t tbl_nums; + struct map_tbl tbl[]; +} __packed; + +/* gpa and hva conversion maintenance table for internal use */ +struct gpa2hva_t { + void *hva; + gpa_t gpa; +}; + +struct gpa2hva_tbls { + uint32_t max_nums; + uint32_t tbl_nums; + struct gpa2hva_t tbl[]; +}; + +/* save command data for restoring later */ +struct vpsp_hbuf_wrapper { + void *data; + uint32_t data_size; + struct addr_map_tbls *map_tbls; + struct gpa2hva_tbls *g2h_tbls; +}; + +/* Virtual PSP host memory information maintenance, used in ringbuffer mode */ +struct vpsp_hbuf_wrapper +g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0};
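[Editor's note] A sketch of the data layout that one map_tbl entry describes, assuming a guest command buffer that embeds a single guest-physical pointer at a known offset (names and layout hypothetical, for illustration only):

/* Guest view:                        Host view after replace:
 *   cmd (gpa P)                        cmd copy (hva H)
 *   +--------------------+             +--------------------+
 *   | ... | child gpa C  |   ---->     | ... | __psp_pa(h)  |
 *   +--------------------+             +--------------------+
 *   child buf (gpa C)                  child copy (hva h)
 *
 * map_tbl entry: { .parent_pa = P, .offset = offset of the child
 * pointer inside cmd, .size = child buffer size, .align = required
 * alignment }; the gpa2hva table remembers (C, h) so that restore can
 * copy the data back and re-patch C into the parent block.
 */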
+void __maybe_unused
+map_tbl_dump(const char *title, struct addr_map_tbls *tbls)
+{
+	int i;
+
+	pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums);
+	for (i = 0; i < tbls->tbl_nums; i++) {
+		pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx",
+			i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset,
+			tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva);
+	}
+	pr_info("\n");
+}
+
+void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls)
+{
+	int i;
+
+	pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums,
+		tbls->max_nums);
+	for (i = 0; i < tbls->tbl_nums; i++)
+		pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i,
+			(uint64_t)tbls->tbl[i].hva, tbls->tbl[i].gpa);
+	pr_info("\n");
+}
+
+static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa)
+{
+	uint32_t fill_idx = tbls->tbl_nums;
+
+	if (fill_idx >= tbls->max_nums)
+		return -EFAULT;
+
+	tbls->tbl[fill_idx].hva = hva;
+	tbls->tbl[fill_idx].gpa = gpa;
+	tbls->tbl_nums = fill_idx + 1;
+
+	return 0;
+}
+
+static void clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva)
+{
+	int i;
+
+	for (i = 0; i < g2h->tbl_nums; i++) {
+		if (g2h->tbl[i].hva == hva)
+			g2h->tbl[i].hva = NULL;
+	}
+}
+
+static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa)
+{
+	int i;
+
+	for (i = 0; i < g2h->tbl_nums; i++) {
+		if (g2h->tbl[i].gpa == gpa)
+			return (void *)g2h->tbl[i].hva;
+	}
+
+	return NULL;
+}
+
+static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva)
+{
+	int i;
+
+	for (i = 0; i < g2h->tbl_nums; i++) {
+		if (g2h->tbl[i].hva == hva)
+			return g2h->tbl[i].gpa;
+	}
+
+	return 0;
+}
+
+/*
+ * Execution worker for the replace pass over a multilevel pointer in the
+ * guest command buffer: synchronize the data behind the original gpa into
+ * a newly allocated hva (host virtual address) and update the pointer in
+ * the parent memory block to reference it
+ */
+static int guest_multiple_level_gpa_replace(struct kvm_vpsp *vpsp,
+		struct map_tbl *tbl, struct gpa2hva_tbls *g2h)
+{
+	int ret = 0;
+	uint32_t sub_block_size;
+	uint64_t sub_paddr;
+	void *parent_kva = NULL;
+
+	/* kmalloc memory for the child block */
+	sub_block_size = max(tbl->size, tbl->align);
+	tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL);
+	if (!tbl->hva)
+		return -ENOMEM;
+
+	/* get the child gpa from the parent gpa */
+	if (unlikely(vpsp->read_guest(vpsp->kvm, tbl->parent_pa + tbl->offset,
+				      &sub_paddr, sizeof(sub_paddr)))) {
+		pr_err("[%s]: kvm_read_guest for parent gpa failed\n",
+		       __func__);
+		ret = -EFAULT;
+		goto e_free;
+	}
+
+	/* copy child block data from gpa to hva */
+	if (unlikely(vpsp->read_guest(vpsp->kvm, sub_paddr, (void *)tbl->hva,
+				      tbl->size))) {
+		pr_err("[%s]: kvm_read_guest for sub_data failed\n",
+		       __func__);
+		ret = -EFAULT;
+		goto e_free;
+	}
+
+	/* get hva from gpa */
+	parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa);
+	if (unlikely(!parent_kva)) {
+		pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n",
+		       __func__);
+		ret = -EFAULT;
+		goto e_free;
+	}
+
+	/* replace the gpa in the parent copy with the pa of the hva */
+	*(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva);
+
+	/* fill gpa and hva into the map table for restoring later */
+	if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) {
+		pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n",
+		       __func__);
+		ret = -EFAULT;
+		goto e_free;
+	}
+
+	return ret;
+
+e_free:
+	kfree((const void *)tbl->hva);
+	return ret;
+}
+
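To make the replace pass concrete: suppose the guest command buffer embeds a single guest-physical pointer to a child buffer. The struct and the values below are hypothetical; only the mechanics mirror the function above:

/* Hypothetical guest command carrying one nested pointer */
struct guest_cmd {
	struct psp_cmdresp_head head;
	uint64_t child_pa;	/* guest-physical address of a child buffer */
} __packed;

/*
 * The guest would describe the nesting with one map_tbl entry:
 *   .parent_pa = data_gpa (the root command buffer),
 *   .offset    = offsetof(struct guest_cmd, child_pa),
 *   .size      = size of the child buffer,
 *   .align     = alignment the device expects.
 *
 * guest_multiple_level_gpa_replace() then allocates a host copy of the
 * child, reads its contents from child_pa, and overwrites child_pa in
 * the host copy of the parent with __psp_pa(hva), so the PSP only ever
 * sees host-physical addresses.
 */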
+/*
+ * Execution worker for the restore pass: synchronize the data in the hva
+ * (host virtual address) back to the memory behind the gpa and restore
+ * the original pointer in the parent memory block
+ */
+static int guest_multiple_level_gpa_restore(struct kvm_vpsp *vpsp,
+		struct map_tbl *tbl, struct gpa2hva_tbls *g2h)
+{
+	int ret = 0;
+	gpa_t sub_gpa;
+	void *parent_hva = NULL;
+
+	/* get gpa from hva */
+	sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva);
+	if (unlikely(!sub_gpa)) {
+		pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n",
+		       __func__);
+		ret = -EFAULT;
+		goto end;
+	}
+
+	/* copy child block data from hva back to gpa */
+	if (unlikely(vpsp->write_guest(vpsp->kvm, sub_gpa, (void *)tbl->hva,
+				       tbl->size))) {
+		pr_err("[%s]: kvm_write_guest for sub_gpa failed\n",
+		       __func__);
+		ret = -EFAULT;
+		goto end;
+	}
+
+	/* get the parent hva from the parent gpa */
+	parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa);
+	if (unlikely(!parent_hva)) {
+		pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n",
+		       __func__);
+		ret = -EFAULT;
+		goto end;
+	}
+
+	/* restore the gpa over the pa of the hva in the parent block */
+	*(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa;
+
+	/* free child block memory */
+	clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva);
+	kfree((const void *)tbl->hva);
+	tbl->hva = 0;
+
+end:
+	return ret;
+}
+
+/*
+ * Top-level handler for multilevel pointer processing of the guest
+ * command memory. It dispatches to the two sub-operations: replace
+ * (@op == 0, walked front to back so parents are mapped before their
+ * children) and restore (@op != 0, walked back to front so children
+ * are written back before their parents)
+ */
+static int guest_addr_map_table_op(struct kvm_vpsp *vpsp, struct gpa2hva_tbls *g2h,
+		struct addr_map_tbls *map_tbls, int op)
+{
+	int ret = 0;
+	int i;
+	uint64_t *sub_paddr_ptr;
+
+	if (op) {
+		for (i = map_tbls->tbl_nums - 1; i >= 0; i--) {
+			/* check if the gpa of the root points to itself */
+			if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) {
+				sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva
+						+ map_tbls->tbl[i].offset);
+				/* if the child paddr is equal to the parent paddr */
+				if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) {
+					*sub_paddr_ptr = g2h->tbl[0].gpa;
+					continue;
+				}
+			}
+
+			/* restore the gpa from the guest over the new pa of the kva */
+			if (unlikely(guest_multiple_level_gpa_restore(vpsp,
+					&map_tbls->tbl[i], g2h))) {
+				pr_err("[%s]: guest_multiple_level_gpa_restore failed\n",
+				       __func__);
+				ret = -EFAULT;
+				goto end;
+			}
+		}
+	} else {
+		for (i = 0; i < map_tbls->tbl_nums; i++) {
+			/* check if the gpa of the root points to itself */
+			if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) {
+				sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva
+						+ map_tbls->tbl[i].offset);
+				/* if the child paddr is equal to the parent paddr */
+				if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) {
+					*sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva);
+					map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva;
+					continue;
+				}
+			}
+
+			/* check if parent_pa is valid */
+			if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) {
+				pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n",
+				       __func__, i, map_tbls->tbl[i].parent_pa);
+				ret = -EFAULT;
+				goto end;
+			}
+
+			/* replace the gpa from the guest with the new pa of the kva */
+			if (unlikely(guest_multiple_level_gpa_replace(vpsp,
+					&map_tbls->tbl[i], g2h))) {
+				pr_err("[%s]: guest_multiple_level_gpa_replace failed\n",
+				       __func__);
+				ret = -EFAULT;
+				goto end;
+			}
+		}
+	}
+
+end:
+	return ret;
+}
+
+static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls *map_tbl,
+		void *data)
+{
+	int i;
+
+	if (g2h) {
+		for (i = 0; i < g2h->tbl_nums; i++) {
+			if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) {
+				kfree(g2h->tbl[i].hva);
+				g2h->tbl[i].hva = NULL;
+			}
+		}
+		kfree(g2h);
+	}
+
+	kfree(map_tbl);
+	kfree(data);
+}
+
+/*
+ * Obtain the VM command and preprocess the pointer mapping table
+ * information in the command buffer; the processed data will be
+ * used to interact with the psp device
+ */
+static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
+		gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf)
+{
+	int ret = 0;
+	void *data = NULL;
+	struct psp_cmdresp_head psp_head;
+	uint32_t data_size;
+	struct addr_map_tbls map_head, *map_tbls = NULL;
+	uint32_t map_tbl_size;
+	struct gpa2hva_tbls *g2h = NULL;
+	uint32_t g2h_tbl_size;
+
+	if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head,
+				      sizeof(struct psp_cmdresp_head))))
+		return -EFAULT;
+
+	data_size = psp_head.buf_size;
+	data = kzalloc(data_size, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) {
+		ret = -EFAULT;
+		goto end;
+	}
+
+	if (table_gpa) {
+		/* parse the address map table from the guest */
+		if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, &map_head,
+					      sizeof(struct addr_map_tbls)))) {
+			pr_err("[%s]: kvm_read_guest for map_head failed\n",
+			       __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+
+		map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums
+				* sizeof(struct map_tbl);
+		map_tbls = kzalloc(map_tbl_size, GFP_KERNEL);
+		if (!map_tbls) {
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, map_tbls,
+					      map_tbl_size))) {
+			pr_err("[%s]: kvm_read_guest for map_tbls failed\n",
+			       __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+
+		/* init the gpa2hva table */
+		g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums
+				+ 1) * sizeof(struct gpa2hva_t);
+		g2h = kzalloc(g2h_tbl_size, GFP_KERNEL);
+		if (!g2h) {
+			ret = -ENOMEM;
+			goto end;
+		}
+		g2h->max_nums = map_head.tbl_nums + 1;
+
+		/* fill in the root parent address */
+		if (gpa2hva_tbl_fill(g2h, data, data_gpa)) {
+			pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n",
+			       __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+
+		if (guest_addr_map_table_op(vpsp, g2h, map_tbls, 0)) {
+			pr_err("[%s]: guest_addr_map_table_op for replacing failed\n",
+			       __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+	}
+
+	hbuf->data = data;
+	hbuf->data_size = data_size;
+	hbuf->map_tbls = map_tbls;
+	hbuf->g2h_tbls = g2h;
+
+end:
+	/* release everything allocated so far if preprocessing failed */
+	if (ret)
+		kvm_pv_psp_mem_free(g2h, map_tbls, data);
+	return ret;
+}
+
+/*
+ * The executed command data is recovered according to the multilevel
+ * pointers in the mapping table once the command has finished
+ * interacting with the psp device
+ */
+static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa,
+		struct vpsp_hbuf_wrapper *hbuf)
+{
+	int ret = 0;
+
+	if (hbuf->map_tbls) {
+		if (guest_addr_map_table_op(vpsp, hbuf->g2h_tbls,
+					    hbuf->map_tbls, 1)) {
+			pr_err("[%s]: guest_addr_map_table_op for restoring failed\n",
+			       __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+	}
+
+	/* restore cmdresp's buffer from the context */
+	if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data,
+				       hbuf->data_size))) {
+		pr_err("[%s]: kvm_write_guest for cmdresp data failed\n",
+		       __func__);
+		ret = -EFAULT;
+		goto end;
+	}
+
+end:
+	/* release memory and clear hbuf */
+	kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, hbuf->data);
+	memset(hbuf, 0, sizeof(*hbuf));
+
+	return ret;
+}
+
+static int cmd_type_is_tkm(int cmd)
+{
+	if (cmd >= TKM_CMD_ID_MIN && cmd <= TKM_CMD_ID_MAX)
+		return 1;
+	return 0;
+}
+
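For reference, cmd is reinterpreted through struct vpsp_cmd (declared in include/linux/psp-hygon.h below): bits 0-30 carry cmd_id and bit 31 selects the high-priority ringbuffer. An illustrative encoder, not part of this patch:

/* Illustrative only: encode a TKM command for the high-priority ring */
static inline u32 make_tkm_cmd_high(u32 tkm_cmd_id)
{
	u32 cmd_word = tkm_cmd_id;	/* e.g. 0x121, within [0x120, 0x12f] */

	cmd_word |= 1u << 31;		/* vpsp_cmd.is_high_rb */
	return cmd_word;
}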
PSP in kernel mode + */ +int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa) +{ + int ret = 0; + struct vpsp_ret psp_ret = {0}; + struct vpsp_hbuf_wrapper hbuf = {0}; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + uint32_t index = 0; + uint32_t vid = 0; + + // only tkm cmd need vid + if (cmd_type_is_tkm(vcmd->cmd_id)) { + // check the permission to use the default vid when no vid is set + ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid); + if (ret && !vpsp_get_default_vid_permission()) { + pr_err("[%s]: not allowed tkm command without vid\n", __func__); + return -EFAULT; + } + } + + if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, + sizeof(psp_ret)))) + return -EFAULT; + + switch (psp_ret.status) { + case VPSP_INIT: + /* multilevel pointer replace*/ + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, table_gpa, &hbuf); + if (unlikely(ret)) { + psp_ret.status = VPSP_FINISH; + pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* try to send command to the device for execution*/ + ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data, + (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + ret = -EFAULT; + goto end; + } + + switch (psp_ret.status) { + case VPSP_RUNNING: + /* backup host memory message for restoring later*/ + prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + g_hbuf_wrap[prio][psp_ret.index] = hbuf; + break; + + case VPSP_FINISH: + /* restore multilevel pointer data */ + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + break; + + default: + ret = -EFAULT; + break; + } + break; + + case VPSP_RUNNING: + prio = vcmd->is_high_rb ? 
+	case VPSP_RUNNING:
+		prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH :
+					  CSV_COMMAND_PRIORITY_LOW;
+		index = psp_ret.index;
+		/* try to get the execution result from the ringbuffer */
+		ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data,
+					  (struct vpsp_ret *)&psp_ret);
+		if (unlikely(ret)) {
+			pr_err("[%s]: vpsp_try_get_result failed\n", __func__);
+			ret = -EFAULT;
+			goto end;
+		}
+
+		switch (psp_ret.status) {
+		case VPSP_RUNNING:
+			break;
+
+		case VPSP_FINISH:
+			/* restore multilevel pointer data */
+			ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa,
+						     &g_hbuf_wrap[prio][index]);
+			if (unlikely(ret)) {
+				pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n",
+				       __func__);
+				ret = -EFAULT;
+				goto end;
+			}
+			break;
+
+		default:
+			ret = -EFAULT;
+			break;
+		}
+		break;
+
+	default:
+		pr_err("[%s]: invalid command status\n", __func__);
+		ret = -EFAULT;
+		break;
+	}
+end:
+	/* return psp_ret to the guest */
+	vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret));
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_pv_psp_op);
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 88478187de3ce41eda8f905f4bfa706f5a2cb8cf..49dde0baae4178f06bd6b5d3357b2db7f4bd2f76 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -18,6 +18,9 @@
 #include "dbc.h"
 #include "hygon/psp-dev.h"
+#ifdef CONFIG_TDM_DEV_HYGON
+#include "hygon/tdm-dev.h"
+#endif
 
 struct psp_device *psp_master;
 
@@ -153,6 +156,14 @@ static int psp_init(struct psp_device *psp)
 	if (psp->vdata->platform_access)
 		psp_init_platform_access(psp);
 
+#ifdef CONFIG_TDM_DEV_HYGON
+	if (is_vendor_hygon()) {
+		ret = tdm_dev_init();
+		if (ret)
+			return ret;
+	}
+#endif
+
 	return 0;
 }
 
@@ -188,6 +199,11 @@ int psp_dev_init(struct sp_device *sp)
 
 	/* Request an irq */
 	if (is_vendor_hygon()) {
+		ret = hygon_psp_additional_setup(sp);
+		if (ret) {
+			dev_err(dev, "psp: unable to do additional setup\n");
+			goto e_err;
+		}
 		ret = sp_request_hygon_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
 	} else {
 		ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
@@ -237,6 +253,11 @@ void psp_dev_destroy(struct sp_device *sp)
 	if (!psp)
 		return;
 
+#ifdef CONFIG_TDM_DEV_HYGON
+	if (is_vendor_hygon())
+		tdm_dev_destroy();
+#endif
+
 	sev_dev_destroy(psp);
 
 	tee_dev_destroy(psp);
@@ -247,6 +268,9 @@ void psp_dev_destroy(struct sp_device *sp)
 
 	sp_free_psp_irq(sp, psp);
 
+	if (is_vendor_hygon() && hygon_psp_hooks.psp_misc)
+		kref_put(&hygon_psp_hooks.psp_misc->refcount, hygon_psp_exit);
+
 	if (sp->clear_psp_master_device)
 		sp->clear_psp_master_device(sp);
 }
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index f0fc57bdff0492cb9e98856605b4d259d0e9cae5..8231b88663ccbff70c9fd208f62476c0d8f9e62e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -408,10 +408,20 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
 static int sev_do_cmd(int cmd, void *data, int *psp_ret)
 {
 	int rc;
+	int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);
 
-	mutex_lock(&sev_cmd_mutex);
+	if (is_vendor_hygon() && mutex_enabled) {
+		if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex,
+					   PSP_MUTEX_TIMEOUT) != 1)
+			return -EBUSY;
+	} else {
+		mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
+	}
 	rc = __sev_do_cmd_locked(cmd, data, psp_ret);
-	mutex_unlock(&sev_cmd_mutex);
+	if (is_vendor_hygon() && mutex_enabled)
+		psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex);
+	else
+		mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);
 
 	return rc;
 }
 
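The lock-or-timeout pattern introduced in sev_do_cmd() is repeated verbatim in sev_platform_init(), sev_platform_shutdown() and sev_ioctl() below. A pair of helpers could consolidate it; the names here are hypothetical, and since psp_mutex_enabled is re-read in each helper, a real refactor would have to keep the lock and unlock decisions consistent if the flag can change at runtime:

/* Hypothetical consolidation of the repeated locking pattern */
static int sev_cmd_mutex_acquire(void)
{
	int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);

	if (is_vendor_hygon() && mutex_enabled) {
		if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex,
					   PSP_MUTEX_TIMEOUT) != 1)
			return -EBUSY;
	} else {
		mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
	}
	return 0;
}

static void sev_cmd_mutex_release(void)
{
	int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);

	if (is_vendor_hygon() && mutex_enabled)
		psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex);
	else
		mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);
}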
@@ -529,10 +539,20 @@ static int __sev_platform_init_locked(int *error)
 int sev_platform_init(int *error)
 {
 	int rc;
+	int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);
 
-	mutex_lock(&sev_cmd_mutex);
+	if (is_vendor_hygon() && mutex_enabled) {
+		if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex,
+					   PSP_MUTEX_TIMEOUT) != 1)
+			return -EBUSY;
+	} else {
+		mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
+	}
 	rc = __sev_platform_init_locked(error);
-	mutex_unlock(&sev_cmd_mutex);
+	if (is_vendor_hygon() && mutex_enabled)
+		psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex);
+	else
+		mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);
 
 	return rc;
 }
 
@@ -569,10 +589,20 @@ static int __sev_platform_shutdown_locked(int *error)
 static int sev_platform_shutdown(int *error)
 {
 	int rc;
+	int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);
 
-	mutex_lock(&sev_cmd_mutex);
+	if (is_vendor_hygon() && mutex_enabled) {
+		if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex,
+					   PSP_MUTEX_TIMEOUT) != 1)
+			return -EBUSY;
+	} else {
+		mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
+	}
 	rc = __sev_platform_shutdown_locked(NULL);
-	mutex_unlock(&sev_cmd_mutex);
+	if (is_vendor_hygon() && mutex_enabled)
+		psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex);
+	else
+		mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);
 
 	return rc;
 }
 
@@ -1119,6 +1149,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 	struct sev_issue_cmd input;
 	int ret = -EFAULT;
 	bool writable = file->f_mode & FMODE_WRITE;
+	int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled);
 
 	if (!psp_master || !psp_master->sev_data)
 		return -ENODEV;
@@ -1132,7 +1163,13 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 	if (input.cmd > SEV_MAX)
 		return -EINVAL;
 
-	mutex_lock(&sev_cmd_mutex);
+	if (is_vendor_hygon() && mutex_enabled) {
+		if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex,
+					   PSP_MUTEX_TIMEOUT) != 1)
+			return -EBUSY;
+	} else {
+		mutex_lock(hygon_psp_hooks.sev_cmd_mutex);
+	}
 
 	switch (input.cmd) {
 
@@ -1172,7 +1209,10 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
 	if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
 		ret = -EFAULT;
 out:
-	mutex_unlock(&sev_cmd_mutex);
+	if (is_vendor_hygon() && mutex_enabled)
+		psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex);
+	else
+		mutex_unlock(hygon_psp_hooks.sev_cmd_mutex);
 
 	return ret;
 }
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 18c8d6495f0899a5bdd815aa13fe6ffba1ef8b07..010c89f4b7727323ff690664a69ed885f771b838 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -58,4 +58,5 @@ extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 extern void cma_reserve_pages_on_error(struct cma *cma);
 extern int __init cma_alloc_areas(unsigned int max_cma_size);
+extern void cma_enable_concurrency(struct cma *cma);
 #endif
diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h
index efb618a0eb6e8443b08d4ad0dd1cdc8a84275d14..41308eed5a277384ca46d0f87da77cda4cf93924 100644
--- a/include/linux/psp-hygon.h
+++ b/include/linux/psp-hygon.h
@@ -12,6 +12,7 @@
 
 #include 
 #include 
+#include 
 
 /*****************************************************************************/
 /***************************** CSV interface *********************************/
@@ -305,8 +306,62 @@ struct csv3_data_dbg_read_mem {
 	u32 size;			/* In */
 } __packed;
 
+/*
+ * enum VPSP_CMD_STATUS - virtual psp command status
+ *
+ * @VPSP_INIT: the initial command from the guest
+ * @VPSP_RUNNING: intermediate state; check for and run the ringbuffer command
+ * @VPSP_FINISH: final state; informs the guest that the command completed
+ */
+enum VPSP_CMD_STATUS {
+	VPSP_INIT = 0,
+	VPSP_RUNNING,
+	VPSP_FINISH,
+	VPSP_MAX
+};
+
+/**
+ * struct vpsp_cmd - virtual psp command
+ *
+ * @cmd_id: command id, used to distinguish different commands
+ * @is_high_rb: selects the ringbuffer priority level in which the command is placed
+ */
+struct vpsp_cmd {
+	u32 cmd_id	: 31;
+	u32 is_high_rb	: 1;
+};
+
+/**
+ * struct vpsp_ret - virtual psp return result
+ *
+ * @pret: the return code from the device
+ * @resv: reserved bits
+ * @index: used to distinguish the position of the command in the ringbuffer
+ * @status: indicates the current status of the related command
+ */
+struct vpsp_ret {
+	u32 pret	: 16;
+	u32 resv	: 2;
+	u32 index	: 12;
+	u32 status	: 2;
+};
+
+struct kvm_vpsp {
+	struct kvm *kvm;
+	int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len);
+	int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
+};
+
+#define PSP_VID_MASK		0xff
+#define PSP_VID_SHIFT		56
+#define PUT_PSP_VID(hpa, vid)	((__u64)(hpa) | ((__u64)(PSP_VID_MASK & (vid)) << PSP_VID_SHIFT))
+#define GET_PSP_VID(hpa)	((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK)
+#define CLEAR_PSP_VID(hpa)	((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT))
+
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP
+int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret);
+
 int psp_do_cmd(int cmd, void *data, int *psp_ret);
 
 int csv_ring_buffer_queue_init(void);
@@ -320,8 +375,21 @@ int csv_check_stat_queue_status(int *psp_ret);
  */
 int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret);
 
+int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index,
+			void *data, struct vpsp_ret *psp_ret);
+
+int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret);
+
+int vpsp_get_vid(uint32_t *vid, pid_t pid);
+
+int vpsp_get_default_vid_permission(void);
+
+int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa,
+		  gpa_t table_gpa);
+
 #else	/* !CONFIG_CRYPTO_DEV_SP_PSP */
 
+static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; }
+
 static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; }
 
 static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; }
@@ -332,6 +400,23 @@ static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; }
 static inline int
 csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; }
 
+static inline int
+vpsp_try_get_result(uint32_t vid, uint8_t prio,
+		    uint32_t index, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; }
+
+static inline int
+vpsp_try_do_cmd(uint32_t vid, int cmd,
+		void *data, struct vpsp_ret *psp_ret) { return -ENODEV; }
+
+static inline int
+vpsp_get_vid(uint32_t *vid, pid_t pid) { return -ENODEV; }
+
+static inline int
+vpsp_get_default_vid_permission(void) { return -ENODEV; }
+
+static inline int
+kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa,
+	      gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; }
+
 #endif	/* CONFIG_CRYPTO_DEV_SP_PSP */
 
 typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data);
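struct kvm_vpsp decouples this header's consumers from KVM internals: the two callbacks give vpsp.c guest-memory access without linking against kvm directly. The KVM side is not part of this diff, but it would presumably wire the hypercall up along these lines (illustrative sketch; the handler name is invented, the accessors are the standard KVM ones):

/* Hypothetical KVM-side dispatch for the KVM_HC_PSP_OP hypercall */
static int kvm_handle_psp_op(struct kvm_vcpu *vcpu, unsigned long cmd,
			     gpa_t data_gpa, gpa_t psp_ret_gpa, gpa_t table_gpa)
{
	struct kvm_vpsp vpsp = {
		.kvm		= vcpu->kvm,
		.write_guest	= kvm_write_guest,
		.read_guest	= kvm_read_guest,
	};

	return kvm_pv_psp_op(&vpsp, cmd, data_gpa, psp_ret_gpa, table_gpa);
}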
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 76ee067a962c93ed72c1c207393673532fe4def6..a2a4e975fb0cbc437620cec29b8c10777c569cf2 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -13,6 +13,7 @@
 #define __PSP_SEV_H__
 
 #include 
+#include 
 
 #define SEV_FW_BLOB_MAX_SIZE	0x4000	/* 16KB */
 
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 3ce071c933a86047a3dcf7bec592f397969cc8df..052d6f416445ddab25eb61f8394d87a43fa92475 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -2310,4 +2310,25 @@ struct kvm_csv_init {
 #define KVM_CONTROL_PRE_SYSTEM_RESET	_IO(KVMIO, 0xe8)
 #define KVM_CONTROL_POST_SYSTEM_RESET	_IO(KVMIO, 0xe9)
 
+/* CSV3 command */
+enum csv3_cmd_id {
+	KVM_CSV3_NR_MIN = 0xc0,
+
+	KVM_CSV3_INIT = KVM_CSV3_NR_MIN,
+	KVM_CSV3_LAUNCH_ENCRYPT_DATA,
+	KVM_CSV3_LAUNCH_ENCRYPT_VMCB,
+
+	KVM_CSV3_NR_MAX,
+};
+
+struct kvm_csv3_init_data {
+	__u64 nodemask;
+};
+
+struct kvm_csv3_launch_encrypt_data {
+	__u64 gpa;
+	__u64 uaddr;
+	__u32 len;
+};
+
 #endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index 67192835455e0707993d021f38991803bce2c550..86369b7a573397a66e4b371c5e21a32c7fac5ccc 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -31,6 +31,7 @@
 #define KVM_HC_SCHED_YIELD		11
 #define KVM_HC_MAP_GPA_RANGE		12
 #define KVM_HC_VM_ATTESTATION		100	/* Specific to Hygon CPU */
+#define KVM_HC_PSP_OP			101	/* Specific to Hygon platform */
 
 /*
  * hypercalls use architecture specific
diff --git a/mm/cma.c b/mm/cma.c
index 721316622bca835fd7490847fde5c9aa465746dd..0238fc625127366b7354172f611f973f8e3c0c5f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -496,10 +496,12 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-		mutex_lock(&cma_mutex);
+		if (!cma->no_mutex)
+			mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
 				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-		mutex_unlock(&cma_mutex);
+		if (!cma->no_mutex)
+			mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
 			break;
@@ -613,3 +615,11 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 
 	return 0;
 }
+
+void cma_enable_concurrency(struct cma *cma)
+{
+	if (!cma)
+		return;
+
+	cma->no_mutex = true;
+}
diff --git a/mm/cma.h b/mm/cma.h
index 12aba820969c236691617e8cbabf208d8af9a0e1..50275c1d98cc656497018577da57bb9417e47846 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,6 +16,7 @@ struct cma {
 	unsigned long *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
 	spinlock_t lock;
+	bool no_mutex;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
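cma_enable_concurrency() lets a CMA area opt out of the global cma_mutex that otherwise serializes every alloc_contig_range() call, so multiple cma_alloc() calls on that area can run in parallel. A hypothetical caller, using the existing CMA reservation API (the area name and setup function are assumptions):

#include <linux/cma.h>

static struct cma *csv_cma;

/* Hypothetical: reserve an area and allow concurrent allocations from it */
static int __init csv_cma_setup(phys_addr_t base, phys_addr_t size)
{
	int ret;

	ret = cma_declare_contiguous(base, size, 0, 0, 0, false,
				     "csv-cma", &csv_cma);
	if (ret)
		return ret;

	/* allocations from this area no longer take the global cma_mutex */
	cma_enable_concurrency(csv_cma);
	return 0;
}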