From 68e55e76be0bed2d5b1c1bd5d3099c8ba87e5455 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 29 Dec 2025 15:31:33 +0800 Subject: [PATCH 1/5] x86/mm: csv: Rename CSV3 CMA-related variables and functions Upstream: no The following patches will add support for reserving SMCRs at host boot time and for allocating CSV3 private memory from 1G hugetlb pages. This patch renames variables and functions closely related to CSV3 CMA, and wraps the code that directly depends on Linux CMA with `#ifdef CONFIG_CMA`. This improves readability when the subsequent patches are applied. No functional changes. Hygon-SIG: commit none hygon x86/mm: csv: Rename CSV3 CMA-related variables and functions Signed-off-by: hanliyang --- arch/x86/mm/mem_encrypt_hygon.c | 313 +++++++++++++++++--------------- 1 file changed, 171 insertions(+), 142 deletions(-) diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index c3cc7b00d943..acda02d8b496 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -136,68 +136,43 @@ bool csv3_active(void) } EXPORT_SYMBOL_GPL(csv3_active); -/******************************************************************************/ -/**************************** CSV3 CMA interfaces *****************************/ -/******************************************************************************/ - -#define CSV_MEM_PCT_MAX (95U) - -/* 0 percent of total memory by default*/ -static unsigned char csv_mem_percentage; -static unsigned long csv_mem_size; - -static int __init cmdline_parse_csv_mem_size(char *str) -{ - unsigned long size; - char *endp; - - if (str) { - size = memparse(str, &endp); - csv_mem_size = size; - if (!csv_mem_size) - csv_mem_percentage = 0; - } - - return 0; -} -early_param("csv_mem_size", cmdline_parse_csv_mem_size); - -static int __init cmdline_parse_csv_mem_percentage(char *str) -{ - unsigned char percentage; - int ret; - - if (!str) - return 0; - - ret = kstrtou8(str, 10, &percentage); - if (!ret) 
{ - csv_mem_percentage = min_t(unsigned char, percentage, CSV_MEM_PCT_MAX); - if (csv_mem_percentage != percentage) - pr_warn("csv_mem_percentage is limited to %d.\n", - CSV_MEM_PCT_MAX); - } else { - /* Disable CSV CMA. */ - csv_mem_percentage = 0; - pr_err("csv_mem_percentage is invalid. (0 - %d) is expected.\n", - CSV_MEM_PCT_MAX); - } - - return ret; -} -early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage); - +/* Common CSV3 memory protection macros */ #define NUM_SMR_ENTRIES (8 * 1024) -#define CSV_CMA_SHIFT PUD_SHIFT -#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) #define MIN_SMR_ENTRY_SHIFT 23 #define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) +/* CSV3 CMA macros */ +#define CSV_CMA_SHIFT PUD_SHIFT +#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) +#define MAX_CSV_CMA_PCT 95 +#define CSV_CMA_AREAS 2458 +/** + * An array of Secure Memory Regions (SMRs), where each entry records a physical + * address range within a NUMA node that will be managed by hardware. + * Each NUMA node has at most one entry, and NUMA nodes without physical memory + * are not included in the array. + */ struct csv_mem *csv_smr; EXPORT_SYMBOL_GPL(csv_smr); - +/* Number of entries in the @csv_smr array */ unsigned int csv_smr_num; EXPORT_SYMBOL_GPL(csv_smr_num); +/** + * @csv_cma_pct specifies the percentage of total system memory to be managed by + * CMA, while the @csv_cma_size specifies the absolute size of CMA-managed + * memory. + * These values can be set via the mutually exclusive kernel cmdline parameter: + * - 'csv_mem_percentage=' sets @csv_cma_pct + * - 'csv_mem_size=' sets @csv_cma_size. + * Required when the CSV3 private memory is allocated from CSV-CMA. + */ +static unsigned char csv_cma_pct; +static unsigned long csv_cma_size; +/** + * The memory unit size managed by the hardware. Do not confuse this with + * @csv_smr or @csv_smr_num. 
+ */ +static unsigned int smr_entry_shift; #ifdef CONFIG_SYSFS @@ -224,21 +199,46 @@ EXPORT_SYMBOL_GPL(csv3_shared_mem); #endif /* CONFIG_SYSFS */ -struct csv_cma { - int nid; - int fast; - struct cma *cma; -}; +static int __init cmdline_parse_csv_cma_size(char *str) +{ + unsigned long size; + char *endp; -struct cma_array { - unsigned long count; - atomic64_t csv_used_size; - unsigned int index; - struct csv_cma csv_cma[]; -}; + if (str) { + size = memparse(str, &endp); + csv_cma_size = size; + if (!csv_cma_size) + csv_cma_pct = 0; + } -static unsigned int smr_entry_shift; -static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; + return 0; +} +early_param("csv_mem_size", cmdline_parse_csv_cma_size); + +static int __init cmdline_parse_csv_cma_pct(char *str) +{ + unsigned char percentage; + int ret; + + if (!str) + return 0; + + ret = kstrtou8(str, 10, &percentage); + if (!ret) { + csv_cma_pct = min_t(unsigned char, percentage, MAX_CSV_CMA_PCT); + if (csv_cma_pct != percentage) + pr_warn("csv_mem_percentage is limited to %d.\n", + MAX_CSV_CMA_PCT); + } else { + /* Disable CSV CMA. */ + csv_cma_pct = 0; + pr_err("csv_mem_percentage is invalid. 
[0 - %d] is expected.\n", + MAX_CSV_CMA_PCT); + } + + return ret; +} +early_param("csv_mem_percentage", cmdline_parse_csv_cma_pct); static void csv_set_smr_entry_shift(unsigned int shift) { @@ -252,6 +252,23 @@ unsigned int csv_get_smr_entry_shift(void) } EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); +#ifdef CONFIG_CMA + +struct csv_cma { + int nid; + int fast; + struct cma *cma; +}; + +struct cma_array { + unsigned long count; + atomic64_t csv_used_size; + unsigned int index; + struct csv_cma csv_cma[]; +}; + +static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; + static unsigned long __init present_pages_in_node(int nid) { unsigned long range_start_pfn, range_end_pfn; @@ -266,7 +283,7 @@ static unsigned long __init present_pages_in_node(int nid) static phys_addr_t __init csv_early_percent_memory_on_node(int nid) { - return (present_pages_in_node(nid) * csv_mem_percentage / 100) << PAGE_SHIFT; + return (present_pages_in_node(nid) * csv_cma_pct / 100) << PAGE_SHIFT; } static void __init csv_cma_reserve_mem(void) @@ -352,40 +369,8 @@ static void __init csv_cma_reserve_mem(void) csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); } -#define CSV_CMA_AREAS 2458 - -void __init early_csv_reserve_mem(void) -{ - unsigned long total_pages; - - /* Only reserve memory on the host that enabled CSV3 feature */ - if (!csv3_check_cpu_support()) - return; - - if (cma_alloc_areas(CSV_CMA_AREAS)) - return; - - total_pages = PHYS_PFN(memblock_phys_mem_size()); - if (csv_mem_size) { - if (csv_mem_size < (total_pages << PAGE_SHIFT)) { - csv_mem_percentage = div_u64((u64)csv_mem_size * 100, - (u64)total_pages << PAGE_SHIFT); - if (csv_mem_percentage > CSV_MEM_PCT_MAX) - csv_mem_percentage = CSV_MEM_PCT_MAX; /* Maximum percentage */ - } else - csv_mem_percentage = CSV_MEM_PCT_MAX; /* Maximum percentage */ - } - - if (!csv_mem_percentage) { - pr_warn("CSV-CMA: Don't reserve any memory\n"); - return; - } - - csv_cma_reserve_mem(); -} - phys_addr_t 
csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, - unsigned int align) + unsigned int align) { int nid; int nr_nodes; @@ -483,6 +468,58 @@ void csv_release_to_contiguous(phys_addr_t pa, size_t size) } EXPORT_SYMBOL_GPL(csv_release_to_contiguous); +#else /* !CONFIG_CMA */ + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) +{ + return 0; +} +EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous); + +void csv_release_to_contiguous(phys_addr_t pa, size_t size) +{ +} +EXPORT_SYMBOL_GPL(csv_release_to_contiguous); + +#endif /* CONFIG_CMA */ + +void __init early_csv_reserve_mem(void) +{ + unsigned long total_pages; + + /* Only reserve memory on the host that enabled CSV3 feature */ + if (!csv3_check_cpu_support()) + return; + + total_pages = PHYS_PFN(memblock_phys_mem_size()); + if (csv_cma_size) { + if (csv_cma_size < (total_pages << PAGE_SHIFT)) { + csv_cma_pct = div_u64((u64)csv_cma_size * 100, + (u64)total_pages << PAGE_SHIFT); + /* Maximum percentage */ + if (csv_cma_pct > MAX_CSV_CMA_PCT) + csv_cma_pct = MAX_CSV_CMA_PCT; + } else + /* Maximum percentage */ + csv_cma_pct = MAX_CSV_CMA_PCT; + } + + if (!csv_cma_pct) { + pr_warn("CSV-CMA: Don't reserve any memory\n"); + return; + } + +#ifdef CONFIG_CMA + if (cma_alloc_areas(CSV_CMA_AREAS)) + return; + + csv_cma_reserve_mem(); +#else + pr_warn("CSV: Fail, csv_mem_percentage depends on CONFIG_CMA\n"); +#endif +} + #ifdef CONFIG_SYSFS /** @@ -493,67 +530,57 @@ static ssize_t mem_info_show(struct kobject *kobj, { int node; int offset = 0; - unsigned long csv_used_size, total_used_size = 0; - unsigned long csv_size, total_csv_size = 0; - unsigned long shared_mem, total_shared_mem = 0; - unsigned long npt_size, pri_mem; - struct cma_array *array = NULL; - unsigned long bytes_per_mib = 1UL << 20; + unsigned long cma_cur_used, cma_total_used = 0; + unsigned long cma_cur, cma_total = 0; + unsigned long shared_mem, shared_mem_total = 0; + unsigned long npt_total, 
priv_mem_total; for_each_node_state(node, N_ONLINE) { - array = csv_contiguous_pernuma_area[node]; - if (array == NULL) { - csv_size = 0; - csv_used_size = 0; - shared_mem = 0; - - offset += snprintf(buf + offset, PAGE_SIZE - offset, "Node%d:\n", node); - offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 shared size:%10lu MiB\n", shared_mem); - offset += snprintf(buf + offset, PAGE_SIZE - offset, - " total cma size:%12lu MiB\n", csv_size); - offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 cma used:%13lu MiB\n", csv_used_size); - continue; +#ifdef CONFIG_CMA + struct cma_array *array = csv_contiguous_pernuma_area[node]; +#endif + cma_cur = 0; + cma_cur_used = 0; +#ifdef CONFIG_CMA + if (array) { + cma_cur = DIV_ROUND_UP(array->count * CSV_CMA_SIZE, SZ_1M); + cma_cur_used = DIV_ROUND_UP(atomic64_read(&array->csv_used_size), + SZ_1M); } - +#endif shared_mem = DIV_ROUND_UP(atomic_long_read(&csv3_shared_mem[node]), - bytes_per_mib); - csv_size = DIV_ROUND_UP(array->count * CSV_CMA_SIZE, - bytes_per_mib); - csv_used_size = DIV_ROUND_UP(atomic64_read(&array->csv_used_size), - bytes_per_mib); - - total_shared_mem += shared_mem; - total_csv_size += csv_size; - total_used_size += csv_used_size; + SZ_1M); offset += snprintf(buf + offset, PAGE_SIZE - offset, "Node%d:\n", node); offset += snprintf(buf + offset, PAGE_SIZE - offset, " csv3 shared size:%10lu MiB\n", shared_mem); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " total cma size:%12lu MiB\n", csv_size); + " total cma size:%12lu MiB\n", cma_cur); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 cma used:%13lu MiB\n", csv_used_size); + " csv3 cma used:%13lu MiB\n", cma_cur_used); + + shared_mem_total += shared_mem; + cma_total += cma_cur; + cma_total_used += cma_cur_used; } - npt_size = DIV_ROUND_UP(atomic_long_read(&csv3_npt_size), bytes_per_mib); - pri_mem = DIV_ROUND_UP(atomic_long_read(&csv3_pri_mem), bytes_per_mib); + npt_total = 
DIV_ROUND_UP(atomic_long_read(&csv3_npt_size), SZ_1M); + priv_mem_total = DIV_ROUND_UP(atomic_long_read(&csv3_pri_mem), SZ_1M); offset += snprintf(buf + offset, PAGE_SIZE - offset, "All Nodes:\n"); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 shared size:%10lu MiB\n", total_shared_mem); + " csv3 shared size:%10lu MiB\n", shared_mem_total); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " total cma size:%12lu MiB\n", total_csv_size); + " total cma size:%12lu MiB\n", cma_total); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 cma used:%13lu MiB\n", total_used_size); + " csv3 cma used:%13lu MiB\n", cma_total_used); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " npt table:%16lu MiB\n", npt_size); + " npt table:%16lu MiB\n", npt_total); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 private memory:%6lu MiB\n", pri_mem); + " csv3 private memory:%6lu MiB\n", priv_mem_total); offset += snprintf(buf + offset, PAGE_SIZE - offset, " meta data:%16lu MiB\n", - DIV_ROUND_UP(csv3_meta, bytes_per_mib)); + DIV_ROUND_UP(csv3_meta, SZ_1M)); return offset; } @@ -605,8 +632,10 @@ static void __exit csv_cma_sysfs_exit(void) if (!is_x86_vendor_hygon() || !boot_cpu_has(X86_FEATURE_CSV3)) return; - if (csv_cma_kobj_root != NULL) + if (csv_cma_kobj_root) { + sysfs_remove_group(csv_cma_kobj_root, &csv_cma_attr_group); kobject_put(csv_cma_kobj_root); + } } module_init(csv_cma_sysfs_init); -- Gitee From 1f1da76b7c63cc5a401712e86c11c319757fe8da Mon Sep 17 00:00:00 2001 From: yangge Date: Sat, 8 Nov 2025 10:26:33 +0800 Subject: [PATCH 2/5] x86/mm: Global reservation of CSV3 meta memory Upstream: no CSV3 meta memory consists of secure VMSA/SMCR and these memory regions must be isolated from host. These memory regions are reserved from memblock in the early stage of host kernel. Further more, SMCR memory contains secure NPT and vmcb memory. 
These two memory regions will be delivered to secure processor during later platform initialization. Secure processor will set these memory as secure. Meta memory such as VMSA which is allocated when CSV3 guest is launched. Hygon-SIG: commit none hygon x86/mm: Global reservation of CSV3 meta memory Signed-off-by: yangge Signed-off-by: hanliyang --- arch/x86/include/asm/csv.h | 21 ++ arch/x86/mm/mem_encrypt_hygon.c | 467 +++++++++++++++++++++++++++++++- 2 files changed, 474 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 8d9aa7b2c464..027b595c72b1 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -14,17 +14,26 @@ #include +enum csv_smr_source { + USE_CMA, + USE_HUGETLB, + NOT_SUPPORTED, +}; + #ifdef CONFIG_HYGON_CSV struct csv_mem { uint64_t start; uint64_t size; + int nid; }; #define CSV_MR_ALIGN_BITS (28) extern struct csv_mem *csv_smr; extern unsigned int csv_smr_num; +extern struct csv_mem *csv_smcr; +extern unsigned int csv_smcr_num; #ifdef CONFIG_SYSFS extern atomic_long_t csv3_npt_size; @@ -39,12 +48,19 @@ phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, unsigned int align); void csv_release_to_contiguous(phys_addr_t pa, size_t size); +phys_addr_t csv_alloc_metadata(void); +void csv_free_metadata(u64 hpa); + +enum csv_smr_source get_csv_smr_source(void); + uint32_t csv_get_smr_entry_shift(void); #else /* !CONFIG_HYGON_CSV */ #define csv_smr NULL #define csv_smr_num 0U +#define csv_smcr NULL +#define csv_smcr_num 0U static inline void __init early_csv_reserve_mem(void) { } @@ -53,6 +69,11 @@ csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, unsigned int align) { return 0; } static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } +static inline phys_addr_t csv_alloc_metadata(void) { return 0; } +static inline void csv_free_metadata(u64 hpa) { } + +static inline enum csv_smr_source get_csv_smr_source(void) { 
return NOT_SUPPORTED; } + static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } #endif /* CONFIG_HYGON_CSV */ diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index acda02d8b496..6b20f83e351b 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -140,11 +140,14 @@ EXPORT_SYMBOL_GPL(csv3_active); #define NUM_SMR_ENTRIES (8 * 1024) #define MIN_SMR_ENTRY_SHIFT 23 #define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) +#define CSV_SMCR_MAX_ENTRIES 64 /* 16GB SMCR */ /* CSV3 CMA macros */ #define CSV_CMA_SHIFT PUD_SHIFT #define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) #define MAX_CSV_CMA_PCT 95 #define CSV_CMA_AREAS 2458 +/* CSV3 1G hugetlb macros */ +#define DEFAULT_MAX_CSV_NUMBER 113 /* This is an empirical value */ /** * An array of Secure Memory Regions (SMRs), where each entry records a physical @@ -157,6 +160,19 @@ EXPORT_SYMBOL_GPL(csv_smr); /* Number of entries in the @csv_smr array */ unsigned int csv_smr_num; EXPORT_SYMBOL_GPL(csv_smr_num); +/** + * The kernel cmdline parameter 'csv_smcr_size=' can be used to + * reserve memory for CSV3 VM NPT on the host platform. When this parameter is + * specified, the kernel reserves physical memory of the given size. These + * reserved memory are described by @csv_smcr. + * Optional when CSV3 private memory is allocated from CSV-CMA. + * Required when CSV3 private memory is allocated from 1G hugetlb pages. + */ +struct csv_mem *csv_smcr; +EXPORT_SYMBOL_GPL(csv_smcr); +/* Number of entries in the @csv_smcr array */ +unsigned int csv_smcr_num; +EXPORT_SYMBOL_GPL(csv_smcr_num); /** * @csv_cma_pct specifies the percentage of total system memory to be managed by * CMA, while the @csv_cma_size specifies the absolute size of CMA-managed @@ -168,6 +184,25 @@ EXPORT_SYMBOL_GPL(csv_smr_num); */ static unsigned char csv_cma_pct; static unsigned long csv_cma_size; +/** + * Number of CSV3 VMs that the system is intended to support. 
+ * The kernel cmdline parameter 'csv_use_hugetlb=' specify the value of + * @csv_use_hugetlb. + * Required when CSV3 private memory is allocated from 1G hugetlb pages. + */ +static unsigned int csv_use_hugetlb; +/** + * When CSV3 private memory is allocated from 1G hugetlb, metadata blocks are + * reserved for per-VM VMSAs. + * The @csv_use_hugetlb indicates how many metadata blocks to reserve. + */ +struct csv_metadata { + struct list_head list; + unsigned long hpa; + bool used; +}; +static LIST_HEAD(csv_metadata_list); +DEFINE_SPINLOCK(csv_metadata_lock); /** * The memory unit size managed by the hardware. Do not confuse this with * @csv_smr or @csv_smr_num. @@ -240,10 +275,65 @@ static int __init cmdline_parse_csv_cma_pct(char *str) } early_param("csv_mem_percentage", cmdline_parse_csv_cma_pct); +static int __init cmdline_parse_csv_smcr_size(char *str) +{ + unsigned long size; + char *endp; + + if (str) { + size = memparse(str, &endp); + if (size) { + csv_smcr_num = size >> CSV_MR_ALIGN_BITS; + if (csv_smcr_num < 2) { + csv_smcr_num = 0; + pr_err("CSV-SMCR: csv_smcr_size must be >= 512MB\n"); + } + if (csv_smcr_num > CSV_SMCR_MAX_ENTRIES) { + csv_smcr_num = CSV_SMCR_MAX_ENTRIES; + pr_warn("CSV-SMCR: csv_smcr_size is limited to 16GB\n"); + } + } else + pr_err("CSV-SMCR: csv_smcr_size is invalid\n"); + } + + return 0; +} +early_param("csv_smcr_size", cmdline_parse_csv_smcr_size); + +static int __init cmdline_parse_csv_use_hugetlb(char *str) +{ + unsigned int count, limit = 0; + int ret; + + if (!str) { + csv_use_hugetlb = DEFAULT_MAX_CSV_NUMBER; + return 0; + } + + if (is_x86_vendor_hygon() && boot_cpu_data.x86_model >= 0x4) + limit = 500; + + ret = kstrtou32(str, 10, &count); + if (!ret) { + if (limit < count) { + pr_info("csv_use_hugetlb is limited to %d\n", limit); + count = limit; + } + csv_use_hugetlb = count; + } else { + /* Disable CSV hugetlb. */ + csv_use_hugetlb = 0; + pr_err("csv_use_hugetlb is invalid. 
[0 - %d] is expected.\n", limit); + } + + return ret; +} +early_param("csv_use_hugetlb", cmdline_parse_csv_use_hugetlb); + static void csv_set_smr_entry_shift(unsigned int shift) { smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); - pr_info("CSV-CMA: SMR entry size is 0x%x\n", 1 << smr_entry_shift); + pr_info("CSV: SMR entry size is 0x%x\n", 1 << smr_entry_shift); } unsigned int csv_get_smr_entry_shift(void) @@ -252,6 +342,129 @@ unsigned int csv_get_smr_entry_shift(void) } EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); +static void __init csv_smcr_free_mem(void) +{ + unsigned int i; + + if (!csv_smcr_num) + return; + + for (i = 0; i < csv_smcr_num; i++) { + if (csv_smcr[i].start && csv_smcr[i].size) { + memblock_phys_free(csv_smcr[i].start, csv_smcr[i].size); + pr_info("CSV-SMCR: free mem - paddr 0x%016llx, size 0x%016llx\n", + csv_smcr[i].start, csv_smcr[i].size); + } + } + + if (csv_smcr) { + memblock_free(csv_smcr, sizeof(struct csv_mem) * csv_smcr_num); + csv_smcr = NULL; + csv_smcr_num = 0; + } +} + +static int __init csv_smcr_reserve_mem(void) +{ + unsigned int i; + + if (!csv_smcr_num) + return 0; + + csv_smcr = memblock_alloc_node(sizeof(struct csv_mem) * csv_smcr_num, + SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!csv_smcr) { + pr_err("CSV-SMCR: Fail to allocate memory\n"); + return -ENOMEM; + } + + memset(csv_smcr, 0, sizeof(struct csv_mem) * csv_smcr_num); + for (i = 0; i < csv_smcr_num; i++) { + csv_smcr[i].size = 1UL << CSV_MR_ALIGN_BITS; + csv_smcr[i].start = memblock_phys_alloc_try_nid(csv_smcr[i].size, + csv_smcr[i].size, + NUMA_NO_NODE); + if (csv_smcr[i].start == 0) { + csv_smcr[i].size = 0; + pr_err("CSV-SMCR: Fail to reserve memory\n"); + goto failure; + } + csv_smcr[i].nid = phys_to_target_node(csv_smcr[i].start); + + pr_info("CSV-SMCR: reserve mem - paddr 0x%016llx, size 0x%016llx\n", + csv_smcr[i].start, csv_smcr[i].size); + } + + return 0; + +failure: + csv_smcr_free_mem(); + + return -ENOMEM; +} + +static unsigned long __init 
smallest_pfn_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long smallest = -1; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) { + if (range_start_pfn < smallest) + smallest = range_start_pfn; + } + + return smallest; +} + +static unsigned long __init largest_pfn_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long largest = 0; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) { + if (range_end_pfn > largest) + largest = range_end_pfn; + } + + return largest; +} + +static unsigned long __init largest_pfn(void) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long largest = 0; + int node, i; + + for_each_node_state(node, N_ONLINE) { + for_each_mem_pfn_range(i, node, &range_start_pfn, &range_end_pfn, NULL) { + if (range_end_pfn > largest) + largest = range_end_pfn; + } + } + + return largest; +} + +static struct csv_mem * __init find_csv_smcr_mem_nid(int nid) +{ + int i; + struct csv_mem *smcr = NULL; + + if (!csv_smcr) + return NULL; + + for (i = 0; i < csv_smcr_num; i++) { + if (csv_smcr[i].nid == nid) { + smcr = &csv_smcr[i]; + return smcr; + } + } + + return NULL; +} + #ifdef CONFIG_CMA struct csv_cma { @@ -286,7 +499,7 @@ static phys_addr_t __init csv_early_percent_memory_on_node(int nid) return (present_pages_in_node(nid) * csv_cma_pct / 100) << PAGE_SHIFT; } -static void __init csv_cma_reserve_mem(void) +static int __init csv_cma_reserve_mem(void) { int node, i; unsigned long size; @@ -295,12 +508,6 @@ static void __init csv_cma_reserve_mem(void) int cma_array_size; unsigned long max_spanned_size = 0; - csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); - if (!csv_smr) { - pr_err("CSV-CMA: Fail to allocate csv_smr\n"); - return; - } - for_each_node_state(node, N_ONLINE) { int ret; char name[CMA_MAX_NAME]; @@ -351,6 +558,20 @@ static void __init csv_cma_reserve_mem(void) continue; 
array->count = i; + + /** + * If CSV3 private memory is allocated from CSV-CMA and + * @csv_smcr is specified, the reserved SMCRs are taken into + * account. + */ + if (find_csv_smcr_mem_nid(node)) { + pr_info("CSV-CMA: Node %d has smcr reserved, set all mem as SMR\n", node); + start = ALIGN(smallest_pfn_in_node(node) << PAGE_SHIFT, + 1ull << CSV_MR_ALIGN_BITS); + end = ALIGN_DOWN(largest_pfn_in_node(node) << PAGE_SHIFT, + 1ull << CSV_MR_ALIGN_BITS); + } + spanned_size = end - start; if (spanned_size > max_spanned_size) max_spanned_size = spanned_size; @@ -367,6 +588,8 @@ static void __init csv_cma_reserve_mem(void) WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); + + return csv_smr_num ? 0 : -ENOMEM; } phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, @@ -484,9 +707,150 @@ EXPORT_SYMBOL_GPL(csv_release_to_contiguous); #endif /* CONFIG_CMA */ +static int __init csv_mark_secure_mem_region(void) +{ + int node; + int idx = 0; + unsigned long max_spanned_size = 0; + + for_each_node_state(node, N_ONLINE) { + unsigned long spanned_size; + unsigned long start = 0, end = 0; + + start = ALIGN(smallest_pfn_in_node(node) << PAGE_SHIFT, + 1ull << CSV_MR_ALIGN_BITS); + end = ALIGN_DOWN(largest_pfn_in_node(node) << PAGE_SHIFT, + 1ull << CSV_MR_ALIGN_BITS); + + if (start >= end) + continue; + + spanned_size = end - start; + if (spanned_size > max_spanned_size) + max_spanned_size = spanned_size; + + csv_smr[idx].start = start; + csv_smr[idx].size = end - start; + idx++; + + pr_info("CSV: Node %d - secure range 0x%016lx ~ 0x%016lx\n", + node, start, end); + } + + csv_smr_num = idx; + WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); + if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) + csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); + + return csv_smr_num ? 
0 : -ENOMEM; +} + +static int __init csv_reserve_metadata(void) +{ + unsigned int i; + struct csv_metadata *metadata, *cur; + u64 hpa; + u64 smr_size; + struct list_head *pos, *q; + + if (WARN_ON_ONCE(!csv_smr)) + return -EFAULT; + + metadata = memblock_alloc_node(sizeof(*metadata) * csv_use_hugetlb, + SMP_CACHE_BYTES, NUMA_NO_NODE); + if (WARN_ON(!metadata)) + return -ENOMEM; + + smr_size = 1 << smr_entry_shift; + for (i = 0; i < csv_use_hugetlb; i++) { + hpa = memblock_phys_alloc_range(smr_size, smr_size, 0, + ALIGN_DOWN((largest_pfn() << PAGE_SHIFT) - PUD_SIZE, + PUD_SIZE)); + if (WARN_ON(!hpa)) + goto err; + + metadata[i].hpa = hpa; + metadata[i].used = false; + list_add_tail(&metadata[i].list, &csv_metadata_list); + } + + return 0; +err: + list_for_each_safe(pos, q, &csv_metadata_list) { + cur = list_entry(pos, struct csv_metadata, list); + memblock_phys_free(cur->hpa, 1 << smr_entry_shift); + list_del(&cur->list); + } + memblock_free(metadata, sizeof(*metadata) * csv_use_hugetlb); + + pr_warn("CSV: Fail to reserve metadata.\n"); + + return -ENOMEM; +} + +/** + * The helper functions csv_alloc_metadata() and csv_free_metadata() are used + * for the VMSA of CSV3 VMs only when CSV3 private memory is allocated from 1G + * hugetlb pages. In this case, the host should set the kernel cmdline parameter + * 'csv_use_hugetlb=', which causes the kernel to reserve metadata blocks + * at boot time. The function csv_alloc_metadata() allocates one block from this + * reserved pool, and csv_free_metadata() returns it. + * + * If CSV3 private memory is allocated from CSV-CMA, the VMSA is allocated + * from CSV-CMA, and these helper functions are not used. The host must not + * specify 'csv_use_hugetlb' on the kernel cmdline in this scenario. 
+ */ +phys_addr_t csv_alloc_metadata(void) +{ + struct csv_metadata *metadata; + struct list_head *pos, *q; + u64 hpa = 0; + + spin_lock(&csv_metadata_lock); + + list_for_each_safe(pos, q, &csv_metadata_list) { + metadata = list_entry(pos, struct csv_metadata, list); + if (metadata) { + if (!metadata->used) { + metadata->used = true; + hpa = metadata->hpa; + break; + } + } + } + + spin_unlock(&csv_metadata_lock); + + return hpa; +} +EXPORT_SYMBOL_GPL(csv_alloc_metadata); + +void csv_free_metadata(u64 hpa) +{ + struct csv_metadata *metadata; + struct list_head *pos, *q; + + spin_lock(&csv_metadata_lock); + + list_for_each_safe(pos, q, &csv_metadata_list) { + metadata = list_entry(pos, struct csv_metadata, list); + if (metadata) { + if (metadata->hpa == hpa) { + WARN_ON(metadata->used != true); + metadata->used = false; + break; + } + } + } + + spin_unlock(&csv_metadata_lock); +} +EXPORT_SYMBOL_GPL(csv_free_metadata); + void __init early_csv_reserve_mem(void) { unsigned long total_pages; + int ret; /* Only reserve memory on the host that enabled CSV3 feature */ if (!csv3_check_cpu_support()) @@ -505,20 +869,95 @@ void __init early_csv_reserve_mem(void) csv_cma_pct = MAX_CSV_CMA_PCT; } - if (!csv_cma_pct) { - pr_warn("CSV-CMA: Don't reserve any memory\n"); + if (!csv_cma_pct && !(csv_smcr_num && csv_use_hugetlb)) { + pr_warn("CSV: Fail, either kernel param csv_mem_percentage or csv_smcr_size&csv_use_hugetlb is required\n"); + pr_warn("CSV: Don't reserve any memory\n"); return; } -#ifdef CONFIG_CMA - if (cma_alloc_areas(CSV_CMA_AREAS)) + csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!csv_smr) { + pr_err("CSV: Fail to allocate csv_smr\n"); return; + } + memset(csv_smr, 0, CSV_SMR_INFO_SIZE); + + /* SMCR memory for CSV3 NPT/context. 
*/ + ret = csv_smcr_reserve_mem(); + if (ret) + pr_warn("CSV: Fail to reserve SMCR!\n"); - csv_cma_reserve_mem(); + /** + * The kernel cmdline parameter csv_mem_percentage= take precedence + * over csv_use_hugetlb=. + */ + if (csv_cma_pct) { +#ifdef CONFIG_CMA + /** + * If reach here, the CSV3 private memory should be allocated + * from CMA. + */ + csv_use_hugetlb = 0; + + ret = cma_alloc_areas(CSV_CMA_AREAS); + if (ret) + goto err_free_smcr; + + ret = csv_cma_reserve_mem(); + if (ret) + goto err_free_smcr; + + return; #else - pr_warn("CSV: Fail, csv_mem_percentage depends on CONFIG_CMA\n"); + if (csv_smcr && csv_use_hugetlb) { + pr_info("CSV: Fallback to csv_use_hugetlb logic\n"); + } else { + pr_warn("CSV: Fail, csv_mem_percentage depends on CONFIG_CMA\n"); + goto err_free_smcr; + } #endif + } + + /** + * If reach here, the CSV3 private memory should be allocated from + * 1G hugetlb. + */ + csv_cma_pct = 0; + + if (csv_smcr && csv_use_hugetlb) { + ret = csv_mark_secure_mem_region(); + if (ret) + goto err_free_smcr; + + ret = csv_reserve_metadata(); + if (ret) + goto err_free_smcr; + + return; + } + +err_free_smcr: + csv_smcr_free_mem(); + + memblock_free(csv_smr, CSV_SMR_INFO_SIZE); + csv_smr = NULL; + csv_smr_num = 0; + + csv_cma_pct = 0; + csv_use_hugetlb = 0; +} + +enum csv_smr_source get_csv_smr_source(void) +{ + if (csv_cma_pct) + return USE_CMA; + + if (csv_use_hugetlb) + return USE_HUGETLB; + + return NOT_SUPPORTED; } +EXPORT_SYMBOL_GPL(get_csv_smr_source); #ifdef CONFIG_SYSFS -- Gitee From 14dc8d0d33f48bc478a867d199ffffabb10603b4 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Sat, 8 Nov 2025 10:54:35 +0800 Subject: [PATCH 3/5] crypto: ccp: Extend CSV3 CMD to support multiple SMCR regions Upstream: no In certain specific scenarios, customers need to manage CSV3 NPT memory separately. So the NPT memory should be included in SMCR regions then delivered to secure processor during platform initialization. 
As NPT memory is also included in SMCR and the CSV3_CMD_SET_SMCR comamnd cannot hold multiple memory regions, extend CSV3_CMD_SET_SMR with SMCR flag to deliver SMCR memory regions to secure processor. Hygon-SIG: commit none hygon crypto: ccp: Extend CSV3 CMD to support multiple SMCR regions Signed-off-by: Xin Jiang Signed-off-by: hanliyang --- drivers/crypto/ccp/hygon/csv-dev.c | 87 +++++++++++++++++++----------- include/linux/psp-hygon.h | 6 ++- 2 files changed, 62 insertions(+), 31 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 5d88ab87f6f5..0256b04d919a 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -690,15 +690,18 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error { int ret = 0; unsigned int i = 0; - struct csv3_data_set_smr *cmd_set_smr; - struct csv3_data_set_smcr *cmd_set_smcr; - struct csv3_data_memory_region *smr_regions; + struct csv3_data_set_smr *cmd_set_smr = NULL; + struct csv3_data_set_smcr *cmd_set_smcr = NULL; + struct csv3_data_memory_region *smr_regions = NULL; + unsigned int smr_num; + enum csv_smr_source source = get_csv_smr_source(); if (!hygon_psp_hooks.sev_dev_hooks_installed) { ret = -ENODEV; goto l_end; } + /* Initialize global SMRs */ if (!csv_smr || !csv_smr_num) { ret = -EINVAL; goto l_end; @@ -710,7 +713,8 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error goto l_end; } - smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); + smr_num = max_t(unsigned int, csv_smr_num, csv_smcr_num); + smr_regions = kcalloc(smr_num, sizeof(*smr_regions), GFP_KERNEL); if (!smr_regions) { ret = -ENOMEM; goto e_free_cmd_set_smr; @@ -729,37 +733,60 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error goto e_free_smr_area; } - cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); - if (!cmd_set_smcr) { - ret = -ENOMEM; - goto 
e_free_smr_area; - } - - cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, - &node_online_map, - get_order(1 << CSV_MR_ALIGN_BITS)); - if (!cmd_set_smcr->base_address) { - pr_err("Fail to alloc SMCR memory\n"); - ret = -ENOMEM; - goto e_free_cmd_set_smcr; - } - - cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; - ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); - if (ret) { - if (*error == SEV_RET_INVALID_COMMAND) - ret = 0; + /* Initialize global SMCRs */ + if (csv_smcr && csv_smcr_num && csv_version_greater_or_equal(2407)) { + for (i = 0; i < csv_smcr_num; i++) { + smr_regions[i].base_address = csv_smcr[i].start; + smr_regions[i].size = csv_smcr[i].size; + } + cmd_set_smr->smcr_flag = 0; /* 0 as SMCR memory flag */ + cmd_set_smr->regions_paddr = __psp_pa(smr_regions); + cmd_set_smr->nregions = csv_smcr_num; + ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); + if (ret) + pr_err("Fail to set SMCR, ret %#x, error %#x\n", ret, *error); else - pr_err("set smcr ret %#x, error %#x\n", ret, *error); + pr_info("CSV: manage CSV3 VM by SMCR pool,%s\n", + source == USE_CMA ? "CMA" : + source == USE_HUGETLB ? 
"1G hugetlb,Metadata pool" : "?"); + goto e_free_smr_area; + } else if (source == USE_CMA) { + cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); + if (!cmd_set_smcr) { + ret = -ENOMEM; + goto e_free_smr_area; + } - csv_release_to_contiguous(cmd_set_smcr->base_address, - 1UL << CSV_MR_ALIGN_BITS); - goto e_free_cmd_set_smcr; - } + cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, + &node_online_map, + get_order(1 << CSV_MR_ALIGN_BITS)); + if (!cmd_set_smcr->base_address) { + pr_err("Fail to alloc SMCR memory\n"); + ret = -ENOMEM; + goto e_free_cmd_set_smcr; + } + cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; + ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); + if (ret) { + if (*error == SEV_RET_INVALID_COMMAND) + ret = 0; + else + pr_err("Fail to set SMCR, ret %#x, error %#x\n", ret, *error); + + csv_release_to_contiguous(cmd_set_smcr->base_address, + 1UL << CSV_MR_ALIGN_BITS); + goto e_free_cmd_set_smcr; + } else { + pr_info("CSV: manage CSV3 VM by CMA\n"); #ifdef CONFIG_SYSFS - csv3_meta = cmd_set_smcr->size; + csv3_meta = cmd_set_smcr->size; #endif + } + } else { + pr_err("Unable to set hardware SMCR\n"); + goto e_free_smr_area; + } e_free_cmd_set_smcr: kfree((void *)cmd_set_smcr); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 74f5d750292d..e5b5ef693d2f 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -280,11 +280,15 @@ struct csv3_data_set_guest_private_memory { * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command parameters * * @smr_entry_size: size of SMR entry + * @smcr_flag: must be 0 for SMCR memory region * @nregions: number of memory regions * @regions_paddr: address of memory containing multiple memory regions */ struct csv3_data_set_smr { - u32 smr_entry_size; /* In */ + union { + u32 smr_entry_size; /* In */ + u32 smcr_flag; /* In */ + }; u32 nregions; /* In */ u64 regions_paddr; /* In */ } __packed; -- Gitee From 
1b530d413880033e32b36aa8ef1f675f8c646475 Mon Sep 17 00:00:00 2001 From: yangge Date: Sat, 8 Nov 2025 11:09:58 +0800 Subject: [PATCH 4/5] mm/hugetlb: Introduce dequeue_1G_hugetlb_folio_nodemask() helper Upstream: no Introduce dequeue_1G_hugetlb_folio_nodemask() to allow users to directly allocate memory from 1G hugetlb. Hygon-SIG: commit none hygon mm/hugetlb: Introduce dequeue_1G_hugetlb_folio_nodemask() helper Signed-off-by: yangge Signed-off-by: hanliyang --- include/linux/hugetlb.h | 7 +++++++ mm/hugetlb.c | 26 ++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d8bf19c1adb1..e10783e75d75 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -752,6 +752,7 @@ struct huge_bootmem_page { int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); void wait_for_freed_hugetlb_folios(void); +struct folio *dequeue_1G_hugetlb_folio_nodemask(gfp_t gfp_mask, int node, nodemask_t *nodemask); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, @@ -1076,6 +1077,12 @@ static inline void wait_for_freed_hugetlb_folios(void) { } +static inline struct folio *dequeue_1G_hugetlb_folio_nodemask(gfp_t gfp_mask, + int node, nodemask_t *nodemask) +{ + return NULL; +} + static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 94e1df675927..48ddb5cd6582 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1440,6 +1440,32 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, return NULL; } +struct folio *dequeue_1G_hugetlb_folio_nodemask(gfp_t gfp_mask, int node, nodemask_t *nodemask) +{ + struct folio *folio = NULL; + struct hstate *h; + + h = 
size_to_hstate(1 << PUD_SHIFT); + if (!h) + return NULL; + + if (node == NUMA_NO_NODE) + node = numa_mem_id(); + + spin_lock_irq(&hugetlb_lock); + + if (!available_huge_pages(h)) + goto err; + + folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, node, nodemask); + +err: + spin_unlock_irq(&hugetlb_lock); + + return folio; +} +EXPORT_SYMBOL(dequeue_1G_hugetlb_folio_nodemask); + /* * common helper functions for hstate_next_node_to_{alloc|free}. * We may have allocated or freed a huge page based on a different -- Gitee From d1e7ef23bb7d00e9de623bf52d824e4f33ace1e6 Mon Sep 17 00:00:00 2001 From: yangge Date: Sat, 8 Nov 2025 11:18:56 +0800 Subject: [PATCH 5/5] KVM: SVM: CSV: Support 1G hugetlb for CSV3 Upstream: no Add a new feature to allow the private memory of the CSV3 virtual machine to be allocated from 1G hugetlb folios. Hygon-SIG: commit none hygon KVM: SVM: CSV: Support 1G hugetlb for CSV3 Signed-off-by: yangge Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 501 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 450 insertions(+), 51 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index cfbb6e3608be..1fc7d78d2de0 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -898,6 +899,7 @@ struct kvm_csv_info { struct mutex sp_lock; /* shared page lock */ struct list_head smr_list; /* List of guest secure memory regions */ + struct list_head hugetlb_list; /* List of 1G hugetlb (if used) */ unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ /* The following 5 fields record the extension status for current VM */ @@ -918,10 +920,21 @@ struct kvm_svm_csv { struct kvm_csv_info csv_info; }; +enum csv_mem_type { + CSV_METADATA, + CSV_SEC_MEM, +}; + struct secure_memory_region { struct list_head list; u64 npages; u64 hpa; + enum csv_mem_type type; +}; + +struct csv3_hugetlb { + struct list_head list; + struct folio *folio; 
}; #ifdef CONFIG_SYSFS @@ -1325,6 +1338,7 @@ static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) csv->nodemask = (unsigned long)params.nodemask; INIT_LIST_HEAD(&csv->smr_list); + INIT_LIST_HEAD(&csv->hugetlb_list); mutex_init(&csv->sp_lock); return 0; @@ -1337,43 +1351,197 @@ static bool csv3_is_mmio_pfn(kvm_pfn_t pfn) E820_TYPE_RAM); } -static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *argp) +/** + * csv3_free_smr_list - Free secure_memory_region structure from CSV3 VM's + * smr_list. + * There are 3 types memory described by secure_memory_region structure: + * - Guest private memory, NPT, metadata (e.g., VMSA) allocated from CMA. + * The CMA should be released when remove it from CSV3 VM's smr_list. + * - Guest private memory allocated from 1G hugetlb. + * The caller should release the 1G hugetlb pages. + * - Guest metadata (e.g., VMSA) allocated from system-reserved memory + * pool, used only when guest private memory is allocated from 1G + * hugetlb. This memory should be free back to the pool. + */ +static void csv3_free_smr_list(struct kvm *kvm) { - struct kvm_memslots *slots = kvm_memslots(kvm); - struct kvm_memory_slot *memslot; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; struct secure_memory_region *smr; + struct list_head *pos, *q; + enum csv_smr_source source = get_csv_smr_source(); + + list_for_each_safe(pos, q, &csv->smr_list) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + if (source == USE_CMA) + csv_release_to_contiguous(smr->hpa, + smr->npages << PAGE_SHIFT); + + if (smr->type == CSV_METADATA) + csv_free_metadata(smr->hpa); + + list_del(&smr->list); + kfree(smr); + } + } +} + +/** + * This helper function is used when CSV3 private memory is allocated from 1G + * hugetlb. 
+ */ +static void csv3_free_1G_hugetlb_folios(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_hugetlb *hugetlb; + struct list_head *pos, *q; + + list_for_each_safe(pos, q, &csv->hugetlb_list) { + hugetlb = list_entry(pos, struct csv3_hugetlb, list); + if (hugetlb) { + folio_put(hugetlb->folio); + list_del(&hugetlb->list); + kfree(hugetlb); + } + } +} + +/** + * This helper function is used when CSV3 private memory is allocated from 1G + * hugetlb. + */ +static int csv3_alloc_1G_hugetlb_folios(struct kvm *kvm, + unsigned long size, + nodemask_t *nodemask) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct folio *folio; + struct csv3_hugetlb *hugetlb; + int huge_count; + int i; + int ret = 0; + + huge_count = ALIGN(size, PUD_SIZE) / PUD_SIZE; + + for (i = 0; i < huge_count; i++) { + hugetlb = kzalloc(sizeof(*hugetlb), GFP_KERNEL_ACCOUNT); + if (!hugetlb) { + ret = -ENOMEM; + goto err; + } + + folio = dequeue_1G_hugetlb_folio_nodemask(GFP_KERNEL, + NUMA_NO_NODE, nodemask); + if (!folio) { + kfree(hugetlb); + ret = -ENOMEM; + goto err; + } + + hugetlb->folio = folio; + list_add_tail(&hugetlb->list, &csv->hugetlb_list); + } + + return 0; +err: + csv3_free_1G_hugetlb_folios(kvm); + + return ret; +} + +/** + * This helper function is used when CSV3 private memory is allocated from 1G + * hugetlb. 
+ */ +static int csv3_init_1G_hugetlb_smrs(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_hugetlb *hugetlb; + struct secure_memory_region *smr; + struct list_head *pos, *q; + u64 hpa; + u32 smr_entry_shift; + int smr_count; + int i; + int ret = 0; + + smr_entry_shift = csv_get_smr_entry_shift(); + smr_count = PUD_SIZE / (1UL << smr_entry_shift); + + list_for_each_safe(pos, q, &csv->hugetlb_list) { + hugetlb = list_entry(pos, struct csv3_hugetlb, list); + if (!hugetlb) { + WARN_ON(1); + ret = -ENOMEM; + goto err; + } + + hpa = folio_pfn(hugetlb->folio) << PAGE_SHIFT; + for (i = 0; i < smr_count; i++) { + smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); + if (!smr) { + ret = -ENOMEM; + goto err; + } + + smr->hpa = hpa + i * (1UL << smr_entry_shift); + list_add_tail(&smr->list, &csv->smr_list); + } + } + + return 0; + +err: + csv3_free_smr_list(kvm); + + return ret; +} + +/** + * This helper function is used when CSV3 private memory is allocated from 1G + * hugetlb. + */ +static u64 csv3_get_1G_hugetlb_smr(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct secure_memory_region *smr; + struct list_head *pos, *q; + u64 hpa = 0; + + list_for_each_safe(pos, q, &csv->smr_list) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + hpa = smr->hpa; + list_del(&smr->list); + kfree(smr); + break; + } + } + + return hpa; +} + +/** + * This helper function is used when CSV3 private memory is allocated from 1G + * hugetlb. 
+ */ +static int csv3_set_hugetlb_smr(struct kvm *kvm, unsigned long vm_size, + nodemask_t *nodemask, struct kvm_sev_cmd *argp) +{ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; struct csv3_data_set_guest_private_memory *set_guest_private_memory; + struct secure_memory_region *smr; struct csv3_data_memory_region *regions; - nodemask_t nodemask; - nodemask_t *nodemask_ptr; LIST_HEAD(tmp_list); struct list_head *pos, *q; u32 i = 0, count = 0, remainder; int ret = 0; - u64 size = 0, nr_smr = 0, nr_pages = 0; + u64 nr_smr = 0; u32 smr_entry_shift; - int bkt; - - unsigned int flags = FOLL_HWPOISON; - int npages; - struct page *page; - - if (!csv3_guest(kvm)) - return -ENOTTY; - - /* The smr_list should be initialized only once */ - if (!list_empty(&csv->smr_list)) - return -EFAULT; - - nodes_clear(nodemask); - for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) - if (i < MAX_NUMNODES) - node_set(i, nodemask); - nodemask_ptr = csv->nodemask ? 
&nodemask : &node_online_map; + bool metadata_allocated = false; set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory), GFP_KERNEL_ACCOUNT); @@ -1386,17 +1554,118 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar return -ENOMEM; } - /* Get guest secure memory size */ - kvm_for_each_memslot(memslot, bkt, slots) { - npages = get_user_pages_unlocked(memslot->userspace_addr, 1, - &page, flags); - if (npages != 1) - continue; + ret = csv3_alloc_1G_hugetlb_folios(kvm, vm_size, nodemask); + if (ret) + goto done; - nr_pages += memslot->npages; + ret = csv3_init_1G_hugetlb_smrs(kvm); + if (ret) + goto e_free_hugetlb; - put_page(page); + smr_entry_shift = csv_get_smr_entry_shift(); + nr_smr = vm_size >> smr_entry_shift; + nr_smr += 1; + + remainder = nr_smr; + for (i = 0; i < nr_smr; i++) { + smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); + if (!smr) { + ret = -ENOMEM; + goto e_free_smr; + } + + if (metadata_allocated == false) { + smr->hpa = csv_alloc_metadata(); + smr->type = CSV_METADATA; + metadata_allocated = true; + } else { + smr->hpa = csv3_get_1G_hugetlb_smr(kvm); + smr->type = CSV_SEC_MEM; + } + if (!smr->hpa) { + kfree(smr); + ret = -ENOMEM; + goto e_free_smr; + } + + smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT); + list_add_tail(&smr->list, &tmp_list); + + regions[count].size = (1UL << smr_entry_shift); + regions[count].base_address = smr->hpa; + count++; + + if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) { + set_guest_private_memory->nregions = count; + set_guest_private_memory->handle = sev->handle; + set_guest_private_memory->regions_paddr = __sme_pa(regions); + + /* set secure memory region for launch encrypt data */ + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, + set_guest_private_memory, &argp->error); + if (ret) + goto e_free_smr; + + memset(regions, 0, PAGE_SIZE); + remainder -= count; + count = 0; + } + } + + list_splice(&tmp_list, 
&csv->smr_list); + +#ifdef CONFIG_SYSFS + /* The NPT is allocated from global SMCR */ + csv->npt_size = 0; + /** + * Don't account for guest private memory since: + * - VMSAs are allocated from system-reserved memory pool. + * - CSV3 private memory is allocated from 1G hugetlb. + */ + csv->pri_mem = 0; +#endif + + goto done; + +e_free_smr: + /* Remove temporary smr_list */ + if (!list_empty(&tmp_list)) { + list_for_each_safe(pos, q, &tmp_list) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + if (smr->type == CSV_METADATA) + csv_free_metadata(smr->hpa); + + list_del(&smr->list); + kfree(smr); + } + } } + /* Remove smr_list created by csv3_init_1G_hugetlb_smrs() */ + csv3_free_smr_list(kvm); + +e_free_hugetlb: + csv3_free_1G_hugetlb_folios(kvm); + +done: + kfree(set_guest_private_memory); + kfree(regions); + + return ret; +} + +/* + * Calculate the npt size according to the input VM size, return the SMR number + * for the NPT. + * + * This helper function is used when CSV3 private memory is allocated from CMA. + */ +static u32 csv3_get_vm_npt_smr_number(unsigned long vm_size) +{ + u64 nr_pages = vm_size >> PAGE_SHIFT; + u32 smr_entry_shift, nr_smr; + u64 size; /* * NPT secure memory size @@ -1415,9 +1684,56 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar * */ smr_entry_shift = csv_get_smr_entry_shift(); - size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) + - ALIGN(nr_pages * 9, 1UL << smr_entry_shift); + size = ALIGN(nr_pages * 9, 1UL << smr_entry_shift); nr_smr = size >> smr_entry_shift; + + return nr_smr; +} + +/** + * This helper function is used when CSV3 private memory is allocated from CMA.
+ */ +static int csv3_set_cma_smr(struct kvm *kvm, unsigned long vm_size, + nodemask_t *nodemask, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_set_guest_private_memory *set_guest_private_memory; + struct secure_memory_region *smr; + struct csv3_data_memory_region *regions; + + LIST_HEAD(tmp_list); + struct list_head *pos, *q; + u32 i = 0, count = 0, remainder; + int ret = 0; + u64 nr_smr = 0; + u32 smr_entry_shift; + + set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory), + GFP_KERNEL_ACCOUNT); + if (!set_guest_private_memory) + return -ENOMEM; + + regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!regions) { + kfree(set_guest_private_memory); + return -ENOMEM; + } + + smr_entry_shift = csv_get_smr_entry_shift(); + + /** + * If @csv_smcr_num is zero or the firmware does not support explicit + * NPT reservation in SMCR, both VMSA and NPT are allocated from CMA. + * Otherwise, the NPT is already reserved in SMCR, and only a single + * SMR is needed for the VMSA. 
+ */ + if (!csv_smcr_num || hygon_csv_build < 2407) + nr_smr = (vm_size >> smr_entry_shift) + + 1 + csv3_get_vm_npt_smr_number(vm_size); + else + nr_smr = (vm_size >> smr_entry_shift) + 1; + remainder = nr_smr; for (i = 0; i < nr_smr; i++) { smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); @@ -1427,7 +1743,7 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar } smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift), - nodemask_ptr, + nodemask, get_order(1 << smr_entry_shift)); if (!smr->hpa) { kfree(smr); @@ -1463,8 +1779,16 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar list_splice(&tmp_list, &csv->smr_list); #ifdef CONFIG_SYSFS - csv->npt_size = ALIGN(nr_pages * 9, 1UL << smr_entry_shift); - csv->pri_mem = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift); + /** + * The NPT can be allocated from either CMA or global SMCR. + * Only account for it when allocated from CMA. + */ + if (!csv_smcr_num || hygon_csv_build < 2407) + csv->npt_size = csv3_get_vm_npt_smr_number(vm_size) + << smr_entry_shift; + else + csv->npt_size = 0; + csv->pri_mem = (nr_smr << smr_entry_shift) - csv->npt_size; atomic_long_add(csv->npt_size, &csv3_npt_size); atomic_long_add(csv->pri_mem, &csv3_pri_mem); #endif @@ -1472,6 +1796,7 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar goto done; e_free_smr: + /* Remove temporary smr_list */ if (!list_empty(&tmp_list)) { list_for_each_safe(pos, q, &tmp_list) { smr = list_entry(pos, struct secure_memory_region, list); @@ -1486,6 +1811,85 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar done: kfree(set_guest_private_memory); kfree(regions); + + return ret; +} + +static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + 
nodemask_t nodemask; + nodemask_t *nodemask_ptr; + + u32 i = 0; + int ret = 0; + u32 smr_entry_shift; + int bkt; + + unsigned int flags = FOLL_HWPOISON; + u64 npages, nr_pages = 0; + struct page *page; + + unsigned long vm_size; + enum csv_smr_source source; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + /* The smr_list should be initialized only once */ + if (!list_empty(&csv->smr_list)) + return -EFAULT; + + nodes_clear(nodemask); + for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) + if (i < MAX_NUMNODES) + node_set(i, nodemask); + + nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map; + + /* Get guest secure memory size */ + kvm_for_each_memslot(memslot, bkt, slots) { + if (memslot->flags & KVM_MEM_READONLY) + continue; + + npages = get_user_pages_unlocked(memslot->userspace_addr, 1, + &page, flags); + if (npages != 1) + continue; + + nr_pages += memslot->npages; + + put_page(page); + } + + smr_entry_shift = csv_get_smr_entry_shift(); + vm_size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift); + + /** + * Allocation policy for NPT and VMSA based on CSV3 private memory + * source: + * + * - When CSV3 private memory is allocated from 1GB hugepages: + * * NPT is allocated from the global SMCR. + * * VMSA is allocated from system-reserved memory pool. + * + * - When CSV3 private memory is allocated from CMA: + * * VMSA is allocated from CMA. + * * NPT is allocated from the global SMCR if it was reserved at + * host boot time; otherwise, it is allocated from CMA. 
+ */ + source = get_csv_smr_source(); + if (source == USE_HUGETLB) { + ret = csv3_set_hugetlb_smr(kvm, vm_size, nodemask_ptr, argp); + } else if (source == USE_CMA) { + ret = csv3_set_cma_smr(kvm, vm_size, nodemask_ptr, argp); + } else { + ret = -EFAULT; + WARN_ON(1); + } + return ret; } @@ -3081,11 +3485,10 @@ static void csv_vm_destroy(struct kvm *kvm) struct kvm_vcpu *vcpu; struct list_head *smr_head = &csv->smr_list; - struct list_head *pos, *q; - struct secure_memory_region *smr; struct shared_page *sp; struct rb_node *node; unsigned long i = 0; + enum csv_smr_source source = get_csv_smr_source(); if (csv3_guest(kvm)) { mutex_lock(&csv->sp_lock); @@ -3130,21 +3533,17 @@ static void csv_vm_destroy(struct kvm *kvm) return; /* free secure memory region */ - if (!list_empty(smr_head)) { - list_for_each_safe(pos, q, smr_head) { - smr = list_entry(pos, struct secure_memory_region, list); - if (smr) { - csv_release_to_contiguous(smr->hpa, smr->npages << PAGE_SHIFT); - list_del(&smr->list); - kfree(smr); - } - } + if (!list_empty(smr_head)) + csv3_free_smr_list(kvm); + + /* Free hugetlb if CSV3 private memory is allocated from 1G hugetlb */ + if (source == USE_HUGETLB) + csv3_free_1G_hugetlb_folios(kvm); #ifdef CONFIG_SYSFS - atomic_long_sub(csv->npt_size, &csv3_npt_size); - atomic_long_sub(csv->pri_mem, &csv3_pri_mem); + atomic_long_sub(csv->npt_size, &csv3_npt_size); + atomic_long_sub(csv->pri_mem, &csv3_pri_mem); #endif - } } static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, -- Gitee