diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h
index 8d9aa7b2c4644b75c588b245000bf6bd48232349..027b595c72b18a1850006a1f066a2fe155c04ff7 100644
--- a/arch/x86/include/asm/csv.h
+++ b/arch/x86/include/asm/csv.h
@@ -14,17 +14,26 @@
 #include
 
+enum csv_smr_source {
+	USE_CMA,
+	USE_HUGETLB,
+	NOT_SUPPORTED,
+};
+
 #ifdef CONFIG_HYGON_CSV
 
 struct csv_mem {
 	uint64_t start;
 	uint64_t size;
+	int nid;
 };
 
 #define CSV_MR_ALIGN_BITS	(28)
 
 extern struct csv_mem *csv_smr;
 extern unsigned int csv_smr_num;
+extern struct csv_mem *csv_smcr;
+extern unsigned int csv_smcr_num;
 
 #ifdef CONFIG_SYSFS
 extern atomic_long_t csv3_npt_size;
@@ -39,12 +48,19 @@ phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed,
 				      unsigned int align);
 void csv_release_to_contiguous(phys_addr_t pa, size_t size);
 
+phys_addr_t csv_alloc_metadata(void);
+void csv_free_metadata(u64 hpa);
+
+enum csv_smr_source get_csv_smr_source(void);
+
 uint32_t csv_get_smr_entry_shift(void);
 
 #else	/* !CONFIG_HYGON_CSV */
 
 #define csv_smr		NULL
 #define csv_smr_num	0U
+#define csv_smcr	NULL
+#define csv_smcr_num	0U
 
 static inline void __init early_csv_reserve_mem(void) { }
 
@@ -53,6 +69,11 @@ csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed,
 			  unsigned int align) { return 0; }
 static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { }
 
+static inline phys_addr_t csv_alloc_metadata(void) { return 0; }
+static inline void csv_free_metadata(u64 hpa) { }
+
+static inline enum csv_smr_source get_csv_smr_source(void) { return NOT_SUPPORTED; }
+
 static inline uint32_t csv_get_smr_entry_shift(void) { return 0; }
 
 #endif	/* CONFIG_HYGON_CSV */
diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c
index cfbb6e3608be8876e1b3c7c6641c21f6d8f0c3c7..1fc7d78d2de03db426c84259966bc8a12f0e1fb6 100644
--- a/arch/x86/kvm/svm/csv.c
+++ b/arch/x86/kvm/svm/csv.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <linux/hugetlb.h>
 #include
 #include
 #include
@@ -898,6 +899,7 @@ struct kvm_csv_info {
 	struct mutex sp_lock;		/* shared page lock */
 
 	struct list_head smr_list;	/* List of guest secure memory regions */
+	struct list_head hugetlb_list;	/* List of 1G hugetlb (if used) */
 	unsigned long nodemask;		/* Nodemask where CSV3 guest's memory resides */
 
 	/* The following 5 fields record the extension status for current VM */
@@ -918,10 +920,21 @@ struct kvm_svm_csv {
 	struct kvm_csv_info csv_info;
 };
 
+enum csv_mem_type {
+	CSV_METADATA,
+	CSV_SEC_MEM,
+};
+
 struct secure_memory_region {
 	struct list_head list;
 	u64 npages;
 	u64 hpa;
+	enum csv_mem_type type;
+};
+
+struct csv3_hugetlb {
+	struct list_head list;
+	struct folio *folio;
 };
 
 #ifdef CONFIG_SYSFS
@@ -1325,6 +1338,7 @@ static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	csv->nodemask = (unsigned long)params.nodemask;
 
 	INIT_LIST_HEAD(&csv->smr_list);
+	INIT_LIST_HEAD(&csv->hugetlb_list);
 	mutex_init(&csv->sp_lock);
 
 	return 0;
@@ -1337,43 +1351,197 @@ static bool csv3_is_mmio_pfn(kvm_pfn_t pfn)
 				E820_TYPE_RAM);
 }
 
-static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *argp)
+/**
+ * csv3_free_smr_list - Free the secure_memory_region structures on a CSV3
+ * VM's smr_list.
+ *
+ * There are 3 types of memory described by the secure_memory_region
+ * structure:
+ * - Guest private memory, NPT, metadata (e.g., VMSA) allocated from CMA.
+ *   This CMA memory must be released when it is removed from the CSV3 VM's
+ *   smr_list.
+ * - Guest private memory allocated from 1G hugetlb.
+ *   The caller should release the 1G hugetlb pages.
+ * - Guest metadata (e.g., VMSA) allocated from the system-reserved memory
+ *   pool, used only when guest private memory is allocated from 1G hugetlb.
+ *   This memory should be freed back to the pool.
+ */
+static void csv3_free_smr_list(struct kvm *kvm)
 {
-	struct kvm_memslots *slots = kvm_memslots(kvm);
-	struct kvm_memory_slot *memslot;
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
 	struct secure_memory_region *smr;
+	struct list_head *pos, *q;
+	enum csv_smr_source source = get_csv_smr_source();
+
+	list_for_each_safe(pos, q, &csv->smr_list) {
+		smr = list_entry(pos, struct secure_memory_region, list);
+		if (smr) {
+			if (source == USE_CMA)
+				csv_release_to_contiguous(smr->hpa,
+						smr->npages << PAGE_SHIFT);
+			else if (smr->type == CSV_METADATA)
+				csv_free_metadata(smr->hpa);
+
+			list_del(&smr->list);
+			kfree(smr);
+		}
+	}
+}
+
+/**
+ * This helper function is used when CSV3 private memory is allocated from 1G
+ * hugetlb.
+ */
+static void csv3_free_1G_hugetlb_folios(struct kvm *kvm)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct csv3_hugetlb *hugetlb;
+	struct list_head *pos, *q;
+
+	list_for_each_safe(pos, q, &csv->hugetlb_list) {
+		hugetlb = list_entry(pos, struct csv3_hugetlb, list);
+		if (hugetlb) {
+			folio_put(hugetlb->folio);
+			list_del(&hugetlb->list);
+			kfree(hugetlb);
+		}
+	}
+}
+
+/**
+ * This helper function is used when CSV3 private memory is allocated from 1G
+ * hugetlb.
+ */
+static int csv3_alloc_1G_hugetlb_folios(struct kvm *kvm,
+					unsigned long size,
+					nodemask_t *nodemask)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct folio *folio;
+	struct csv3_hugetlb *hugetlb;
+	int huge_count;
+	int i;
+	int ret = 0;
+
+	huge_count = ALIGN(size, PUD_SIZE) / PUD_SIZE;
+
+	for (i = 0; i < huge_count; i++) {
+		hugetlb = kzalloc(sizeof(*hugetlb), GFP_KERNEL_ACCOUNT);
+		if (!hugetlb) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		folio = dequeue_1G_hugetlb_folio_nodemask(GFP_KERNEL,
+						NUMA_NO_NODE, nodemask);
+		if (!folio) {
+			kfree(hugetlb);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		hugetlb->folio = folio;
+		list_add_tail(&hugetlb->list, &csv->hugetlb_list);
+	}
+
+	return 0;
+err:
+	csv3_free_1G_hugetlb_folios(kvm);
+
+	return ret;
+}
+
+/**
+ * This helper function is used when CSV3 private memory is allocated from 1G
+ * hugetlb.
+ */
+static int csv3_init_1G_hugetlb_smrs(struct kvm *kvm)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct csv3_hugetlb *hugetlb;
+	struct secure_memory_region *smr;
+	struct list_head *pos, *q;
+	u64 hpa;
+	u32 smr_entry_shift;
+	int smr_count;
+	int i;
+	int ret = 0;
+
+	smr_entry_shift = csv_get_smr_entry_shift();
+	smr_count = PUD_SIZE / (1UL << smr_entry_shift);
+
+	list_for_each_safe(pos, q, &csv->hugetlb_list) {
+		hugetlb = list_entry(pos, struct csv3_hugetlb, list);
+		if (!hugetlb) {
+			WARN_ON(1);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		hpa = folio_pfn(hugetlb->folio) << PAGE_SHIFT;
+		for (i = 0; i < smr_count; i++) {
+			smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT);
+			if (!smr) {
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			smr->hpa = hpa + i * (1UL << smr_entry_shift);
+			smr->type = CSV_SEC_MEM;
+			list_add_tail(&smr->list, &csv->smr_list);
+		}
+	}
+
+	return 0;
+
+err:
+	csv3_free_smr_list(kvm);
+
+	return ret;
+}
+
+/**
+ * This helper function is used when CSV3 private memory is allocated from 1G
+ * hugetlb.
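+ *
+ * Return: the HPA of one SMR-sized chunk that csv3_init_1G_hugetlb_smrs()
+ * carved out of the 1G hugetlb folios, or 0 once the list is exhausted.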
+ */
+static u64 csv3_get_1G_hugetlb_smr(struct kvm *kvm)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct secure_memory_region *smr;
+	struct list_head *pos, *q;
+	u64 hpa = 0;
+
+	list_for_each_safe(pos, q, &csv->smr_list) {
+		smr = list_entry(pos, struct secure_memory_region, list);
+		if (smr) {
+			hpa = smr->hpa;
+			list_del(&smr->list);
+			kfree(smr);
+			break;
+		}
+	}
+
+	return hpa;
+}
+
+/**
+ * This helper function is used when CSV3 private memory is allocated from 1G
+ * hugetlb.
+ */
+static int csv3_set_hugetlb_smr(struct kvm *kvm, unsigned long vm_size,
+				nodemask_t *nodemask, struct kvm_sev_cmd *argp)
+{
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
 	struct csv3_data_set_guest_private_memory *set_guest_private_memory;
+	struct secure_memory_region *smr;
 	struct csv3_data_memory_region *regions;
-	nodemask_t nodemask;
-	nodemask_t *nodemask_ptr;
 	LIST_HEAD(tmp_list);
 	struct list_head *pos, *q;
 	u32 i = 0, count = 0, remainder;
 	int ret = 0;
-	u64 size = 0, nr_smr = 0, nr_pages = 0;
+	u64 nr_smr = 0;
 	u32 smr_entry_shift;
-	int bkt;
-
-	unsigned int flags = FOLL_HWPOISON;
-	int npages;
-	struct page *page;
-
-	if (!csv3_guest(kvm))
-		return -ENOTTY;
-
-	/* The smr_list should be initialized only once */
-	if (!list_empty(&csv->smr_list))
-		return -EFAULT;
-
-	nodes_clear(nodemask);
-	for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG)
-		if (i < MAX_NUMNODES)
-			node_set(i, nodemask);
-	nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map;
+	bool metadata_allocated = false;
 
 	set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory),
 					GFP_KERNEL_ACCOUNT);
@@ -1386,17 +1554,118 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar
 		return -ENOMEM;
 	}
 
-	/* Get guest secure memory size */
-	kvm_for_each_memslot(memslot, bkt, slots) {
-		npages = get_user_pages_unlocked(memslot->userspace_addr, 1,
-						&page, flags);
-		if (npages != 1)
-			continue;
+	ret = csv3_alloc_1G_hugetlb_folios(kvm, vm_size, nodemask);
+	if (ret)
+		goto done;
 
-		nr_pages += memslot->npages;
+	ret = csv3_init_1G_hugetlb_smrs(kvm);
+	if (ret)
+		goto e_free_hugetlb;
 
-		put_page(page);
+	smr_entry_shift = csv_get_smr_entry_shift();
+	nr_smr = vm_size >> smr_entry_shift;
+	nr_smr += 1;
+
+	remainder = nr_smr;
+	for (i = 0; i < nr_smr; i++) {
+		smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT);
+		if (!smr) {
+			ret = -ENOMEM;
+			goto e_free_smr;
+		}
+
+		if (!metadata_allocated) {
+			smr->hpa = csv_alloc_metadata();
+			smr->type = CSV_METADATA;
+			metadata_allocated = true;
+		} else {
+			smr->hpa = csv3_get_1G_hugetlb_smr(kvm);
+			smr->type = CSV_SEC_MEM;
+		}
+		if (!smr->hpa) {
+			kfree(smr);
+			ret = -ENOMEM;
+			goto e_free_smr;
+		}
+
+		smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT);
+		list_add_tail(&smr->list, &tmp_list);
+
+		regions[count].size = (1UL << smr_entry_shift);
+		regions[count].base_address = smr->hpa;
+		count++;
+
+		if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) {
+			set_guest_private_memory->nregions = count;
+			set_guest_private_memory->handle = sev->handle;
+			set_guest_private_memory->regions_paddr = __sme_pa(regions);
+
+			/* set secure memory region for launch encrypt data */
+			ret = hygon_kvm_hooks.sev_issue_cmd(kvm,
+					CSV3_CMD_SET_GUEST_PRIVATE_MEMORY,
+					set_guest_private_memory, &argp->error);
+			if (ret)
+				goto e_free_smr;
+
+			memset(regions, 0, PAGE_SIZE);
+			remainder -= count;
+			count = 0;
+		}
+	}
+
+	list_splice(&tmp_list, &csv->smr_list);
+
+#ifdef CONFIG_SYSFS
+	/* The NPT is allocated from the global SMCR */
+	csv->npt_size = 0;
+	/**
+	 * Don't account for guest private memory since:
+	 * - VMSAs are allocated from the system-reserved memory pool.
+	 * - CSV3 private memory is allocated from 1G hugetlb.
+	 */
+	csv->pri_mem = 0;
+#endif
+
+	goto done;
+
+e_free_smr:
+	/* Remove temporary smr_list */
+	if (!list_empty(&tmp_list)) {
+		list_for_each_safe(pos, q, &tmp_list) {
+			smr = list_entry(pos, struct secure_memory_region, list);
+			if (smr) {
+				if (smr->type == CSV_METADATA)
+					csv_free_metadata(smr->hpa);
+
+				list_del(&smr->list);
+				kfree(smr);
+			}
+		}
 	}
+	/* Remove smr_list created by csv3_init_1G_hugetlb_smrs() */
+	csv3_free_smr_list(kvm);
+
+e_free_hugetlb:
+	csv3_free_1G_hugetlb_folios(kvm);
+
+done:
+	kfree(set_guest_private_memory);
+	kfree(regions);
+
+	return ret;
+}
+
+/*
+ * Calculate the NPT size according to the input VM size and return the number
+ * of SMRs needed for the NPT.
+ *
+ * This helper function is used when CSV3 private memory is allocated from CMA.
+ */
+static u32 csv3_get_vm_npt_smr_number(unsigned long vm_size)
+{
+	u64 nr_pages = vm_size >> PAGE_SHIFT;
+	u32 smr_entry_shift, nr_smr;
+	u64 size;
 
 	/*
 	 * NPT secure memory size
 	 *
@@ -1415,9 +1684,56 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar
 	 *
 	 */
 	smr_entry_shift = csv_get_smr_entry_shift();
-	size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) +
-	       ALIGN(nr_pages * 9, 1UL << smr_entry_shift);
+	size = ALIGN(nr_pages * 9, 1UL << smr_entry_shift);
 	nr_smr = size >> smr_entry_shift;
+
+	return nr_smr;
+}
+
+/**
+ * This helper function is used when CSV3 private memory is allocated from CMA.
+ */
+static int csv3_set_cma_smr(struct kvm *kvm, unsigned long vm_size,
+			    nodemask_t *nodemask, struct kvm_sev_cmd *argp)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct csv3_data_set_guest_private_memory *set_guest_private_memory;
+	struct secure_memory_region *smr;
+	struct csv3_data_memory_region *regions;
+
+	LIST_HEAD(tmp_list);
+	struct list_head *pos, *q;
+	u32 i = 0, count = 0, remainder;
+	int ret = 0;
+	u64 nr_smr = 0;
+	u32 smr_entry_shift;
+
+	set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory),
+					GFP_KERNEL_ACCOUNT);
+	if (!set_guest_private_memory)
+		return -ENOMEM;
+
+	regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+	if (!regions) {
+		kfree(set_guest_private_memory);
+		return -ENOMEM;
+	}
+
+	smr_entry_shift = csv_get_smr_entry_shift();
+
+	/**
+	 * If @csv_smcr_num is zero or the firmware does not support explicit
+	 * NPT reservation in SMCR, both VMSA and NPT are allocated from CMA.
+	 * Otherwise, the NPT is already reserved in SMCR, and only a single
+	 * SMR is needed for the VMSA.
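	 *
	 * Illustrative example (the real entry size comes from
	 * csv_get_smr_entry_shift()): with a 4GB VM and a 512MB SMR entry,
	 * nr_smr = (4GB >> 29) + 1 = 9 here, plus one more entry from
	 * csv3_get_vm_npt_smr_number() when the NPT must also come from CMA.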
+ */ + if (!csv_smcr_num || hygon_csv_build < 2407) + nr_smr = (vm_size >> smr_entry_shift) + + 1 + csv3_get_vm_npt_smr_number(vm_size); + else + nr_smr = (vm_size >> smr_entry_shift) + 1; + remainder = nr_smr; for (i = 0; i < nr_smr; i++) { smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); @@ -1427,7 +1743,7 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar } smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift), - nodemask_ptr, + nodemask, get_order(1 << smr_entry_shift)); if (!smr->hpa) { kfree(smr); @@ -1463,8 +1779,16 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar list_splice(&tmp_list, &csv->smr_list); #ifdef CONFIG_SYSFS - csv->npt_size = ALIGN(nr_pages * 9, 1UL << smr_entry_shift); - csv->pri_mem = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift); + /** + * The NPT can be allocated from either CMA or global SMCR. + * Only account for it when allocated from CMA. + */ + if (!csv_smcr_num || hygon_csv_build < 2407) + csv->npt_size = csv3_get_vm_npt_smr_number(vm_size) + << smr_entry_shift; + else + csv->npt_size = 0; + csv->pri_mem = (nr_smr << smr_entry_shift) - csv->npt_size; atomic_long_add(csv->npt_size, &csv3_npt_size); atomic_long_add(csv->pri_mem, &csv3_pri_mem); #endif @@ -1472,6 +1796,7 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar goto done; e_free_smr: + /* Remove temporary smr_list */ if (!list_empty(&tmp_list)) { list_for_each_safe(pos, q, &tmp_list) { smr = list_entry(pos, struct secure_memory_region, list); @@ -1486,6 +1811,85 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar done: kfree(set_guest_private_memory); kfree(regions); + + return ret; +} + +static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + nodemask_t nodemask; + nodemask_t *nodemask_ptr; + + u32 i = 0; + int ret = 0; + u32 smr_entry_shift; + int bkt; + + unsigned int flags = FOLL_HWPOISON; + u64 npages, nr_pages = 0; + struct page *page; + + unsigned long vm_size; + enum csv_smr_source source; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + /* The smr_list should be initialized only once */ + if (!list_empty(&csv->smr_list)) + return -EFAULT; + + nodes_clear(nodemask); + for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) + if (i < MAX_NUMNODES) + node_set(i, nodemask); + + nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map; + + /* Get guest secure memory size */ + kvm_for_each_memslot(memslot, bkt, slots) { + if (memslot->flags & KVM_MEM_READONLY) + continue; + + npages = get_user_pages_unlocked(memslot->userspace_addr, 1, + &page, flags); + if (npages != 1) + continue; + + nr_pages += memslot->npages; + + put_page(page); + } + + smr_entry_shift = csv_get_smr_entry_shift(); + vm_size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift); + + /** + * Allocation policy for NPT and VMSA based on CSV3 private memory + * source: + * + * - When CSV3 private memory is allocated from 1GB hugepages: + * * NPT is allocated from the global SMCR. + * * VMSA is allocated from system-reserved memory pool. + * + * - When CSV3 private memory is allocated from CMA: + * * VMSA is allocated from CMA. + * * NPT is allocated from the global SMCR if it was reserved at + * host boot time; otherwise, it is allocated from CMA. 
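	 *
	 * In tabular form:
	 *
	 *   private memory | NPT                      | VMSA
	 *   ---------------+--------------------------+----------------------
	 *   1G hugetlb     | global SMCR              | metadata pool
	 *   CMA            | global SMCR if reserved, | CMA
	 *                  | otherwise CMA            |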
+	 */
+	source = get_csv_smr_source();
+	if (source == USE_HUGETLB) {
+		ret = csv3_set_hugetlb_smr(kvm, vm_size, nodemask_ptr, argp);
+	} else if (source == USE_CMA) {
+		ret = csv3_set_cma_smr(kvm, vm_size, nodemask_ptr, argp);
+	} else {
+		ret = -EFAULT;
+		WARN_ON(1);
+	}
+
 	return ret;
 }
 
@@ -3081,11 +3485,10 @@ static void csv_vm_destroy(struct kvm *kvm)
 	struct kvm_vcpu *vcpu;
 
 	struct list_head *smr_head = &csv->smr_list;
-	struct list_head *pos, *q;
-	struct secure_memory_region *smr;
 	struct shared_page *sp;
 	struct rb_node *node;
 	unsigned long i = 0;
+	enum csv_smr_source source = get_csv_smr_source();
 
 	if (csv3_guest(kvm)) {
 		mutex_lock(&csv->sp_lock);
@@ -3130,21 +3533,17 @@ static void csv_vm_destroy(struct kvm *kvm)
 		return;
 
 	/* free secure memory region */
-	if (!list_empty(smr_head)) {
-		list_for_each_safe(pos, q, smr_head) {
-			smr = list_entry(pos, struct secure_memory_region, list);
-			if (smr) {
-				csv_release_to_contiguous(smr->hpa, smr->npages << PAGE_SHIFT);
-				list_del(&smr->list);
-				kfree(smr);
-			}
-		}
+	if (!list_empty(smr_head))
+		csv3_free_smr_list(kvm);
+
+	/* Free hugetlb if CSV3 private memory is allocated from 1G hugetlb */
+	if (source == USE_HUGETLB)
+		csv3_free_1G_hugetlb_folios(kvm);
 
 #ifdef CONFIG_SYSFS
-		atomic_long_sub(csv->npt_size, &csv3_npt_size);
-		atomic_long_sub(csv->pri_mem, &csv3_pri_mem);
+	atomic_long_sub(csv->npt_size, &csv3_npt_size);
+	atomic_long_sub(csv->pri_mem, &csv3_pri_mem);
 #endif
-	}
 }
 
 static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c
index c3cc7b00d943c8e2c7d1b65748e0a44bc7d0041c..6b20f83e351b8eec1efe3d26cf343c8a4cb8e518 100644
--- a/arch/x86/mm/mem_encrypt_hygon.c
+++ b/arch/x86/mm/mem_encrypt_hygon.c
@@ -136,33 +136,121 @@ bool csv3_active(void)
 }
 EXPORT_SYMBOL_GPL(csv3_active);
 
-/******************************************************************************/
-/**************************** CSV3 CMA interfaces *****************************/
-/******************************************************************************/
+/* Common CSV3 memory protection macros */
+#define NUM_SMR_ENTRIES		(8 * 1024)
+#define MIN_SMR_ENTRY_SHIFT	23
+#define CSV_SMR_INFO_SIZE	(nr_node_ids * sizeof(struct csv_mem))
+#define CSV_SMCR_MAX_ENTRIES	64	/* 16GB SMCR */
+/* CSV3 CMA macros */
+#define CSV_CMA_SHIFT		PUD_SHIFT
+#define CSV_CMA_SIZE		(1 << CSV_CMA_SHIFT)
+#define MAX_CSV_CMA_PCT		95
+#define CSV_CMA_AREAS		2458
+/* CSV3 1G hugetlb macros */
+#define DEFAULT_MAX_CSV_NUMBER	113	/* This is an empirical value */
 
-#define CSV_MEM_PCT_MAX			(95U)
+/**
+ * An array of Secure Memory Regions (SMRs), where each entry records a
+ * physical address range within a NUMA node that will be managed by hardware.
+ * Each NUMA node has at most one entry, and NUMA nodes without physical
+ * memory are not included in the array.
+ */
+struct csv_mem *csv_smr;
+EXPORT_SYMBOL_GPL(csv_smr);
+/* Number of entries in the @csv_smr array */
+unsigned int csv_smr_num;
+EXPORT_SYMBOL_GPL(csv_smr_num);
+/**
+ * The kernel cmdline parameter 'csv_smcr_size=<size>' can be used to
+ * reserve memory for CSV3 VM NPT on the host platform. When this parameter
+ * is specified, the kernel reserves physical memory of the given size. This
+ * reserved memory is described by @csv_smcr.
+ * Optional when CSV3 private memory is allocated from CSV-CMA.
+ * Required when CSV3 private memory is allocated from 1G hugetlb pages.
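+ *
+ * For example, 'csv_smcr_size=4G' reserves 16 regions of 256MB
+ * (1 << CSV_MR_ALIGN_BITS) each; at most 16GB (64 regions) may be reserved.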
+ */ +struct csv_mem *csv_smcr; +EXPORT_SYMBOL_GPL(csv_smcr); +/* Number of entries in the @csv_smcr array */ +unsigned int csv_smcr_num; +EXPORT_SYMBOL_GPL(csv_smcr_num); +/** + * @csv_cma_pct specifies the percentage of total system memory to be managed by + * CMA, while the @csv_cma_size specifies the absolute size of CMA-managed + * memory. + * These values can be set via the mutually exclusive kernel cmdline parameter: + * - 'csv_mem_percentage=' sets @csv_cma_pct + * - 'csv_mem_size=' sets @csv_cma_size. + * Required when the CSV3 private memory is allocated from CSV-CMA. + */ +static unsigned char csv_cma_pct; +static unsigned long csv_cma_size; +/** + * Number of CSV3 VMs that the system is intended to support. + * The kernel cmdline parameter 'csv_use_hugetlb=' specify the value of + * @csv_use_hugetlb. + * Required when CSV3 private memory is allocated from 1G hugetlb pages. + */ +static unsigned int csv_use_hugetlb; +/** + * When CSV3 private memory is allocated from 1G hugetlb, metadata blocks are + * reserved for per-VM VMSAs. + * The @csv_use_hugetlb indicates how many metadata blocks to reserve. + */ +struct csv_metadata { + struct list_head list; + unsigned long hpa; + bool used; +}; +static LIST_HEAD(csv_metadata_list); +DEFINE_SPINLOCK(csv_metadata_lock); +/** + * The memory unit size managed by the hardware. Do not confuse this with + * @csv_smr or @csv_smr_num. + */ +static unsigned int smr_entry_shift; -/* 0 percent of total memory by default*/ -static unsigned char csv_mem_percentage; -static unsigned long csv_mem_size; +#ifdef CONFIG_SYSFS -static int __init cmdline_parse_csv_mem_size(char *str) +/** + * Global counters exposed via /sys/kernel/mm/csv3_cma/mem_info. Update + * atomically during VM creation/destruction. + * + * csv3_npt_size: total size of NPT tables allocated. + * csv3_pri_mem: total private memory allocated for CSV guests. + * csv3_meta: metadata overhead for CSV memory regions. + * csv3_shared_mem: size of all the CSV3 VMs' shared memory. + */ +atomic_long_t csv3_npt_size = ATOMIC_LONG_INIT(0); +EXPORT_SYMBOL_GPL(csv3_npt_size); + +atomic_long_t csv3_pri_mem = ATOMIC_LONG_INIT(0); +EXPORT_SYMBOL_GPL(csv3_pri_mem); + +unsigned long csv3_meta; +EXPORT_SYMBOL_GPL(csv3_meta); + +atomic_long_t csv3_shared_mem[MAX_NUMNODES]; +EXPORT_SYMBOL_GPL(csv3_shared_mem); + +#endif /* CONFIG_SYSFS */ + +static int __init cmdline_parse_csv_cma_size(char *str) { unsigned long size; char *endp; if (str) { - size = memparse(str, &endp); - csv_mem_size = size; - if (!csv_mem_size) - csv_mem_percentage = 0; + size = memparse(str, &endp); + csv_cma_size = size; + if (!csv_cma_size) + csv_cma_pct = 0; } return 0; } -early_param("csv_mem_size", cmdline_parse_csv_mem_size); +early_param("csv_mem_size", cmdline_parse_csv_cma_size); -static int __init cmdline_parse_csv_mem_percentage(char *str) +static int __init cmdline_parse_csv_cma_pct(char *str) { unsigned char percentage; int ret; @@ -170,59 +258,214 @@ static int __init cmdline_parse_csv_mem_percentage(char *str) if (!str) return 0; - ret = kstrtou8(str, 10, &percentage); + ret = kstrtou8(str, 10, &percentage); if (!ret) { - csv_mem_percentage = min_t(unsigned char, percentage, CSV_MEM_PCT_MAX); - if (csv_mem_percentage != percentage) + csv_cma_pct = min_t(unsigned char, percentage, MAX_CSV_CMA_PCT); + if (csv_cma_pct != percentage) pr_warn("csv_mem_percentage is limited to %d.\n", - CSV_MEM_PCT_MAX); + MAX_CSV_CMA_PCT); } else { /* Disable CSV CMA. */ - csv_mem_percentage = 0; - pr_err("csv_mem_percentage is invalid. 
(0 - %d) is expected.\n", - CSV_MEM_PCT_MAX); + csv_cma_pct = 0; + pr_err("csv_mem_percentage is invalid. [0 - %d] is expected.\n", + MAX_CSV_CMA_PCT); } return ret; } -early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage); +early_param("csv_mem_percentage", cmdline_parse_csv_cma_pct); -#define NUM_SMR_ENTRIES (8 * 1024) -#define CSV_CMA_SHIFT PUD_SHIFT -#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) -#define MIN_SMR_ENTRY_SHIFT 23 -#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) +static int __init cmdline_parse_csv_smcr_size(char *str) +{ + unsigned long size; + char *endp; -struct csv_mem *csv_smr; -EXPORT_SYMBOL_GPL(csv_smr); + if (str) { + size = memparse(str, &endp); + if (size) { + csv_smcr_num = size >> CSV_MR_ALIGN_BITS; + if (csv_smcr_num < 2) { + csv_smcr_num = 0; + pr_err("CSV-SMCR: csv_smcr_size must be >= 512MB\n"); + } + if (csv_smcr_num > CSV_SMCR_MAX_ENTRIES) { + csv_smcr_num = CSV_SMCR_MAX_ENTRIES; + pr_warn("CSV-SMCR: csv_smcr_size is limited to 16GB\n"); + } + } else + pr_err("CSV-SMCR: csv_smcr_size is invalid\n"); + } -unsigned int csv_smr_num; -EXPORT_SYMBOL_GPL(csv_smr_num); + return 0; +} +early_param("csv_smcr_size", cmdline_parse_csv_smcr_size); -#ifdef CONFIG_SYSFS +static int __init cmdline_parse_csv_use_hugetlb(char *str) +{ + unsigned int count, limit = 0; + int ret; -/** - * Global counters exposed via /sys/kernel/mm/csv3_cma/mem_info. Update - * atomically during VM creation/destruction. - * - * csv3_npt_size: total size of NPT tables allocated. - * csv3_pri_mem: total private memory allocated for CSV guests. - * csv3_meta: metadata overhead for CSV memory regions. - * csv3_shared_mem: size of all the CSV3 VMs' shared memory. - */ -atomic_long_t csv3_npt_size = ATOMIC_LONG_INIT(0); -EXPORT_SYMBOL_GPL(csv3_npt_size); + if (!str) { + csv_use_hugetlb = DEFAULT_MAX_CSV_NUMBER; + return 0; + } -atomic_long_t csv3_pri_mem = ATOMIC_LONG_INIT(0); -EXPORT_SYMBOL_GPL(csv3_pri_mem); + if (is_x86_vendor_hygon() && boot_cpu_data.x86_model >= 0x4) + limit = 500; -unsigned long csv3_meta; -EXPORT_SYMBOL_GPL(csv3_meta); + ret = kstrtou32(str, 10, &count); + if (!ret) { + if (limit < count) { + pr_info("csv_use_hugetlb is limited to %d\n", limit); + count = limit; + } + csv_use_hugetlb = count; + } else { + /* Disable CSV hugetlb. */ + csv_use_hugetlb = 0; + pr_err("csv_use_hugetlb is invalid. 
[0 - %d] is expected.\n", limit); + } -atomic_long_t csv3_shared_mem[MAX_NUMNODES]; -EXPORT_SYMBOL_GPL(csv3_shared_mem); + return ret; +} +early_param("csv_use_hugetlb", cmdline_parse_csv_use_hugetlb); -#endif /* CONFIG_SYSFS */ +static void csv_set_smr_entry_shift(unsigned int shift) +{ + smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); + pr_info("CSV: SMR entry size is 0x%x\n", 1 << smr_entry_shift); +} + +unsigned int csv_get_smr_entry_shift(void) +{ + return smr_entry_shift; +} +EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); + +static void __init csv_smcr_free_mem(void) +{ + unsigned int i; + + if (!csv_smcr_num) + return; + + for (i = 0; i < csv_smcr_num; i++) { + if (csv_smcr[i].start && csv_smcr[i].size) { + memblock_phys_free(csv_smcr[i].start, csv_smcr[i].size); + pr_info("CSV-SMCR: free mem - paddr 0x%016llx, size 0x%016llx\n", + csv_smcr[i].start, csv_smcr[i].size); + } + } + + if (csv_smcr) { + memblock_free(csv_smcr, sizeof(struct csv_mem) * csv_smcr_num); + csv_smcr = NULL; + csv_smcr_num = 0; + } +} + +static int __init csv_smcr_reserve_mem(void) +{ + unsigned int i; + + if (!csv_smcr_num) + return 0; + + csv_smcr = memblock_alloc_node(sizeof(struct csv_mem) * csv_smcr_num, + SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!csv_smcr) { + pr_err("CSV-SMCR: Fail to allocate memory\n"); + return -ENOMEM; + } + + memset(csv_smcr, 0, sizeof(struct csv_mem) * csv_smcr_num); + for (i = 0; i < csv_smcr_num; i++) { + csv_smcr[i].size = 1UL << CSV_MR_ALIGN_BITS; + csv_smcr[i].start = memblock_phys_alloc_try_nid(csv_smcr[i].size, + csv_smcr[i].size, + NUMA_NO_NODE); + if (csv_smcr[i].start == 0) { + csv_smcr[i].size = 0; + pr_err("CSV-SMCR: Fail to reserve memory\n"); + goto failure; + } + csv_smcr[i].nid = phys_to_target_node(csv_smcr[i].start); + + pr_info("CSV-SMCR: reserve mem - paddr 0x%016llx, size 0x%016llx\n", + csv_smcr[i].start, csv_smcr[i].size); + } + + return 0; + +failure: + csv_smcr_free_mem(); + + return -ENOMEM; +} + +static unsigned long __init smallest_pfn_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long smallest = -1; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) { + if (range_start_pfn < smallest) + smallest = range_start_pfn; + } + + return smallest; +} + +static unsigned long __init largest_pfn_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long largest = 0; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) { + if (range_end_pfn > largest) + largest = range_end_pfn; + } + + return largest; +} + +static unsigned long __init largest_pfn(void) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long largest = 0; + int node, i; + + for_each_node_state(node, N_ONLINE) { + for_each_mem_pfn_range(i, node, &range_start_pfn, &range_end_pfn, NULL) { + if (range_end_pfn > largest) + largest = range_end_pfn; + } + } + + return largest; +} + +static struct csv_mem * __init find_csv_smcr_mem_nid(int nid) +{ + int i; + struct csv_mem *smcr = NULL; + + if (!csv_smcr) + return NULL; + + for (i = 0; i < csv_smcr_num; i++) { + if (csv_smcr[i].nid == nid) { + smcr = &csv_smcr[i]; + return smcr; + } + } + + return NULL; +} + +#ifdef CONFIG_CMA struct csv_cma { int nid; @@ -237,21 +480,8 @@ struct cma_array { struct csv_cma csv_cma[]; }; -static unsigned int smr_entry_shift; static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; -static void csv_set_smr_entry_shift(unsigned int shift) -{ - smr_entry_shift = 
max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); - pr_info("CSV-CMA: SMR entry size is 0x%x\n", 1 << smr_entry_shift); -} - -unsigned int csv_get_smr_entry_shift(void) -{ - return smr_entry_shift; -} -EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); - static unsigned long __init present_pages_in_node(int nid) { unsigned long range_start_pfn, range_end_pfn; @@ -266,10 +496,10 @@ static unsigned long __init present_pages_in_node(int nid) static phys_addr_t __init csv_early_percent_memory_on_node(int nid) { - return (present_pages_in_node(nid) * csv_mem_percentage / 100) << PAGE_SHIFT; + return (present_pages_in_node(nid) * csv_cma_pct / 100) << PAGE_SHIFT; } -static void __init csv_cma_reserve_mem(void) +static int __init csv_cma_reserve_mem(void) { int node, i; unsigned long size; @@ -278,12 +508,6 @@ static void __init csv_cma_reserve_mem(void) int cma_array_size; unsigned long max_spanned_size = 0; - csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); - if (!csv_smr) { - pr_err("CSV-CMA: Fail to allocate csv_smr\n"); - return; - } - for_each_node_state(node, N_ONLINE) { int ret; char name[CMA_MAX_NAME]; @@ -334,6 +558,20 @@ static void __init csv_cma_reserve_mem(void) continue; array->count = i; + + /** + * If CSV3 private memory is allocated from CSV-CMA and + * @csv_smcr is specified, the reserved SMCRs are taken into + * account. + */ + if (find_csv_smcr_mem_nid(node)) { + pr_info("CSV-CMA: Node %d has smcr reserved, set all mem as SMR\n", node); + start = ALIGN(smallest_pfn_in_node(node) << PAGE_SHIFT, + 1ull << CSV_MR_ALIGN_BITS); + end = ALIGN_DOWN(largest_pfn_in_node(node) << PAGE_SHIFT, + 1ull << CSV_MR_ALIGN_BITS); + } + spanned_size = end - start; if (spanned_size > max_spanned_size) max_spanned_size = spanned_size; @@ -350,42 +588,12 @@ static void __init csv_cma_reserve_mem(void) WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); -} - -#define CSV_CMA_AREAS 2458 -void __init early_csv_reserve_mem(void) -{ - unsigned long total_pages; - - /* Only reserve memory on the host that enabled CSV3 feature */ - if (!csv3_check_cpu_support()) - return; - - if (cma_alloc_areas(CSV_CMA_AREAS)) - return; - - total_pages = PHYS_PFN(memblock_phys_mem_size()); - if (csv_mem_size) { - if (csv_mem_size < (total_pages << PAGE_SHIFT)) { - csv_mem_percentage = div_u64((u64)csv_mem_size * 100, - (u64)total_pages << PAGE_SHIFT); - if (csv_mem_percentage > CSV_MEM_PCT_MAX) - csv_mem_percentage = CSV_MEM_PCT_MAX; /* Maximum percentage */ - } else - csv_mem_percentage = CSV_MEM_PCT_MAX; /* Maximum percentage */ - } - - if (!csv_mem_percentage) { - pr_warn("CSV-CMA: Don't reserve any memory\n"); - return; - } - - csv_cma_reserve_mem(); + return csv_smr_num ? 
0 : -ENOMEM; } phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, - unsigned int align) + unsigned int align) { int nid; int nr_nodes; @@ -483,6 +691,274 @@ void csv_release_to_contiguous(phys_addr_t pa, size_t size) } EXPORT_SYMBOL_GPL(csv_release_to_contiguous); +#else /* !CONFIG_CMA */ + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) +{ + return 0; +} +EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous); + +void csv_release_to_contiguous(phys_addr_t pa, size_t size) +{ +} +EXPORT_SYMBOL_GPL(csv_release_to_contiguous); + +#endif /* CONFIG_CMA */ + +static int __init csv_mark_secure_mem_region(void) +{ + int node; + int idx = 0; + unsigned long max_spanned_size = 0; + + for_each_node_state(node, N_ONLINE) { + unsigned long spanned_size; + unsigned long start = 0, end = 0; + + start = ALIGN(smallest_pfn_in_node(node) << PAGE_SHIFT, + 1ull << CSV_MR_ALIGN_BITS); + end = ALIGN_DOWN(largest_pfn_in_node(node) << PAGE_SHIFT, + 1ull << CSV_MR_ALIGN_BITS); + + if (start >= end) + continue; + + spanned_size = end - start; + if (spanned_size > max_spanned_size) + max_spanned_size = spanned_size; + + csv_smr[idx].start = start; + csv_smr[idx].size = end - start; + idx++; + + pr_info("CSV: Node %d - secure range 0x%016lx ~ 0x%016lx\n", + node, start, end); + } + + csv_smr_num = idx; + WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); + if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) + csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); + + return csv_smr_num ? 0 : -ENOMEM; +} + +static int __init csv_reserve_metadata(void) +{ + unsigned int i; + struct csv_metadata *metadata, *cur; + u64 hpa; + u64 smr_size; + struct list_head *pos, *q; + + if (WARN_ON_ONCE(!csv_smr)) + return -EFAULT; + + metadata = memblock_alloc_node(sizeof(*metadata) * csv_use_hugetlb, + SMP_CACHE_BYTES, NUMA_NO_NODE); + if (WARN_ON(!metadata)) + return -ENOMEM; + + smr_size = 1 << smr_entry_shift; + for (i = 0; i < csv_use_hugetlb; i++) { + hpa = memblock_phys_alloc_range(smr_size, smr_size, 0, + ALIGN_DOWN((largest_pfn() << PAGE_SHIFT) - PUD_SIZE, + PUD_SIZE)); + if (WARN_ON(!hpa)) + goto err; + + metadata[i].hpa = hpa; + metadata[i].used = false; + list_add_tail(&metadata[i].list, &csv_metadata_list); + } + + return 0; +err: + list_for_each_safe(pos, q, &csv_metadata_list) { + cur = list_entry(pos, struct csv_metadata, list); + memblock_phys_free(cur->hpa, 1 << smr_entry_shift); + list_del(&cur->list); + } + memblock_free(metadata, sizeof(*metadata) * csv_use_hugetlb); + + pr_warn("CSV: Fail to reserve metadata.\n"); + + return -ENOMEM; +} + +/** + * The helper functions csv_alloc_metadata() and csv_free_metadata() are used + * for the VMSA of CSV3 VMs only when CSV3 private memory is allocated from 1G + * hugetlb pages. In this case, the host should set the kernel cmdline parameter + * 'csv_use_hugetlb=', which causes the kernel to reserve metadata blocks + * at boot time. The function csv_alloc_metadata() allocates one block from this + * reserved pool, and csv_free_metadata() returns it. + * + * If CSV3 private memory is allocated from CSV-CMA, the VMSA is allocated + * from CSV-CMA, and these helper functions are not used. The host must not + * specify 'csv_use_hugetlb' on the kernel cmdline in this scenario. 
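+ *
+ * A minimal usage sketch (illustrative only, error handling elided):
+ *
+ *	phys_addr_t hpa = csv_alloc_metadata();
+ *
+ *	if (hpa) {
+ *		... use the block as the CSV3 VM's VMSA region ...
+ *		csv_free_metadata(hpa);
+ *	}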
+ */
+phys_addr_t csv_alloc_metadata(void)
+{
+	struct csv_metadata *metadata;
+	struct list_head *pos, *q;
+	u64 hpa = 0;
+
+	spin_lock(&csv_metadata_lock);
+
+	list_for_each_safe(pos, q, &csv_metadata_list) {
+		metadata = list_entry(pos, struct csv_metadata, list);
+		if (metadata) {
+			if (!metadata->used) {
+				metadata->used = true;
+				hpa = metadata->hpa;
+				break;
+			}
+		}
+	}
+
+	spin_unlock(&csv_metadata_lock);
+
+	return hpa;
+}
+EXPORT_SYMBOL_GPL(csv_alloc_metadata);
+
+void csv_free_metadata(u64 hpa)
+{
+	struct csv_metadata *metadata;
+	struct list_head *pos, *q;
+
+	spin_lock(&csv_metadata_lock);
+
+	list_for_each_safe(pos, q, &csv_metadata_list) {
+		metadata = list_entry(pos, struct csv_metadata, list);
+		if (metadata) {
+			if (metadata->hpa == hpa) {
+				WARN_ON(metadata->used != true);
+				metadata->used = false;
+				break;
+			}
+		}
+	}
+
+	spin_unlock(&csv_metadata_lock);
+}
+EXPORT_SYMBOL_GPL(csv_free_metadata);
+
+void __init early_csv_reserve_mem(void)
+{
+	unsigned long total_pages;
+	int ret;
+
+	/* Only reserve memory on the host that enabled CSV3 feature */
+	if (!csv3_check_cpu_support())
+		return;
+
+	total_pages = PHYS_PFN(memblock_phys_mem_size());
+	if (csv_cma_size) {
+		if (csv_cma_size < (total_pages << PAGE_SHIFT)) {
+			csv_cma_pct = div_u64((u64)csv_cma_size * 100,
+					(u64)total_pages << PAGE_SHIFT);
+			/* Maximum percentage */
+			if (csv_cma_pct > MAX_CSV_CMA_PCT)
+				csv_cma_pct = MAX_CSV_CMA_PCT;
+		} else
+			/* Maximum percentage */
+			csv_cma_pct = MAX_CSV_CMA_PCT;
+	}
+
+	if (!csv_cma_pct && !(csv_smcr_num && csv_use_hugetlb)) {
+		pr_warn("CSV: Fail, either kernel param csv_mem_percentage or csv_smcr_size & csv_use_hugetlb is required\n");
+		pr_warn("CSV: Don't reserve any memory\n");
+		return;
+	}
+
+	csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE);
+	if (!csv_smr) {
+		pr_err("CSV: Fail to allocate csv_smr\n");
+		return;
+	}
+	memset(csv_smr, 0, CSV_SMR_INFO_SIZE);
+
+	/* SMCR memory for CSV3 NPT/context. */
+	ret = csv_smcr_reserve_mem();
+	if (ret)
+		pr_warn("CSV: Fail to reserve SMCR!\n");
+
+	/**
+	 * The kernel cmdline parameter csv_mem_percentage= takes precedence
+	 * over csv_use_hugetlb=.
+	 */
+	if (csv_cma_pct) {
+#ifdef CONFIG_CMA
+		/**
+		 * If we reach here, the CSV3 private memory should be
+		 * allocated from CMA.
+		 */
+		csv_use_hugetlb = 0;
+
+		ret = cma_alloc_areas(CSV_CMA_AREAS);
+		if (ret)
+			goto err_free_smcr;
+
+		ret = csv_cma_reserve_mem();
+		if (ret)
+			goto err_free_smcr;
+
+		return;
+#else
+		if (csv_smcr && csv_use_hugetlb) {
+			pr_info("CSV: Fallback to csv_use_hugetlb logic\n");
+		} else {
+			pr_warn("CSV: Fail, csv_mem_percentage depends on CONFIG_CMA\n");
+			goto err_free_smcr;
+		}
+#endif
+	}
+
+	/**
+	 * If we reach here, the CSV3 private memory should be allocated from
+	 * 1G hugetlb.
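	 * This path is taken only when both 'csv_smcr_size=' and
	 * 'csv_use_hugetlb=' were accepted above; otherwise the reservations
	 * are rolled back at err_free_smcr below.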
+ */ + csv_cma_pct = 0; + + if (csv_smcr && csv_use_hugetlb) { + ret = csv_mark_secure_mem_region(); + if (ret) + goto err_free_smcr; + + ret = csv_reserve_metadata(); + if (ret) + goto err_free_smcr; + + return; + } + +err_free_smcr: + csv_smcr_free_mem(); + + memblock_free(csv_smr, CSV_SMR_INFO_SIZE); + csv_smr = NULL; + csv_smr_num = 0; + + csv_cma_pct = 0; + csv_use_hugetlb = 0; +} + +enum csv_smr_source get_csv_smr_source(void) +{ + if (csv_cma_pct) + return USE_CMA; + + if (csv_use_hugetlb) + return USE_HUGETLB; + + return NOT_SUPPORTED; +} +EXPORT_SYMBOL_GPL(get_csv_smr_source); + #ifdef CONFIG_SYSFS /** @@ -493,67 +969,57 @@ static ssize_t mem_info_show(struct kobject *kobj, { int node; int offset = 0; - unsigned long csv_used_size, total_used_size = 0; - unsigned long csv_size, total_csv_size = 0; - unsigned long shared_mem, total_shared_mem = 0; - unsigned long npt_size, pri_mem; - struct cma_array *array = NULL; - unsigned long bytes_per_mib = 1UL << 20; + unsigned long cma_cur_used, cma_total_used = 0; + unsigned long cma_cur, cma_total = 0; + unsigned long shared_mem, shared_mem_total = 0; + unsigned long npt_total, priv_mem_total; for_each_node_state(node, N_ONLINE) { - array = csv_contiguous_pernuma_area[node]; - if (array == NULL) { - csv_size = 0; - csv_used_size = 0; - shared_mem = 0; - - offset += snprintf(buf + offset, PAGE_SIZE - offset, "Node%d:\n", node); - offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 shared size:%10lu MiB\n", shared_mem); - offset += snprintf(buf + offset, PAGE_SIZE - offset, - " total cma size:%12lu MiB\n", csv_size); - offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 cma used:%13lu MiB\n", csv_used_size); - continue; +#ifdef CONFIG_CMA + struct cma_array *array = csv_contiguous_pernuma_area[node]; +#endif + cma_cur = 0; + cma_cur_used = 0; +#ifdef CONFIG_CMA + if (array) { + cma_cur = DIV_ROUND_UP(array->count * CSV_CMA_SIZE, SZ_1M); + cma_cur_used = DIV_ROUND_UP(atomic64_read(&array->csv_used_size), + SZ_1M); } - +#endif shared_mem = DIV_ROUND_UP(atomic_long_read(&csv3_shared_mem[node]), - bytes_per_mib); - csv_size = DIV_ROUND_UP(array->count * CSV_CMA_SIZE, - bytes_per_mib); - csv_used_size = DIV_ROUND_UP(atomic64_read(&array->csv_used_size), - bytes_per_mib); - - total_shared_mem += shared_mem; - total_csv_size += csv_size; - total_used_size += csv_used_size; + SZ_1M); offset += snprintf(buf + offset, PAGE_SIZE - offset, "Node%d:\n", node); offset += snprintf(buf + offset, PAGE_SIZE - offset, " csv3 shared size:%10lu MiB\n", shared_mem); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " total cma size:%12lu MiB\n", csv_size); + " total cma size:%12lu MiB\n", cma_cur); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 cma used:%13lu MiB\n", csv_used_size); + " csv3 cma used:%13lu MiB\n", cma_cur_used); + + shared_mem_total += shared_mem; + cma_total += cma_cur; + cma_total_used += cma_cur_used; } - npt_size = DIV_ROUND_UP(atomic_long_read(&csv3_npt_size), bytes_per_mib); - pri_mem = DIV_ROUND_UP(atomic_long_read(&csv3_pri_mem), bytes_per_mib); + npt_total = DIV_ROUND_UP(atomic_long_read(&csv3_npt_size), SZ_1M); + priv_mem_total = DIV_ROUND_UP(atomic_long_read(&csv3_pri_mem), SZ_1M); offset += snprintf(buf + offset, PAGE_SIZE - offset, "All Nodes:\n"); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 shared size:%10lu MiB\n", total_shared_mem); + " csv3 shared size:%10lu MiB\n", shared_mem_total); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " total cma size:%12lu MiB\n", 
total_csv_size); + " total cma size:%12lu MiB\n", cma_total); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 cma used:%13lu MiB\n", total_used_size); + " csv3 cma used:%13lu MiB\n", cma_total_used); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " npt table:%16lu MiB\n", npt_size); + " npt table:%16lu MiB\n", npt_total); offset += snprintf(buf + offset, PAGE_SIZE - offset, - " csv3 private memory:%6lu MiB\n", pri_mem); + " csv3 private memory:%6lu MiB\n", priv_mem_total); offset += snprintf(buf + offset, PAGE_SIZE - offset, " meta data:%16lu MiB\n", - DIV_ROUND_UP(csv3_meta, bytes_per_mib)); + DIV_ROUND_UP(csv3_meta, SZ_1M)); return offset; } @@ -605,8 +1071,10 @@ static void __exit csv_cma_sysfs_exit(void) if (!is_x86_vendor_hygon() || !boot_cpu_has(X86_FEATURE_CSV3)) return; - if (csv_cma_kobj_root != NULL) + if (csv_cma_kobj_root) { + sysfs_remove_group(csv_cma_kobj_root, &csv_cma_attr_group); kobject_put(csv_cma_kobj_root); + } } module_init(csv_cma_sysfs_init); diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 5d88ab87f6f53695fe20bb77a06f587e74be3721..0256b04d919af6de1b36e3e313b4dd1f4f513704 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -690,15 +690,18 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error { int ret = 0; unsigned int i = 0; - struct csv3_data_set_smr *cmd_set_smr; - struct csv3_data_set_smcr *cmd_set_smcr; - struct csv3_data_memory_region *smr_regions; + struct csv3_data_set_smr *cmd_set_smr = NULL; + struct csv3_data_set_smcr *cmd_set_smcr = NULL; + struct csv3_data_memory_region *smr_regions = NULL; + unsigned int smr_num; + enum csv_smr_source source = get_csv_smr_source(); if (!hygon_psp_hooks.sev_dev_hooks_installed) { ret = -ENODEV; goto l_end; } + /* Initialize global SMRs */ if (!csv_smr || !csv_smr_num) { ret = -EINVAL; goto l_end; @@ -710,7 +713,8 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error goto l_end; } - smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); + smr_num = max_t(unsigned int, csv_smr_num, csv_smcr_num); + smr_regions = kcalloc(smr_num, sizeof(*smr_regions), GFP_KERNEL); if (!smr_regions) { ret = -ENOMEM; goto e_free_cmd_set_smr; @@ -729,37 +733,60 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error goto e_free_smr_area; } - cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); - if (!cmd_set_smcr) { - ret = -ENOMEM; - goto e_free_smr_area; - } - - cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, - &node_online_map, - get_order(1 << CSV_MR_ALIGN_BITS)); - if (!cmd_set_smcr->base_address) { - pr_err("Fail to alloc SMCR memory\n"); - ret = -ENOMEM; - goto e_free_cmd_set_smcr; - } - - cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; - ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); - if (ret) { - if (*error == SEV_RET_INVALID_COMMAND) - ret = 0; + /* Initialize global SMCRs */ + if (csv_smcr && csv_smcr_num && csv_version_greater_or_equal(2407)) { + for (i = 0; i < csv_smcr_num; i++) { + smr_regions[i].base_address = csv_smcr[i].start; + smr_regions[i].size = csv_smcr[i].size; + } + cmd_set_smr->smcr_flag = 0; /* 0 as SMCR memory flag */ + cmd_set_smr->regions_paddr = __psp_pa(smr_regions); + cmd_set_smr->nregions = csv_smcr_num; + ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); + if (ret) + pr_err("Fail to set SMCR, ret %#x, error %#x\n", 
+				ret, *error);
 		else
-			pr_err("set smcr ret %#x, error %#x\n", ret, *error);
+			pr_info("CSV: manage CSV3 VM by SMCR pool, %s\n",
+				source == USE_CMA ? "CMA" :
+				source == USE_HUGETLB ? "1G hugetlb, metadata pool" : "?");
+		goto e_free_smr_area;
+	} else if (source == USE_CMA) {
+		cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL);
+		if (!cmd_set_smcr) {
+			ret = -ENOMEM;
+			goto e_free_smr_area;
+		}
 
-	csv_release_to_contiguous(cmd_set_smcr->base_address,
-				  1UL << CSV_MR_ALIGN_BITS);
-	goto e_free_cmd_set_smcr;
-	}
+		cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS,
+							&node_online_map,
+							get_order(1 << CSV_MR_ALIGN_BITS));
+		if (!cmd_set_smcr->base_address) {
+			pr_err("Fail to alloc SMCR memory\n");
+			ret = -ENOMEM;
+			goto e_free_cmd_set_smcr;
+		}
+
+		cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS;
+		ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error);
+		if (ret) {
+			if (*error == SEV_RET_INVALID_COMMAND)
+				ret = 0;
+			else
+				pr_err("Fail to set SMCR, ret %#x, error %#x\n", ret, *error);
+
+			csv_release_to_contiguous(cmd_set_smcr->base_address,
+						  1UL << CSV_MR_ALIGN_BITS);
+			goto e_free_cmd_set_smcr;
+		} else {
+			pr_info("CSV: manage CSV3 VM by CMA\n");
 #ifdef CONFIG_SYSFS
-	csv3_meta = cmd_set_smcr->size;
+			csv3_meta = cmd_set_smcr->size;
 #endif
+		}
+	} else {
+		pr_err("Unable to set hardware SMCR\n");
+		ret = -EINVAL;
+		goto e_free_smr_area;
+	}
 
 e_free_cmd_set_smcr:
 	kfree((void *)cmd_set_smcr);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d8bf19c1adb17258d2fa144c3f32bbeb2cab47a4..e10783e75d7521690da7a2140265049794c8796a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -752,6 +752,7 @@ struct huge_bootmem_page {
 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
 void wait_for_freed_hugetlb_folios(void);
+struct folio *dequeue_1G_hugetlb_folio_nodemask(gfp_t gfp_mask, int node, nodemask_t *nodemask);
 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
@@ -1076,6 +1077,12 @@ static inline void wait_for_freed_hugetlb_folios(void)
 {
 }
 
+static inline struct folio *dequeue_1G_hugetlb_folio_nodemask(gfp_t gfp_mask,
+		int node, nodemask_t *nodemask)
+{
+	return NULL;
+}
+
 static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 					unsigned long addr, int avoid_reserve)
diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h
index 74f5d750292d9d4de7fe4fcc3c5c775b676661e3..e5b5ef693d2f68a0efdcb392a417d3a01c95c252 100644
--- a/include/linux/psp-hygon.h
+++ b/include/linux/psp-hygon.h
@@ -280,11 +280,15 @@ struct csv3_data_set_guest_private_memory {
  * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command parameters
  *
  * @smr_entry_size: size of SMR entry
+ * @smcr_flag: must be 0 for SMCR memory region
  * @nregions: number of memory regions
  * @regions_paddr: address of memory containing multiple memory regions
  */
 struct csv3_data_set_smr {
-	u32 smr_entry_size;	/* In */
+	union {
+		u32 smr_entry_size;	/* In */
+		u32 smcr_flag;		/* In */
+	};
 	u32 nregions;		/* In */
 	u64 regions_paddr;	/* In */
 } __packed;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 94e1df675927a0e23bd6e146ad42b2d1326508fc..48ddb5cd658243e539e22fe32fd742db76aaa00c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1440,6 +1440,32 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
 	return NULL;
 }
 
+struct folio *dequeue_1G_hugetlb_folio_nodemask(gfp_t gfp_mask, int node, nodemask_t *nodemask)
+{
+	struct folio *folio = NULL;
+	struct hstate *h;
+
+	/* Only 1G (PUD-sized) hugetlb pages are handled here */
+	h = size_to_hstate(PUD_SIZE);
+	if (!h)
+		return NULL;
+
+	if (node == NUMA_NO_NODE)
+		node = numa_mem_id();
+
+	spin_lock_irq(&hugetlb_lock);
+
+	if (!available_huge_pages(h))
+		goto out;
+
+	folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, node, nodemask);
+
+out:
+	spin_unlock_irq(&hugetlb_lock);
+
+	return folio;
+}
+EXPORT_SYMBOL(dequeue_1G_hugetlb_folio_nodemask);
+
 /*
  * common helper functions for hstate_next_node_to_{alloc|free}.
  * We may have allocated or freed a huge page based on a different