From 9cbebdaafea876d8eec9de313e54552a9c9495de Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Wed, 4 Jun 2025 10:52:58 +0800 Subject: [PATCH 01/13] KVM: arm64: Probe Hisi CPU TYPE from ACPI/DTB Parse ACPI/DTB to get where the hypervisor is running. Signed-off-by: Zenghui Yu Signed-off-by: Yanan Wang Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/hisi_cpu_model.h | 19 ++++++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/Makefile | 1 + arch/arm64/kvm/hisi_cpu_model.c | 83 +++++++++++++++++++++++++ virt/kvm/arm/arm.c | 6 ++ 5 files changed, 110 insertions(+) create mode 100644 arch/arm64/include/asm/hisi_cpu_model.h create mode 100644 arch/arm64/kvm/hisi_cpu_model.c diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/include/asm/hisi_cpu_model.h new file mode 100644 index 000000000000..003a3a53cf33 --- /dev/null +++ b/arch/arm64/include/asm/hisi_cpu_model.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + */ + +#ifndef __HISI_CPU_MODEL_H__ +#define __HISI_CPU_MODEL_H__ + +enum hisi_cpu_type { + HI_1612, + HI_1616, + HI_1620, + UNKNOWN_HI_TYPE +}; + +extern enum hisi_cpu_type hi_cpu_type; + +void probe_hisi_cpu_type(void); +#endif /* __HISI_CPU_MODEL_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 01886b83d120..3d9d4665aa69 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -26,6 +26,7 @@ #include #include #include +#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 410c084984d0..4a609b5683ed 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -19,6 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o 
fpsimd.o pmu.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o +kvm-$(CONFIG_KVM_ARM_HOST) += hisi_cpu_model.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisi_cpu_model.c new file mode 100644 index 000000000000..4d5a099bc27a --- /dev/null +++ b/arch/arm64/kvm/hisi_cpu_model.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + */ + +#include +#include +#include +#include + +#ifdef CONFIG_ACPI + +/* ACPI Hisi oem table id str */ +const char *oem_str[] = { + "HIP06", /* Hisi 1612 */ + "HIP07", /* Hisi 1616 */ + "HIP08" /* Hisi 1620 */ +}; + +/* + * Get Hisi oem table id. + */ +static void acpi_get_hw_cpu_type(void) +{ + struct acpi_table_header *table; + acpi_status status; + int i, str_size = ARRAY_SIZE(oem_str); + + /* Get oem table id from ACPI table header */ + status = acpi_get_table(ACPI_SIG_DSDT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_err("Failed to get ACPI table: %s\n", + acpi_format_exception(status)); + return; + } + + for (i = 0; i < str_size; ++i) { + if (!strncmp(oem_str[i], table->oem_table_id, 5)) { + hi_cpu_type = i; + return; + } + } +} + +#else +static void acpi_get_hw_cpu_type(void) {} +#endif + +/* of Hisi cpu model str */ +const char *of_model_str[] = { + "Hi1612", + "Hi1616" +}; + +static void of_get_hw_cpu_type(void) +{ + const char *cpu_type; + int ret, i, str_size = ARRAY_SIZE(of_model_str); + + ret = of_property_read_string(of_root, "model", &cpu_type); + if (ret < 0) { + pr_err("Failed to get Hisi cpu model by OF.\n"); + return; + } + + for (i = 0; i < str_size; ++i) { + if (strstr(cpu_type, of_model_str[i])) { + hi_cpu_type = i; + return; + } + } +} + +void probe_hisi_cpu_type(void) +{ + if (!acpi_disabled) + acpi_get_hw_cpu_type(); + else + of_get_hw_cpu_type(); + + if (hi_cpu_type == UNKNOWN_HI_TYPE) + pr_warn("UNKNOWN Hisi cpu 
type.\n"); +} diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 28cdd2f4d1ef..465631a8f4a4 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -57,6 +57,9 @@ static DEFINE_SPINLOCK(kvm_vmid_lock); static bool vgic_present; +/* Hisi cpu type enum */ +enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; + static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) @@ -1769,6 +1772,9 @@ int kvm_arch_init(void *opaque) return -ENODEV; } + /* Probe the Hisi CPU type */ + probe_hisi_cpu_type(); + in_hyp_mode = is_kernel_in_hyp_mode(); if (!in_hyp_mode && kvm_arch_requires_vhe()) { -- Gitee From 5a1225c638acae25511374355acc01f3198a5384 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Sat, 7 Jun 2025 16:06:27 +0800 Subject: [PATCH 02/13] KVM: arm64: Add support for probing Hisi ncsnp capability Kunpeng 920 offers the HHA ncsnp capability, with which hypervisor doesn't need to perform a lot of cache maintenance like before (in case the guest has some non-cacheable Stage-1 mappings). 
Currently we apply this hardware capability when - vCPU switching MMU+caches on/off - creating Stage-2 mappings for Daborts Signed-off-by: Zenghui Yu Signed-off-by: Yanan Wang Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/hisi_cpu_model.h | 2 ++ arch/arm64/include/asm/kvm_mmu.h | 2 +- arch/arm64/kvm/hisi_cpu_model.c | 34 +++++++++++++++++++++++++ virt/kvm/arm/arm.c | 2 ++ 4 files changed, 39 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/include/asm/hisi_cpu_model.h index 003a3a53cf33..67008d17416e 100644 --- a/arch/arm64/include/asm/hisi_cpu_model.h +++ b/arch/arm64/include/asm/hisi_cpu_model.h @@ -14,6 +14,8 @@ enum hisi_cpu_type { }; extern enum hisi_cpu_type hi_cpu_type; +extern bool kvm_ncsnp_support; void probe_hisi_cpu_type(void); +void probe_hisi_ncsnp_support(void); #endif /* __HISI_CPU_MODEL_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index ffe0aad96b17..ad78b0047b2c 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -317,7 +317,7 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) * faulting in pages. Furthermore, FWB implies IDC, so cleaning to * PoU is not required either in this case. 
*/ - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + if (kvm_ncsnp_support || cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) return; kvm_flush_dcache_to_poc(va, size); diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisi_cpu_model.c index 4d5a099bc27a..52eecf1ba1cf 100644 --- a/arch/arm64/kvm/hisi_cpu_model.c +++ b/arch/arm64/kvm/hisi_cpu_model.c @@ -81,3 +81,37 @@ void probe_hisi_cpu_type(void) if (hi_cpu_type == UNKNOWN_HI_TYPE) pr_warn("UNKNOWN Hisi cpu type.\n"); } + +#define NCSNP_MMIO_BASE 0x20107E238 + +/* + * We have the fantastic HHA ncsnp capability on Kunpeng 920, + * with which hypervisor doesn't need to perform a lot of cache + * maintenance like before (in case the guest has non-cacheable + * Stage-1 mappings). + */ +void probe_hisi_ncsnp_support(void) +{ + void __iomem *base; + unsigned int high; + + kvm_ncsnp_support = false; + + if (hi_cpu_type != HI_1620) + goto out; + + base = ioremap(NCSNP_MMIO_BASE, 4); + if (!base) { + pr_err("Unable to map MMIO region when probing ncsnp!\n"); + goto out; + } + + high = readl_relaxed(base) >> 28; + iounmap(base); + if (high != 0x1) + kvm_ncsnp_support = true; + +out: + kvm_info("Hisi ncsnp: %s\n", kvm_ncsnp_support ? 
"enabled" : + "disabled"); +} diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 465631a8f4a4..fcbf0eaa35aa 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -59,6 +59,7 @@ static bool vgic_present; /* Hisi cpu type enum */ enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; +bool kvm_ncsnp_support; static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); @@ -1774,6 +1775,7 @@ int kvm_arch_init(void *opaque) /* Probe the Hisi CPU type */ probe_hisi_cpu_type(); + probe_hisi_ncsnp_support(); in_hyp_mode = is_kernel_in_hyp_mode(); -- Gitee From 931af56a522918f4702799e890435726eb84bb28 Mon Sep 17 00:00:00 2001 From: Yanan Wang Date: Sat, 7 Jun 2025 16:28:20 +0800 Subject: [PATCH 03/13] KVM: arm64: Only probe Hisi ncsnp feature on Hisi CPUs The "ncsnp" is an implementation specific CPU virtualization feature on Hisi 1620 series CPUs. This feature works just like ARM standard S2FWB to reduce some cache management operations in virtualization. Given that it's Hisi specific feature, let's restrict the detection only to Hisi CPUs. To realize this: 1) Add a sub-directory `hisilicon/` within arch/arm64/kvm to hold code for Hisi specific virtualization features. 2) Add a new kconfig option `CONFIG_KVM_HISI_VIRT` for users to select the whole Hisi specific virtualization features. 3) Add a generic global KVM variable `kvm_ncsnp_support` which is `false` by default. Only re-initialize it when we have `CONFIG_KVM_HISI_VIRT` enabled. 
Signed-off-by: Yanan Wang Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/configs/tencent.config | 1 + arch/arm64/include/asm/kvm_host.h | 3 +- arch/arm64/kvm/Kconfig | 1 + arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/hisilicon/Kconfig | 7 ++ arch/arm64/kvm/hisilicon/Makefile | 2 + .../hisi_virt.c} | 93 ++++++++++--------- .../hisilicon/hisi_virt.h} | 14 ++- virt/kvm/arm/arm.c | 13 ++- 9 files changed, 79 insertions(+), 57 deletions(-) create mode 100644 arch/arm64/kvm/hisilicon/Kconfig create mode 100644 arch/arm64/kvm/hisilicon/Makefile rename arch/arm64/kvm/{hisi_cpu_model.c => hisilicon/hisi_virt.c} (44%) rename arch/arm64/{include/asm/hisi_cpu_model.h => kvm/hisilicon/hisi_virt.h} (39%) diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index 8347032c1be7..f73a2f984ec6 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1502,3 +1502,4 @@ CONFIG_HISI_SOC_CACHE=m CONFIG_HISI_SOC_HHA=m CONFIG_ARM64_HAFT=y CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y +CONFIG_KVM_HISI_VIRT=y diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3d9d4665aa69..73c260b417c6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -26,7 +26,6 @@ #include #include #include -#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -693,4 +692,6 @@ static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void) return arm64_get_spectre_bhb_state(); } +extern bool kvm_ncsnp_support; + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 1d0a3791c017..b065318df925 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -5,6 +5,7 @@ source "virt/kvm/Kconfig" source "virt/lib/Kconfig" +source "arch/arm64/kvm/hisilicon/Kconfig" menuconfig VIRTUALIZATION bool "Virtualization" diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 
4a609b5683ed..58c568dc52d0 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -19,7 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o -kvm-$(CONFIG_KVM_ARM_HOST) += hisi_cpu_model.o +obj-$(CONFIG_KVM_ARM_HOST) += hisilicon/ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o diff --git a/arch/arm64/kvm/hisilicon/Kconfig b/arch/arm64/kvm/hisilicon/Kconfig new file mode 100644 index 000000000000..6536f897a32e --- /dev/null +++ b/arch/arm64/kvm/hisilicon/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +config KVM_HISI_VIRT + bool "HiSilicon SoC specific virtualization features" + depends on ARCH_HISI + help + Support for HiSilicon SoC specific virtualization features. + On non-HiSilicon platforms, say N here. 
diff --git a/arch/arm64/kvm/hisilicon/Makefile b/arch/arm64/kvm/hisilicon/Makefile new file mode 100644 index 000000000000..849f99d1526d --- /dev/null +++ b/arch/arm64/kvm/hisilicon/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_KVM_HISI_VIRT) += hisi_virt.o diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisilicon/hisi_virt.c similarity index 44% rename from arch/arm64/kvm/hisi_cpu_model.c rename to arch/arm64/kvm/hisilicon/hisi_virt.c index 52eecf1ba1cf..9587f9508a79 100644 --- a/arch/arm64/kvm/hisi_cpu_model.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -1,26 +1,34 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright(c) 2019 Huawei Technologies Co., Ltd + * Copyright(c) 2022 Huawei Technologies Co., Ltd */ #include #include #include #include +#include "hisi_virt.h" -#ifdef CONFIG_ACPI +static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; + +static const char * const hisi_cpu_type_str[] = { + "Hisi1612", + "Hisi1616", + "Hisi1620", + "Unknown" +}; /* ACPI Hisi oem table id str */ -const char *oem_str[] = { +static const char * const oem_str[] = { "HIP06", /* Hisi 1612 */ "HIP07", /* Hisi 1616 */ "HIP08" /* Hisi 1620 */ }; /* - * Get Hisi oem table id. + * Probe Hisi CPU type form ACPI. 
*/ -static void acpi_get_hw_cpu_type(void) +static enum hisi_cpu_type acpi_get_hisi_cpu_type(void) { struct acpi_table_header *table; acpi_status status; @@ -29,89 +37,88 @@ static void acpi_get_hw_cpu_type(void) /* Get oem table id from ACPI table header */ status = acpi_get_table(ACPI_SIG_DSDT, 0, &table); if (ACPI_FAILURE(status)) { - pr_err("Failed to get ACPI table: %s\n", - acpi_format_exception(status)); - return; + pr_warn("Failed to get ACPI table: %s\n", + acpi_format_exception(status)); + return UNKNOWN_HI_TYPE; } for (i = 0; i < str_size; ++i) { - if (!strncmp(oem_str[i], table->oem_table_id, 5)) { - hi_cpu_type = i; - return; - } + if (!strncmp(oem_str[i], table->oem_table_id, 5)) + return i; } -} -#else -static void acpi_get_hw_cpu_type(void) {} -#endif + return UNKNOWN_HI_TYPE; +} /* of Hisi cpu model str */ -const char *of_model_str[] = { +static const char * const of_model_str[] = { "Hi1612", "Hi1616" }; -static void of_get_hw_cpu_type(void) +/* + * Probe Hisi CPU type from DT. + */ +static enum hisi_cpu_type of_get_hisi_cpu_type(void) { - const char *cpu_type; + const char *model; int ret, i, str_size = ARRAY_SIZE(of_model_str); - ret = of_property_read_string(of_root, "model", &cpu_type); + /* + * Note: There may not be a "model" node in FDT, which + * is provided by the vendor. In this case, we are not + * able to get CPU type information through this way. 
+ */ + ret = of_property_read_string(of_root, "model", &model); if (ret < 0) { - pr_err("Failed to get Hisi cpu model by OF.\n"); - return; + pr_warn("Failed to get Hisi cpu model by OF.\n"); + return UNKNOWN_HI_TYPE; } for (i = 0; i < str_size; ++i) { - if (strstr(cpu_type, of_model_str[i])) { - hi_cpu_type = i; - return; - } + if (strstr(model, of_model_str[i])) + return i; } + + return UNKNOWN_HI_TYPE; } void probe_hisi_cpu_type(void) { if (!acpi_disabled) - acpi_get_hw_cpu_type(); + cpu_type = acpi_get_hisi_cpu_type(); else - of_get_hw_cpu_type(); + cpu_type = of_get_hisi_cpu_type(); - if (hi_cpu_type == UNKNOWN_HI_TYPE) - pr_warn("UNKNOWN Hisi cpu type.\n"); + kvm_info("detected: Hisi CPU type '%s'\n", hisi_cpu_type_str[cpu_type]); } -#define NCSNP_MMIO_BASE 0x20107E238 - /* * We have the fantastic HHA ncsnp capability on Kunpeng 920, * with which hypervisor doesn't need to perform a lot of cache * maintenance like before (in case the guest has non-cacheable * Stage-1 mappings). */ -void probe_hisi_ncsnp_support(void) +#define NCSNP_MMIO_BASE 0x20107E238 +bool hisi_ncsnp_supported(void) { void __iomem *base; unsigned int high; + bool supported = false; - kvm_ncsnp_support = false; - - if (hi_cpu_type != HI_1620) - goto out; + if (cpu_type != HI_1620) + return supported; base = ioremap(NCSNP_MMIO_BASE, 4); if (!base) { - pr_err("Unable to map MMIO region when probing ncsnp!\n"); - goto out; + pr_warn("Unable to map MMIO region when probing ncsnp!\n"); + return supported; } high = readl_relaxed(base) >> 28; iounmap(base); if (high != 0x1) - kvm_ncsnp_support = true; + supported = true; -out: - kvm_info("Hisi ncsnp: %s\n", kvm_ncsnp_support ? 
"enabled" : - "disabled"); + return supported; } diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/kvm/hisilicon/hisi_virt.h similarity index 39% rename from arch/arm64/include/asm/hisi_cpu_model.h rename to arch/arm64/kvm/hisilicon/hisi_virt.h index 67008d17416e..c4b5acc93fec 100644 --- a/arch/arm64/include/asm/hisi_cpu_model.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -1,10 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Copyright(c) 2019 Huawei Technologies Co., Ltd + * Copyright(c) 2022 Huawei Technologies Co., Ltd */ -#ifndef __HISI_CPU_MODEL_H__ -#define __HISI_CPU_MODEL_H__ +#ifndef __HISI_VIRT_H__ +#define __HISI_VIRT_H__ enum hisi_cpu_type { HI_1612, @@ -13,9 +13,7 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE }; -extern enum hisi_cpu_type hi_cpu_type; -extern bool kvm_ncsnp_support; - void probe_hisi_cpu_type(void); -void probe_hisi_ncsnp_support(void); -#endif /* __HISI_CPU_MODEL_H__ */ +bool hisi_ncsnp_supported(void); + +#endif /* __HISI_VIRT_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index fcbf0eaa35aa..cae8fb6d3de9 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -44,6 +44,10 @@ __asm__(".arch_extension virt"); #endif +#ifdef CONFIG_KVM_HISI_VIRT +#include "hisilicon/hisi_virt.h" +#endif + DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); @@ -57,8 +61,7 @@ static DEFINE_SPINLOCK(kvm_vmid_lock); static bool vgic_present; -/* Hisi cpu type enum */ -enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; +/* Capability of non-cacheable snooping */ bool kvm_ncsnp_support; static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); @@ -1773,9 +1776,11 @@ int kvm_arch_init(void *opaque) return -ENODEV; } - /* Probe the Hisi CPU type */ +#ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); - probe_hisi_ncsnp_support(); + kvm_ncsnp_support = hisi_ncsnp_supported(); +#endif + kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? 
"enabled" : "disabled"); in_hyp_mode = is_kernel_in_hyp_mode(); -- Gitee From fe332dec8521368ffae947edf49335c6c23a0c29 Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 16:30:20 +0800 Subject: [PATCH 04/13] KVM: arm64: Support a new HiSi CPU type Add a new entry ("HIP09") in oem_str[] to support detection of the new HiSi CPU type. Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 4 +++- arch/arm64/kvm/hisilicon/hisi_virt.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 9587f9508a79..90c363ed642e 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -15,6 +15,7 @@ static const char * const hisi_cpu_type_str[] = { "Hisi1612", "Hisi1616", "Hisi1620", + "HIP09", "Unknown" }; @@ -22,7 +23,8 @@ static const char * const hisi_cpu_type_str[] = { static const char * const oem_str[] = { "HIP06", /* Hisi 1612 */ "HIP07", /* Hisi 1616 */ - "HIP08" /* Hisi 1620 */ + "HIP08", /* Hisi 1620 */ + "HIP09" /* HIP09 */ }; /* diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index c4b5acc93fec..9231b1dca7f2 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -10,6 +10,7 @@ enum hisi_cpu_type { HI_1612, HI_1616, HI_1620, + HI_IP09, UNKNOWN_HI_TYPE }; -- Gitee From 16c4a71b4a989e57534a4e81984a146d4ac0df4d Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 17:12:13 +0800 Subject: [PATCH 05/13] KVM: arm64: Probe and configure DVMBM capability on HiSi CPUs DVMBM is an virtualization extension since HIP09, which allows TLBI executed at NS EL1 to be broadcast in a configurable range of physical CPUs (even with HCR_EL2.FB set). It will bring TLBI broadcast optimization. Introduce the method to detect and enable this feature. 
Also add a kernel command parameter "kvm-arm.dvmbm_enabled" (=0 on default) so that users can {en,dis}able DVMBM on need. The parameter description is added under Documentation/. Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- .../admin-guide/kernel-parameters.txt | 4 ++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/hisilicon/hisi_virt.c | 49 +++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 6 +++ virt/kvm/arm/arm.c | 6 +++ 5 files changed, 66 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 810b9f5cbf9b..828c42bd3de6 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2172,6 +2172,10 @@ [KVM,ARM] Allow use of GICv4 for direct injection of LPIs. + kvm-arm.dvmbm_enabled= + [KVM,ARM] Allow use of HiSilicon DVMBM capability. + Default: 0 + kvm-intel.ept= [KVM,Intel] Disable extended page tables (virtualized MMU) support on capable Intel chips. 
Default is 1 (enabled) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 73c260b417c6..4a17c971f4f0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -693,5 +693,6 @@ static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void) } extern bool kvm_ncsnp_support; +extern bool kvm_dvmbm_support; #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 90c363ed642e..b81488cd663b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -11,6 +11,8 @@ static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; +static bool dvmbm_enabled; + static const char * const hisi_cpu_type_str[] = { "Hisi1612", "Hisi1616", @@ -124,3 +126,50 @@ bool hisi_ncsnp_supported(void) return supported; } + +static int __init early_dvmbm_enable(char *buf) +{ + return strtobool(buf, &dvmbm_enabled); +} +early_param("kvm-arm.dvmbm_enabled", early_dvmbm_enable); + +static void hardware_enable_dvmbm(void *data) +{ + u64 val; + + val = read_sysreg_s(SYS_LSUDVM_CTRL_EL2); + val |= LSUDVM_CTLR_EL2_MASK; + write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); +} + +static void hardware_disable_dvmbm(void *data) +{ + u64 val; + + val = read_sysreg_s(SYS_LSUDVM_CTRL_EL2); + val &= ~LSUDVM_CTLR_EL2_MASK; + write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); +} + +bool hisi_dvmbm_supported(void) +{ + if (cpu_type != HI_IP09) + return false; + + /* Determine whether DVMBM is supported by the hardware */ + if (!(read_sysreg(aidr_el1) & AIDR_EL1_DVMBM_MASK)) + return false; + + /* User provided kernel command-line parameter */ + if (!dvmbm_enabled || !is_kernel_in_hyp_mode()) { + on_each_cpu(hardware_disable_dvmbm, NULL, 1); + return false; + } + + /* + * Enable TLBI Broadcast optimization by setting + * LSUDVM_CTRL_EL2's bit[0]. 
+ */ + on_each_cpu(hardware_enable_dvmbm, NULL, 1); + return true; +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 9231b1dca7f2..f505d44e386f 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -14,7 +14,13 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE }; +/* HIP09 */ +#define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) +#define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) +#define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); +bool hisi_dvmbm_supported(void); #endif /* __HISI_VIRT_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index cae8fb6d3de9..c0ab4a66a023 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -49,6 +49,10 @@ __asm__(".arch_extension virt"); #endif DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); + +/* Capability of DVMBM */ +bool kvm_dvmbm_support; + static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); /* Per-CPU variable containing the currently running vcpu. */ @@ -1779,8 +1783,10 @@ int kvm_arch_init(void *opaque) #ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); + kvm_dvmbm_support = hisi_dvmbm_supported(); #endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); + kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); in_hyp_mode = is_kernel_in_hyp_mode(); -- Gitee From 3bb8fe0e62ccf3eb8354527abfc11014273e3130 Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 17:29:49 +0800 Subject: [PATCH 06/13] KVM: arm64: Add kvm_vcpu_arch::sched_cpus and pre_sched_cpus We already have cpus_ptr in current thread struct now, through which we can know the pcpu range the thread is allowed to run on. So in kvm_arch_vcpu_{load,put}, we can also know the pcpu range the vcpu thread is allowed to be scheduled on, and that is the range we want to configure for TLBI broadcast. 
Introduce two variables sched_cpus and pre_sched_cpus in struct kvm_vcpu_arch. @sched_cpus always comes from current->cpus_ptr and @pre_sched_cpus always comes from @sched_cpus. Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/kvm_host.h | 8 ++++++ arch/arm64/kvm/hisilicon/hisi_virt.c | 37 ++++++++++++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 25 +++++++++++++++++++ virt/kvm/arm/arm.c | 14 ++++++++--- 4 files changed, 80 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4a17c971f4f0..cab28c1db15c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -341,6 +341,14 @@ struct kvm_vcpu_arch { /* True when deferrable sysregs are loaded on the physical CPU, * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ bool sysregs_loaded_on_cpu; + +#ifdef CONFIG_KVM_HISI_VIRT + /* pCPUs this vCPU can be scheduled on. 
Pure copy of + * current->cpus_ptr + */ + cpumask_var_t sched_cpus; + cpumask_var_t pre_sched_cpus; +#endif }; /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index b81488cd663b..ac12fc54a6b4 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -173,3 +173,40 @@ bool hisi_dvmbm_supported(void) on_each_cpu(hardware_enable_dvmbm, NULL, 1); return true; } + +int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return 0; + + if (!zalloc_cpumask_var(&vcpu->arch.sched_cpus, GFP_ATOMIC) || + !zalloc_cpumask_var(&vcpu->arch.pre_sched_cpus, GFP_ATOMIC)) + return -ENOMEM; + + return 0; +} + +void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + free_cpumask_var(vcpu->arch.sched_cpus); + free_cpumask_var(vcpu->arch.pre_sched_cpus); +} + +void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr); +} + +void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index f505d44e386f..8d8ef6aa165a 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -6,6 +6,7 @@ #ifndef __HISI_VIRT_H__ #define __HISI_VIRT_H__ +#ifdef CONFIG_KVM_HISI_VIRT enum hisi_cpu_type { HI_1612, HI_1616, @@ -23,4 +24,28 @@ void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); +void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); +void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); +#else +static 
inline void probe_hisi_cpu_type(void) {} +static inline bool hisi_ncsnp_supported(void) +{ + return false; +} +static inline bool hisi_dvmbm_supported(void) +{ + return false; +} + +static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} +static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} +#endif /* CONFIG_KVM_HISI_VIRT */ + #endif /* __HISI_VIRT_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index c0ab4a66a023..8e2427ac39a7 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -44,9 +44,7 @@ __asm__(".arch_extension virt"); #endif -#ifdef CONFIG_KVM_HISI_VIRT #include "hisilicon/hisi_virt.h" -#endif DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); @@ -294,6 +292,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) if (err) goto free_vcpu; + err = kvm_sched_affinity_vcpu_init(vcpu); + if (err) + goto free_vcpu; + err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP); if (err) goto vcpu_uninit; @@ -326,6 +328,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_arch_vcpu_free(vcpu); + + kvm_sched_affinity_vcpu_destroy(vcpu); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) @@ -433,6 +437,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu_ptrauth_disable(vcpu); } + + kvm_tlbi_dvmbm_vcpu_load(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -446,6 +452,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->cpu = -1; kvm_arm_set_running_vcpu(NULL); + + kvm_tlbi_dvmbm_vcpu_put(vcpu); } static void vcpu_power_off(struct kvm_vcpu *vcpu) @@ -1780,11 +1788,9 @@ int kvm_arch_init(void *opaque) return -ENODEV; } -#ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); 
-#endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); -- Gitee From c1d57a3781faafb250582fc184c82c0133dceaba Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 17:49:31 +0800 Subject: [PATCH 07/13] KVM: arm64: Add kvm_arch::sched_cpus and sched_lock Introduce sched_cpus and sched_lock in struct kvm_arch. sched_cpus will store the union of all vcpus' cpus_ptr in a VM and will be used for the TLBI broadcast range for this VM. sched_lock ensures a exclusive manipulation of sched_cpus. In vcpu_load, we should decide whether to perform the subsequent update operation by checking whether sched_cpus has changed. Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/kvm_host.h | 5 +++ arch/arm64/kvm/hisilicon/hisi_virt.c | 52 ++++++++++++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 7 ++++ virt/kvm/arm/arm.c | 5 +++ 4 files changed, 69 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cab28c1db15c..1cd8575b365f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -84,6 +84,11 @@ struct kvm_arch { /* Mandated version of PSCI */ u32 psci_version; + +#ifdef CONFIG_KVM_HISI_VIRT + spinlock_t sched_lock; + cpumask_var_t sched_cpus; /* Union of all vcpu's cpus_ptr */ +#endif }; #define KVM_NR_MEM_OBJS 40 diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index ac12fc54a6b4..fe0515c20989 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -197,10 +197,42 @@ void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *tmp; + cpumask_t mask; + unsigned long i; + + /* Don't bother on old hardware */ if (!kvm_dvmbm_support) 
return; cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr); + + if (likely(cpumask_equal(vcpu->arch.sched_cpus, + vcpu->arch.pre_sched_cpus))) + return; + + /* Re-calculate sched_cpus for this VM */ + spin_lock(&kvm->arch.sched_lock); + + cpumask_clear(&mask); + kvm_for_each_vcpu(i, tmp, kvm) { + /* + * We may get the stale sched_cpus if another thread + * is concurrently changing its affinity. It'll + * eventually go through vcpu_load() and we rely on + * the last sched_lock holder to make things correct. + */ + cpumask_or(&mask, &mask, tmp->arch.sched_cpus); + } + + if (cpumask_equal(kvm->arch.sched_cpus, &mask)) + goto out_unlock; + + cpumask_copy(kvm->arch.sched_cpus, &mask); + +out_unlock: + spin_unlock(&kvm->arch.sched_lock); } void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) @@ -210,3 +242,23 @@ void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); } + +int kvm_sched_affinity_vm_init(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return 0; + + spin_lock_init(&kvm->arch.sched_lock); + if (!zalloc_cpumask_var(&kvm->arch.sched_cpus, GFP_ATOMIC)) + return -ENOMEM; + + return 0; +} + +void kvm_sched_affinity_vm_destroy(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return; + + free_cpumask_var(kvm->arch.sched_cpus); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 8d8ef6aa165a..3de270ad2da5 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -26,6 +26,8 @@ bool hisi_dvmbm_supported(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); +int kvm_sched_affinity_vm_init(struct kvm *kvm); +void kvm_sched_affinity_vm_destroy(struct kvm *kvm); void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); #else @@ -44,6 +46,11 @@ static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu 
*vcpu) return 0; } static inline void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline int kvm_sched_affinity_vm_init(struct kvm *kvm) +{ + return 0; +} +static inline void kvm_sched_affinity_vm_destroy(struct kvm *kvm) {} static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} #endif /* CONFIG_KVM_HISI_VIRT */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 8e2427ac39a7..fe1cef08098f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -116,6 +116,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret, cpu; + ret = kvm_sched_affinity_vm_init(kvm); + if (ret) + return ret; + ret = kvm_arm_setup_stage2(kvm, type); if (ret) return ret; @@ -172,6 +176,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { int i; + kvm_sched_affinity_vm_destroy(kvm); kvm_vgic_destroy(kvm); free_percpu(kvm->arch.last_vcpu_ran); -- Gitee From 530f86a08cab40aa11e757dd3204f3d58a848c91 Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 18:08:20 +0800 Subject: [PATCH 08/13] KVM: arm64: Implement the capability of DVMBM Implement the capability of DVMBM. Before each vcpu is loaded, we re-calculate the VM-wide sched_cpus, and if it's changed we will kick all other vcpus out to reload the latest LSUDVMBM value to the register, and a new request KVM_REQ_RELOAD_TLBI_DVMBM is added to implement this. Otherwise if the sched_cpus is not changed by this single vcpu, in order to ensure the correctness of the contents in the register, we reload the LSUDVMBM value to the register and nothing else will be done. 
Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/kvm_host.h | 2 + arch/arm64/kvm/hisilicon/hisi_virt.c | 114 ++++++++++++++++++++++++++- arch/arm64/kvm/hisilicon/hisi_virt.h | 29 +++++++ virt/kvm/arm/arm.c | 3 + 4 files changed, 147 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 1cd8575b365f..cc892cfeec5d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -45,6 +45,7 @@ #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) +#define KVM_REQ_RELOAD_TLBI_DVMBM KVM_ARCH_REQ(8) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -88,6 +89,7 @@ struct kvm_arch { #ifdef CONFIG_KVM_HISI_VIRT spinlock_t sched_lock; cpumask_var_t sched_cpus; /* Union of all vcpu's cpus_ptr */ + u64 tlbi_dvmbm; #endif }; diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index fe0515c20989..662ddf5b124b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -195,6 +195,96 @@ void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) free_cpumask_var(vcpu->arch.pre_sched_cpus); } +static void __kvm_write_lsudvmbm(struct kvm *kvm) +{ + write_sysreg_s(kvm->arch.tlbi_dvmbm, SYS_LSUDVMBM_EL2); +} + +static void kvm_write_lsudvmbm(struct kvm *kvm) +{ + spin_lock(&kvm->arch.sched_lock); + __kvm_write_lsudvmbm(kvm); + spin_unlock(&kvm->arch.sched_lock); +} + +static int kvm_dvmbm_get_dies_info(struct kvm *kvm, u64 *vm_aff3s, int size) +{ + int num = 0, cpu; + + for_each_cpu(cpu, kvm->arch.sched_cpus) { + bool found = false; + u64 aff3; + int i; + + if (num >= size) + break; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + for (i = 0; i < num; i++) { + if (vm_aff3s[i] == aff3) { + found = true; + break; + } + } + + if (!found) + vm_aff3s[num++] = aff3; 
+ } + + return num; +} + +static void kvm_update_vm_lsudvmbm(struct kvm *kvm) +{ + u64 mpidr, aff3, aff2, aff1; + u64 vm_aff3s[DVMBM_MAX_DIES]; + u64 val; + int cpu, nr_dies; + + nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES); + if (nr_dies > 2) { + val = DVMBM_RANGE_ALL_DIES << DVMBM_RANGE_SHIFT; + goto out_update; + } + + if (nr_dies == 1) { + val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_SHIFT; + + /* fulfill bits [52:0] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + + val |= 1ULL << (aff2 * 4 + aff1); + } + + goto out_update; + } + + /* nr_dies == 2 */ + val = DVMBM_RANGE_TWO_DIES << DVMBM_RANGE_SHIFT | + DVMBM_GRAN_CLUSTER << DVMBM_GRAN_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_SHIFT | + vm_aff3s[1] << DVMBM_DIE2_SHIFT; + + /* and fulfill bits [43:0] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + + if (aff3 == vm_aff3s[0]) + val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + else + val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + } + +out_update: + kvm->arch.tlbi_dvmbm = val; +} + void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -209,8 +299,10 @@ void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr); if (likely(cpumask_equal(vcpu->arch.sched_cpus, - vcpu->arch.pre_sched_cpus))) + vcpu->arch.pre_sched_cpus))) { + kvm_write_lsudvmbm(kvm); return; + } /* Re-calculate sched_cpus for this VM */ spin_lock(&kvm->arch.sched_lock); @@ -231,7 +323,17 @@ void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) cpumask_copy(kvm->arch.sched_cpus, &mask); + kvm_flush_remote_tlbs(kvm); + + /* + * Re-calculate LSUDVMBM_EL2 for this VM and kick all vcpus + * out to reload the LSUDVMBM configuration. 
+ */ + kvm_update_vm_lsudvmbm(kvm); + kvm_make_all_cpus_request(kvm, KVM_REQ_RELOAD_TLBI_DVMBM); + out_unlock: + __kvm_write_lsudvmbm(kvm); spin_unlock(&kvm->arch.sched_lock); } @@ -262,3 +364,13 @@ void kvm_sched_affinity_vm_destroy(struct kvm *kvm) free_cpumask_var(kvm->arch.sched_cpus); } + +void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) +{ + if (WARN_ON_ONCE(!kvm_dvmbm_support)) + return; + + preempt_disable(); + kvm_write_lsudvmbm(kvm); + preempt_enable(); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 3de270ad2da5..4e162b7f6688 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -20,6 +20,33 @@ enum hisi_cpu_type { #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) +/* + * MPIDR_EL1 layout on HIP09 + * + * Aff3[7:3] - socket ID [0-15] + * Aff3[2:0] - die ID [1,3] + * Aff2 - cluster ID [0-9] + * Aff1 - core ID [0-3] + * Aff0 - thread ID [0,1] + */ + +#define SYS_LSUDVMBM_EL2 sys_reg(3, 4, 15, 7, 5) +#define DVMBM_RANGE_SHIFT 62 +#define DVMBM_RANGE_ONE_DIE 0ULL +#define DVMBM_RANGE_TWO_DIES 1ULL +#define DVMBM_RANGE_ALL_DIES 3ULL + +#define DVMBM_GRAN_SHIFT 61 +#define DVMBM_GRAN_CLUSTER 0ULL +#define DVMBM_GRAN_DIE 1ULL + +#define DVMBM_DIE1_SHIFT 53 +#define DVMBM_DIE2_SHIFT 45 +#define DVMBM_DIE1_CLUSTER_SHIFT 22 +#define DVMBM_DIE2_CLUSTER_SHIFT 0 + +#define DVMBM_MAX_DIES 32 + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); @@ -30,6 +57,7 @@ int kvm_sched_affinity_vm_init(struct kvm *kvm); void kvm_sched_affinity_vm_destroy(struct kvm *kvm); void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); +void kvm_hisi_reload_lsudvmbm(struct kvm *kvm); #else static inline void probe_hisi_cpu_type(void) {} static inline bool hisi_ncsnp_supported(void) @@ -53,6 +81,7 @@ static inline int kvm_sched_affinity_vm_init(struct kvm *kvm) 
static inline void kvm_sched_affinity_vm_destroy(struct kvm *kvm) {} static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} +static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */ #endif /* __HISI_VIRT_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index fe1cef08098f..c4aad0845dc6 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -720,6 +720,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) vgic_v4_load(vcpu); preempt_enable(); } + + if (kvm_check_request(KVM_REQ_RELOAD_TLBI_DVMBM, vcpu)) + kvm_hisi_reload_lsudvmbm(vcpu->kvm); } } -- Gitee From a3a23e32bdcf8808621f7316127740096a82e5d3 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Thu, 12 Jun 2025 10:30:44 +0800 Subject: [PATCH 09/13] KVM: arm64: Translate logic cluster id to physical cluster id when updating lsudvmbm For dvmbm feature, MN requires physical cluster id while it is filled with logic cluster id right now. In some situations which physical cluster id is not equal to logic cluster id such as in PG boards, it will cause issues when enabling dvmbm. To avoid the issue, translate logic cluster id to physical cluster id when updating lsudvmbm. 
Signed-off-by: Xiang Chen Signed-off-by: Yanan Wang Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 153 +++++++++++++++++++++++++-- arch/arm64/kvm/hisilicon/hisi_virt.h | 17 +++ virt/kvm/arm/arm.c | 3 + 3 files changed, 166 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 662ddf5b124b..a925c183aed6 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -234,12 +234,99 @@ static int kvm_dvmbm_get_dies_info(struct kvm *kvm, u64 *vm_aff3s, int size) return num; } +static u32 socket_num, die_num; + +static u32 kvm_get_socket_num(void) +{ + int socket_id[MAX_PG_CFG_SOCKETS], cpu; + u32 num = 0; + + for_each_cpu(cpu, cpu_possible_mask) { + bool found = false; + u64 aff3, socket; + int i; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + /* aff3[7:3]: socket ID */ + socket = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + for (i = 0; i < num; i++) { + if (socket_id[i] == socket) { + found = true; + break; + } + } + if (!found) + socket_id[num++] = socket; + } + return num; +} + +static u32 kvm_get_die_num(void) +{ + int die_id[MAX_DIES_PER_SOCKET], cpu; + u32 num = 0; + + for_each_cpu(cpu, cpu_possible_mask) { + bool found = false; + u64 aff3, die; + int i; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + /* aff3[2:0]: die ID */ + die = aff3 & DIE_ID_MASK; + for (i = 0; i < num; i++) { + if (die_id[i] == die) { + found = true; + break; + } + } + if (!found) + die_id[num++] = die; + } + return num; +} + +static u32 g_die_pg[MAX_PG_CFG_SOCKETS * MAX_DIES_PER_SOCKET] + [MAX_CLUSTERS_PER_DIE]; + +static void kvm_get_die_pg(unsigned long pg_cfg, int socket_id, int die_id) +{ + u32 pg_num = 0, i, j; + u32 pg_flag[MAX_CLUSTERS_PER_DIE]; + u32 die_tmp = socket_id * die_num + die_id; + + for (i = 0; i < MAX_CLUSTERS_PER_DIE; i++) { + if (test_bit(i, &pg_cfg)) + pg_num++; + 
g_die_pg[die_tmp][i] = i; + pg_flag[i] = 0; + } + + for (i = 0; i < MAX_CLUSTERS_PER_DIE - pg_num; i++) { + if (test_bit(i, &pg_cfg)) { + for (j = 0; j < pg_num; j++) { + u32 cluster_bak = MAX_CLUSTERS_PER_DIE + - pg_num + j; + + if (!test_bit(cluster_bak, &pg_cfg) && + !pg_flag[cluster_bak]) { + pg_flag[cluster_bak] = 1; + g_die_pg[die_tmp][i] = cluster_bak; + g_die_pg[die_tmp][cluster_bak] = i; + break; + } + } + } + } +} + static void kvm_update_vm_lsudvmbm(struct kvm *kvm) { - u64 mpidr, aff3, aff2, aff1; + u64 mpidr, aff3, aff2, aff1, phy_aff2; u64 vm_aff3s[DVMBM_MAX_DIES]; u64 val; int cpu, nr_dies; + u32 socket_id, die_id; nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES); if (nr_dies > 2) { @@ -254,10 +341,18 @@ static void kvm_update_vm_lsudvmbm(struct kvm *kvm) /* fulfill bits [52:0] */ for_each_cpu(cpu, kvm->arch.sched_cpus) { mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); - - val |= 1ULL << (aff2 * 4 + aff1); + socket_id = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + die_id = (aff3 & DIE_ID_MASK) >> DIE_ID_SHIFT; + if (die_id == TOTEM_B_ID) + die_id = 0; + else + die_id = 1; + + phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 * 4 + aff1); } goto out_update; @@ -274,11 +369,20 @@ static void kvm_update_vm_lsudvmbm(struct kvm *kvm) mpidr = cpu_logical_map(cpu); aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); - - if (aff3 == vm_aff3s[0]) - val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + socket_id = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + die_id = (aff3 & DIE_ID_MASK) >> DIE_ID_SHIFT; + if (die_id == TOTEM_B_ID) + die_id = 0; else - val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + die_id = 1; + + if (aff3 == vm_aff3s[0]) { + phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + } else { + phy_aff2 = 
g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + } } out_update: @@ -345,6 +449,41 @@ void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); } +void kvm_get_pg_cfg(void) +{ + void __iomem *mn_base; + u32 i, j; + u32 pg_cfgs[MAX_PG_CFG_SOCKETS * MAX_DIES_PER_SOCKET]; + u64 mn_phy_base; + u32 val; + + socket_num = kvm_get_socket_num(); + die_num = kvm_get_die_num(); + + for (i = 0; i < socket_num; i++) { + for (j = 0; j < die_num; j++) { + + /* + * totem B means the first CPU DIE within a SOCKET, + * totem A means the second one. + */ + mn_phy_base = (j == 0) ? TB_MN_BASE : TA_MN_BASE; + mn_phy_base += CHIP_ADDR_OFFSET(i); + mn_phy_base += MN_ECO0_OFFSET; + + mn_base = ioremap(mn_phy_base, 4); + if (!mn_base) { + kvm_info("MN base addr ioremap failed\n"); + return; + } + val = readl_relaxed(mn_base); + pg_cfgs[j + i * die_num] = val & 0xff; + kvm_get_die_pg(pg_cfgs[j + i * die_num], i, j); + iounmap(mn_base); + } + } +} + int kvm_sched_affinity_vm_init(struct kvm *kvm) { if (!kvm_dvmbm_support) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 4e162b7f6688..6d43ad13dff7 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -20,6 +20,21 @@ enum hisi_cpu_type { #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) +#define MAX_CLUSTERS_PER_DIE 8 +#define TB_MN_BASE 0x00C6067f0000 +#define TA_MN_BASE 0x0046067F0000 +#define CHIP_ADDR_OFFSET(_chip) (((((_chip) >> 3) & 0x1) * 0x80000000000) + \ + ((((_chip) >> 2) & 0x1) * (0x100000000000)) + \ + (((_chip) & 0x3) * 0x200000000000)) +#define MAX_PG_CFG_SOCKETS 4 +#define MAX_DIES_PER_SOCKET 2 +#define MN_ECO0_OFFSET 0xc00 +#define SOCKET_ID_MASK 0xf8 +#define SOCKET_ID_SHIFT 3 +#define DIE_ID_MASK 0x7 +#define DIE_ID_SHIFT 0 +#define TOTEM_B_ID 3 + /* * MPIDR_EL1 layout on HIP09 * @@ 
-50,6 +65,7 @@ enum hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +void kvm_get_pg_cfg(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -68,6 +84,7 @@ static inline bool hisi_dvmbm_supported(void) { return false; } +static inline void kvm_get_pg_cfg(void) {} static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) { diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index c4aad0845dc6..379dad09eefa 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1802,6 +1802,9 @@ int kvm_arch_init(void *opaque) kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); + if (kvm_dvmbm_support) + kvm_get_pg_cfg(); + in_hyp_mode = is_kernel_in_hyp_mode(); if (!in_hyp_mode && kvm_arch_requires_vhe()) { -- Gitee From 3068ae97858d47016f42be3ab3e5b1b1192dfb20 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Tue, 8 Oct 2024 19:21:36 +0800 Subject: [PATCH 10/13] KVM: arm64: Add new HiSi CPU type for supporting DVMBM Add new HiSi CPU type for supporting DVMBM, and expand ACPI hisi oem table id string to 8 bytes.
Signed-off-by: Zhou Wang Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 16 ++++++++++------ arch/arm64/kvm/hisilicon/hisi_virt.h | 6 ++++-- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index a925c183aed6..00ee4fd20996 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -18,15 +18,19 @@ static const char * const hisi_cpu_type_str[] = { "Hisi1616", "Hisi1620", "HIP09", + "HIP10", + "HIP10C", "Unknown" }; /* ACPI Hisi oem table id str */ static const char * const oem_str[] = { - "HIP06", /* Hisi 1612 */ - "HIP07", /* Hisi 1616 */ - "HIP08", /* Hisi 1620 */ - "HIP09" /* HIP09 */ + "HIP06 ", /* Hisi 1612 */ + "HIP07 ", /* Hisi 1616 */ + "HIP08 ", /* Hisi 1620 */ + "HIP09 ", /* HIP09 */ + "HIP10 ", /* HIP10 */ + "HIP10C " /* HIP10C */ }; /* @@ -47,7 +51,7 @@ static enum hisi_cpu_type acpi_get_hisi_cpu_type(void) } for (i = 0; i < str_size; ++i) { - if (!strncmp(oem_str[i], table->oem_table_id, 5)) + if (!strncmp(oem_str[i], table->oem_table_id, 8)) return i; } @@ -153,7 +157,7 @@ static void hardware_disable_dvmbm(void *data) bool hisi_dvmbm_supported(void) { - if (cpu_type != HI_IP09) + if (cpu_type != HI_IP10 && cpu_type != HI_IP10C) return false; /* Determine whether DVMBM is supported by the hardware */ diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 6d43ad13dff7..a718156a8f4e 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -12,10 +12,12 @@ enum hisi_cpu_type { HI_1616, HI_1620, HI_IP09, + HI_IP10, + HI_IP10C, UNKNOWN_HI_TYPE }; -/* HIP09 */ +/* HIP10 */ #define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) @@ -36,7 +38,7 @@ enum hisi_cpu_type { #define TOTEM_B_ID 3 /* - * MPIDR_EL1 layout 
on HIP09 + * MPIDR_EL1 layout on HIP10 * * Aff3[7:3] - socket ID [0-15] * Aff3[2:0] - die ID [1,3] -- Gitee From f7f4d6e51705b8345910c1e23f0228bd36e97fbb Mon Sep 17 00:00:00 2001 From: yangjinqian Date: Thu, 27 Mar 2025 15:55:53 +0800 Subject: [PATCH 11/13] kvm: hisi_virt: fix kernel panic when enable DVMBM in nVHE When the kernel is in nvhe mode and is in EL1, the original judgment logic causes the hardware_disable_dvmbm function to read the EL2 register in EL1, causing a panic during kernel startup. Signed-off-by: yangjinqian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 00ee4fd20996..c02963588343 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -160,12 +160,17 @@ bool hisi_dvmbm_supported(void) if (cpu_type != HI_IP10 && cpu_type != HI_IP10C) return false; + if (!is_kernel_in_hyp_mode()) { + kvm_info("Hisi dvmbm not supported by KVM nVHE mode\n"); + return false; + } + /* Determine whether DVMBM is supported by the hardware */ if (!(read_sysreg(aidr_el1) & AIDR_EL1_DVMBM_MASK)) return false; /* User provided kernel command-line parameter */ - if (!dvmbm_enabled || !is_kernel_in_hyp_mode()) { + if (!dvmbm_enabled) { on_each_cpu(hardware_disable_dvmbm, NULL, 1); return false; } -- Gitee From 6109ca1a7bda8345077518949d9f52d86eb52342 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Thu, 27 Mar 2025 15:55:54 +0800 Subject: [PATCH 12/13] kvm: hisi_virt: Update TLBI broadcast feature for hip12 Compared with hip09, there are some differences on TLBI broadcast feature for hip12 including: - No need to translate logical cluster id to physical cluster id; - The minimum granularity of TLBI broadcast is cluster; - Some fields of register LSUDVMBM changes; So update for corresponding changes. 
MPIDR_EL1 layout on HIP12: Aff3[3:2] - socket ID [0-3] Aff3[1:0] - die ID [0,1] Aff2 - cluster ID [0-5] Aff1 - core ID [0-15] Aff0 - thread ID [0,1] Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 59 +++++++++++++++++++++++++++- arch/arm64/kvm/hisilicon/hisi_virt.h | 8 ++++ 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index c02963588343..23c97598513f 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -20,6 +20,7 @@ static const char * const hisi_cpu_type_str[] = { "HIP09", "HIP10", "HIP10C", + "HIP12", "Unknown" }; @@ -30,7 +31,8 @@ static const char * const oem_str[] = { "HIP08 ", /* Hisi 1620 */ "HIP09 ", /* HIP09 */ "HIP10 ", /* HIP10 */ - "HIP10C " /* HIP10C */ + "HIP10C ", /* HIP10C */ + "HIP12 " /* HIP12 */ }; /* @@ -398,6 +400,56 @@ static void kvm_update_vm_lsudvmbm(struct kvm *kvm) kvm->arch.tlbi_dvmbm = val; } +static void kvm_update_vm_lsudvmbm_hip12(struct kvm *kvm) +{ + u64 mpidr, aff3, aff2; + u64 vm_aff3s[DVMBM_MAX_DIES_HIP12]; + u64 val; + int cpu, nr_dies; + + nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES_HIP12); + if (nr_dies > 2) { + val = DVMBM_RANGE_ALL_DIES << DVMBM_RANGE_SHIFT; + goto out_update; + } + + if (nr_dies == 1) { + val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_VDIE_SHIFT_HIP12; + + /* fulfill bits [11:6] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + + val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT_HIP12); + } + + goto out_update; + } + + /* nr_dies == 2 */ + val = DVMBM_RANGE_TWO_DIES << DVMBM_RANGE_SHIFT | + DVMBM_GRAN_CLUSTER << DVMBM_GRAN_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_VDIE_SHIFT_HIP12 | + vm_aff3s[1] << DVMBM_DIE2_VDIE_SHIFT_HIP12; + + /* and fulfill bits [11:0] */ + 
for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + + if (aff3 == vm_aff3s[0]) + val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT_HIP12); + else + val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT_HIP12); + } + +out_update: + kvm->arch.tlbi_dvmbm = val; +} + void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -442,7 +494,10 @@ void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) * Re-calculate LSUDVMBM_EL2 for this VM and kick all vcpus * out to reload the LSUDVMBM configuration. */ - kvm_update_vm_lsudvmbm(kvm); + if (cpu_type == HI_IP12) + kvm_update_vm_lsudvmbm_hip12(kvm); + else + kvm_update_vm_lsudvmbm(kvm); kvm_make_all_cpus_request(kvm, KVM_REQ_RELOAD_TLBI_DVMBM); out_unlock: diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index a718156a8f4e..ace161dc2935 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -14,6 +14,7 @@ enum hisi_cpu_type { HI_IP09, HI_IP10, HI_IP10C, + HI_IP12, UNKNOWN_HI_TYPE }; @@ -64,6 +65,13 @@ enum hisi_cpu_type { #define DVMBM_MAX_DIES 32 +/* HIP12 */ +#define DVMBM_DIE1_VDIE_SHIFT_HIP12 57 +#define DVMBM_DIE2_VDIE_SHIFT_HIP12 53 +#define DVMBM_DIE1_CLUSTER_SHIFT_HIP12 6 +#define DVMBM_DIE2_CLUSTER_SHIFT_HIP12 0 +#define DVMBM_MAX_DIES_HIP12 8 + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); -- Gitee From 60b176f9f9ca51b03570c2b175930d530837a2f2 Mon Sep 17 00:00:00 2001 From: yangjinqian Date: Thu, 27 Mar 2025 15:55:55 +0800 Subject: [PATCH 13/13] KVM: arm64: Add new HiSi CPU type to support DVMBM Add new HiSi CPU type HIP12 for supporting DVMBM. Function kvm_get_pg_cfg() is used to get configuration for translating logic cluster id to physical cluster id which is not needed by hip12, so skip it for hip12. 
Signed-off-by: yangjinqian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 23c97598513f..64b8b9f70140 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -159,7 +159,8 @@ static void hardware_disable_dvmbm(void *data) bool hisi_dvmbm_supported(void) { - if (cpu_type != HI_IP10 && cpu_type != HI_IP10C) + if (cpu_type != HI_IP10 && cpu_type != HI_IP10C && + cpu_type != HI_IP12) return false; if (!is_kernel_in_hyp_mode()) { @@ -521,6 +522,9 @@ void kvm_get_pg_cfg(void) u64 mn_phy_base; u32 val; + if (cpu_type == HI_IP12) + return; + socket_num = kvm_get_socket_num(); die_num = kvm_get_die_num(); -- Gitee