From 1cfddf0e2ee4234daef7dcfbbbe5f513a1fcf1bf Mon Sep 17 00:00:00 2001
From: Ma Wupeng
Date: Tue, 14 Jun 2022 17:21:52 +0800
Subject: [PATCH 1/4] efi: Make code to find mirrored memory ranges generic

Commit b05b9f5f9dcf ("x86, mirror: x86 enabling - find mirrored memory
ranges") introduced the efi_find_mirror() function on x86. In order to
reuse the API, make it public.

Arm64 can support mirrored memory too, so a call to efi_find_mirror() is
added to efi_init() to bring this support to arm64.

Since efi_init() is shared by ARM, arm64 and riscv, this patch brings
mirrored memory support to all of these architectures, but the support
has only been tested on arm64.

Signed-off-by: Ma Wupeng
Link: https://lore.kernel.org/r/20220614092156.1972846-2-mawupeng1@huawei.com
[ardb: fix subject to better reflect the payload]
Acked-by: Mike Rapoport
Signed-off-by: Ard Biesheuvel
Signed-off-by: Shi Yang
---
 drivers/firmware/efi/efi-init.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index 86da3c7a5036..1aaac8a486da 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -236,6 +236,7 @@ void __init efi_init(void)
 	}
 
 	reserve_regions();
+	efi_find_mirror();
 	efi_esrt_init();
 	efi_mokvar_table_init();
 
-- 
Gitee

From a812a1a53f3d15f88c73817598e19fa0dd464de1 Mon Sep 17 00:00:00 2001
From: Ma Wupeng
Date: Tue, 14 Jun 2022 17:21:54 +0800
Subject: [PATCH 2/4] mm: Limit warning message in vmemmap_verify() to once

For a system that has only limited mirrored memory, or NUMA nodes without
any mirrored memory, the per-node vmemmap page_structs prefer to be
allocated from a mirrored region, which leads to vmemmap_verify() in
vmemmap_populate_basepages() reporting a large number of warning messages.

This patch prints the "potential offnode page_structs" warning only once,
to avoid a very long print during bootup.

Signed-off-by: Ma Wupeng
Acked-by: David Hildenbrand
Link: https://lore.kernel.org/r/20220614092156.1972846-4-mawupeng1@huawei.com
Acked-by: Mike Rapoport
Signed-off-by: Ard Biesheuvel
Signed-off-by: Shi Yang
---
 mm/sparse-vmemmap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index b2536a3e5e2d..c153b22ed834 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -604,7 +604,7 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
 	int actual_node = early_pfn_to_nid(pfn);
 
 	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
-		pr_debug("[%lx-%lx] potential offnode page_structs\n",
+		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
 			start, end - 1);
 }
 
-- 
Gitee

From cf8fb95ed6d55841b0d6362fbd9c9d2f4a51e99a Mon Sep 17 00:00:00 2001
From: Ma Wupeng
Date: Tue, 14 Jun 2022 17:21:55 +0800
Subject: [PATCH 3/4] arm64: mm: Only remove nomap flag for initrd

Commit 177e15f0c144 ("arm64: add the initrd region to the linear mapping
explicitly") removed all the flags of the memory used by the initrd. This
was fine since MEMBLOCK_MIRROR was not used on arm64.

However, with the mirrored memory feature introduced to arm64, this will
clear the mirror flag of the memory used by the initrd, which leads to an
error being logged by find_zone_movable_pfns_for_nodes() if the lower 4G
range contains some non-mirrored memory.

To solve this problem, remove only the MEMBLOCK_NOMAP flag, via
memblock_clear_nomap().
Signed-off-by: Ma Wupeng
Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20220614092156.1972846-5-mawupeng1@huawei.com
Acked-by: Mike Rapoport
Acked-by: Catalin Marinas
Signed-off-by: Ard Biesheuvel
Signed-off-by: Shi Yang
---
 arch/arm64/mm/init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b350adfb2f77..75e83d2ea5bb 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -504,8 +504,8 @@ void __init arm64_memblock_init(void)
 			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
 			phys_initrd_size = 0;
 		} else {
-			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
 			memblock_add(base, size);
+			memblock_clear_nomap(base, size);
 			memblock_reserve(base, size);
 		}
 	}
-- 
Gitee

From c25648182f54f10fae9d50ffdbd3faf0c2118c76 Mon Sep 17 00:00:00 2001
From: Ma Wupeng
Date: Tue, 14 Jun 2022 17:21:56 +0800
Subject: [PATCH 4/4] memblock: Disable mirror feature if kernelcore is not specified

If the system has some mirrored memory and the mirrored feature is not
specified in the boot parameters, the basic mirrored feature will be
enabled, and this leads to the following situations:

- memblock memory allocation prefers the mirrored region. This may have
  some unexpected influence on NUMA affinity.

- contiguous memory will be split into several parts if only part of it
  is mirrored memory, via memblock_mark_mirror().

To fix this, check the variable mirrored_kernelcore in
memblock_mark_mirror(). Mark memory with the MEMBLOCK_MIRROR flag only if
kernelcore=mirror is added to the kernel parameters.

Signed-off-by: Ma Wupeng
Acked-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20220614092156.1972846-6-mawupeng1@huawei.com
Acked-by: Mike Rapoport
Signed-off-by: Ard Biesheuvel
Signed-off-by: Shi Yang
---
 mm/internal.h   | 2 ++
 mm/memblock.c   | 3 +++
 mm/page_alloc.c | 2 +-
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/mm/internal.h b/mm/internal.h
index 0f49e0e7a0aa..68175c5a71c6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -677,4 +677,6 @@ DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
 		      unsigned long addr, int page_nid, int *flags);
 
+extern bool mirrored_kernelcore;
+
 #endif /* __MM_INTERNAL_H */
diff --git a/mm/memblock.c b/mm/memblock.c
index 94008ef55a82..534d9dc506d2 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -913,6 +913,9 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
  */
 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
 {
+	if (!mirrored_kernelcore)
+		return 0;
+
 	system_has_some_mirror = true;
 
 	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7cf3cd1d028b..7ce0948a44c5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -380,7 +380,7 @@ static unsigned long required_kernelcore_percent __initdata;
 static unsigned long required_movablecore __initdata;
 static unsigned long required_movablecore_percent __initdata;
 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
-bool mirrored_kernelcore __meminitdata;
+bool mirrored_kernelcore __initdata_memblock;
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
-- 
Gitee
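
For context, the efi_find_mirror() helper that patch 1 hooks into efi_init() walks
the EFI memory map and marks every region flagged EFI_MEMORY_MORE_RELIABLE as
mirrored via memblock_mark_mirror(), the same call that patch 4 gates on
mirrored_kernelcore. A simplified sketch of the mainline implementation follows;
details may vary between kernel versions:

	void __init efi_find_mirror(void)
	{
		efi_memory_desc_t *md;
		u64 mirror_size = 0, total_size = 0;

		if (!efi_enabled(EFI_MEMMAP))
			return;

		/* Walk the EFI memory map provided by the firmware. */
		for_each_efi_memory_desc(md) {
			unsigned long long start = md->phys_addr;
			unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;

			total_size += size;
			if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
				/* No-op unless kernelcore=mirror is set (see patch 4). */
				memblock_mark_mirror(start, size);
				mirror_size += size;
			}
		}

		if (mirror_size)
			pr_info("Memory: %lldM/%lldM mirrored memory\n",
				mirror_size >> 20, total_size >> 20);
	}

With the whole series applied, firmware-reported mirrored ranges are therefore only
honored when the kernel is booted with kernelcore=mirror on the command line.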