diff --git a/Documentation/x86/microcode.rst b/Documentation/x86/microcode.rst
index a320d37982ed6dcfb772b78df934a490d2299f4d..1cc734412397175db742b3ca43abe21c7391b6e0 100644
--- a/Documentation/x86/microcode.rst
+++ b/Documentation/x86/microcode.rst
@@ -34,6 +34,8 @@
 on Intel:
   kernel/x86/microcode/GenuineIntel.bin
 on AMD :
   kernel/x86/microcode/AuthenticAMD.bin
+on Hygon:
+  kernel/x86/microcode/HygonGenuine.bin
 
 During BSP (BootStrapping Processor) boot (pre-SMP), the kernel
 scans the microcode file in the initrd. If microcode matching the
@@ -68,6 +70,10 @@ here for future reference only).
   cd $TMPDIR
 
   mkdir -p $DSTDIR
 
+  if [ -d /lib/firmware/hygon-ucode ]; then
+    cat /lib/firmware/hygon-ucode/microcode_hygon*.bin > $DSTDIR/HygonGenuine.bin
+  fi
+
   if [ -d /lib/firmware/amd-ucode ]; then
     cat /lib/firmware/amd-ucode/microcode_amd*.bin > $DSTDIR/AuthenticAMD.bin
   fi
@@ -119,7 +125,8 @@ currently supported.
 
 Here's an example::
 
-  CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin"
+  CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 \
+    amd-ucode/microcode_amd_fam15h.bin hygon-ucode/microcode_hygon_fam18h.bin"
   CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
 
 This basically means, you have the following tree structure locally::
 
@@ -129,6 +136,10 @@ This basically means, you have the following tree structure locally::
   |-- amd-ucode
   ...
   |   |-- microcode_amd_fam15h.bin
   ...
+  |-- hygon-ucode
+  ...
+  |   |-- microcode_hygon_fam18h.bin
+  ...
   |-- intel-ucode
   ...
   |   |-- 06-3a-09
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8a0da1cef972bae533ec5a377c5550c9f53f7f8b..8586456896da4b80f7b04aed73a9d876689cd787 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1313,15 +1313,15 @@ config X86_REBOOTFIXUPS
 config MICROCODE
 	bool "CPU microcode loading support"
 	default y
-	depends on CPU_SUP_AMD || CPU_SUP_INTEL
+	depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON
 	select FW_LOADER
 	---help---
-	  If you say Y here, you will be able to update the microcode on
-	  Intel and AMD processors. The Intel support is for the IA32 family,
+	  If you say Y here, you will be able to update the microcode on Intel,
+	  AMD and Hygon processors. The Intel support is for the IA32 family,
 	  e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
-	  AMD support is for families 0x10 and later. You will obviously need
-	  the actual microcode binary data itself which is not shipped with
-	  the Linux kernel.
+	  AMD support is for families 0x10 and later. The Hygon support is for
+	  families 0x18 and later. You will obviously need the actual microcode
+	  binary data itself which is not shipped with the Linux kernel.
 
 	  The preferred method to load microcode from a detached initrd is described
 	  in Documentation/x86/microcode.rst. For that you need to enable
@@ -1353,6 +1353,14 @@ config MICROCODE_AMD
 	  If you select this option, microcode patch loading support for AMD
 	  processors will be enabled.
 
+config MICROCODE_HYGON
+	bool "Hygon microcode loading support"
+	depends on CPU_SUP_HYGON && MICROCODE
+	select MICROCODE_AMD
+	help
+	  If you select this option, microcode patch loading support for Hygon
+	  processors will be enabled.
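
Note: the three vendor-specific initrd paths above all feed one early-loader
lookup. A minimal sketch of that dispatch, assuming the X86_VENDOR_* constants
and the x86_cpuid_vendor() helper from arch/x86/include/asm/microcode.h; the
function name early_ucode_path() is hypothetical and not part of this patch:

  /* Map the boot CPU's vendor to the initrd microcode path documented above. */
  static const char *early_ucode_path(void)
  {
  	switch (x86_cpuid_vendor()) {
  	case X86_VENDOR_INTEL:
  		return "kernel/x86/microcode/GenuineIntel.bin";
  	case X86_VENDOR_AMD:
  		return "kernel/x86/microcode/AuthenticAMD.bin";
  	case X86_VENDOR_HYGON:
  		return "kernel/x86/microcode/HygonGenuine.bin";
  	default:
  		return NULL;	/* no early microcode for unknown vendors */
  	}
  }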
+ config MICROCODE_OLD_INTERFACE bool "Ancient loading interface (DEPRECATED)" default n diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index 1ae4e5791afafbe56c188cf14a3223e06f724869..b6e5db1069e9bb3954ef03d47d8a7e2756b999ba 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -84,6 +84,10 @@ u16 amd_nb_num(void); bool amd_nb_has_feature(unsigned int feature); struct amd_northbridge *node_to_amd_nb(int node); +bool hygon_f18h_m4h(void); +u16 hygon_nb_num(void); +int get_df_id(struct pci_dev *misc, u8 *id); + static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev) { struct pci_dev *misc; @@ -121,6 +125,10 @@ static inline bool amd_gart_present(void) #define node_to_amd_nb(x) NULL #define amd_gart_present(x) false +#define hygon_f18h_m4h false +#define hygon_nb_num(x) 0 +#define get_df_id(x, y) NULL + #endif diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index b0b54e18aa8bdfcc315646fc19337c48fbf2d98c..57f52b01f65d0559f9891dba1fdf2a10482f47e3 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -212,12 +212,8 @@ void mce_setup(struct mce *m); void mce_log(struct mce *m); DECLARE_PER_CPU(struct device *, mce_device); -/* - * Maximum banks number. - * This is the limit of the current register layout on - * Intel CPUs. - */ -#define MAX_NR_BANKS 32 +/* Maximum number of MCA banks per CPU. */ +#define MAX_NR_BANKS 64 #ifdef CONFIG_X86_MCE_INTEL void mce_intel_feature_init(struct cpuinfo_x86 *c); diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 2b7cc5397f80df70f6197cc4257e2f5aedbfcce4..1323befa1dc345b15f4b09acd568a23e7a1a0f5c 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -78,6 +78,12 @@ static inline struct microcode_ops * __init init_amd_microcode(void) static inline void __exit exit_amd_microcode(void) {} #endif +#ifdef CONFIG_MICROCODE_HYGON +extern const struct microcode_ops * __init init_hygon_microcode(void); +#else +#define init_hygon_microcode() NULL +#endif + #define MAX_UCODE_COUNT 128 #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24)) @@ -87,6 +93,9 @@ static inline void __exit exit_amd_microcode(void) {} #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h') #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i') #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D') +#define CPUID_HYGON1 QCHAR('H', 'y', 'g', 'o') +#define CPUID_HYGON2 QCHAR('n', 'G', 'e', 'n') +#define CPUID_HYGON3 QCHAR('u', 'i', 'n', 'e') #define CPUID_IS(a, b, c, ebx, ecx, edx) \ (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c)))) @@ -113,6 +122,9 @@ static inline int x86_cpuid_vendor(void) if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) return X86_VENDOR_AMD; + if (CPUID_IS(CPUID_HYGON1, CPUID_HYGON2, CPUID_HYGON3, ebx, ecx, edx)) + return X86_VENDOR_HYGON; + return X86_VENDOR_UNKNOWN; } diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 49ae4e1ac9cd8b58d8ad9eeb7d7f2483020711aa..7de599eba7f045139cbd1396e992ab4f257c8c19 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -197,7 +197,8 @@ static int __init ffh_cstate_init(void) struct cpuinfo_x86 *c = &boot_cpu_data; if (c->x86_vendor != X86_VENDOR_INTEL && - c->x86_vendor != X86_VENDOR_AMD) + c->x86_vendor != X86_VENDOR_AMD && + c->x86_vendor != X86_VENDOR_HYGON) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 
c4bc01da820ef76fee55e225d9f4bc27740cb92b..f503af7765a8ddf248eb7ab747bc82c115a4c3a6 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -26,10 +26,16 @@ #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444 #define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654 +#define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 +#define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 + /* Protect the PCI config register pairs used for SMN and DF indirect access. */ static DEFINE_MUTEX(smn_mutex); static u32 *flush_words; +static u16 nb_num; static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, @@ -80,16 +86,22 @@ static const struct pci_device_id amd_nb_link_ids[] = { static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, {} }; static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, {} }; @@ -224,6 +236,209 @@ int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) } EXPORT_SYMBOL_GPL(amd_df_indirect_read); +bool hygon_f18h_m4h(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(hygon_f18h_m4h); + +u16 hygon_nb_num(void) +{ + return nb_num; +} +EXPORT_SYMBOL_GPL(hygon_nb_num); + +static int get_df1_register(struct pci_dev *misc, int offset, u32 *value) +{ + struct pci_dev *df_f1 = NULL; + u32 device; + int err; + + switch (boot_cpu_data.x86_model) { + case 0x4: + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x6: + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + break; + default: + return -ENODEV; + } + + while ((df_f1 = pci_get_device(misc->vendor, device, df_f1))) + if (pci_domain_nr(df_f1->bus) == pci_domain_nr(misc->bus) && + df_f1->bus->number == misc->bus->number && + PCI_SLOT(df_f1->devfn) == PCI_SLOT(misc->devfn)) + break; + + if (!df_f1) { + pr_warn("Error getting DF F1 device.\n"); + return -ENODEV; + } + + err = pci_read_config_dword(df_f1, offset, value); + if (err) + pr_warn("Error reading DF F1 register.\n"); + + return err; +} + +int get_df_id(struct pci_dev *misc, u8 *id) +{ + u32 value; + int ret; + + /* F1x200[23:20]: DF ID */ + ret = get_df1_register(misc, 0x200, &value); + *id = (value >> 20) & 0xf; + + return ret; +} +EXPORT_SYMBOL_GPL(get_df_id); + +static u8 get_socket_num(struct pci_dev *misc) +{ + u32 value; + int ret; + + /* F1x200[7:0]: Which socket is present. */ + ret = get_df1_register(misc, 0x200, &value); + + return ret ? 
0 : hweight8(value & 0xff); +} + +static int northbridge_init_f18h_m4h(const struct pci_device_id *root_ids, + const struct pci_device_id *misc_ids, + const struct pci_device_id *link_ids) +{ + struct pci_dev *root, *misc, *link; + struct pci_dev *root_first = NULL; + struct amd_northbridge *nb; + u16 roots_per_socket = 0; + u16 miscs_per_socket = 0; + u16 socket_num = 0; + u16 root_count = 0; + u16 misc_count = 0; + int err = -ENODEV; + u8 i, j, m, n; + u8 id; + + pr_info("Hygon Fam%xh Model%xh NB driver.\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + + misc = next_northbridge(NULL, misc_ids); + if (misc != NULL) { + socket_num = get_socket_num(misc); + pr_info("Socket number: %d\n", socket_num); + if (!socket_num) { + err = -ENODEV; + goto ret; + } + } else { + err = -ENODEV; + goto ret; + } + + misc = NULL; + while ((misc = next_northbridge(misc, misc_ids)) != NULL) + misc_count++; + + root = NULL; + while ((root = next_northbridge(root, root_ids)) != NULL) + root_count++; + + if (!root_count || !misc_count) { + err = -ENODEV; + goto ret; + } + + /* + * There should be _exactly_ N roots for each DF/SMN + * interface, and M DF/SMN interfaces in one socket. + */ + roots_per_socket = root_count / socket_num; + miscs_per_socket = misc_count / socket_num; + + if (!roots_per_socket || !miscs_per_socket) { + err = -ENODEV; + goto ret; + } + + nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL); + if (!nb) { + err = -ENOMEM; + goto ret; + } + + amd_northbridges.nb = nb; + amd_northbridges.num = misc_count; + + link = misc = root = NULL; + j = m = n = 0; + for (i = 0; i < amd_northbridges.num; i++) { + misc = next_northbridge(misc, misc_ids); + link = next_northbridge(link, link_ids); + + /* Only save the first PCI root device for each socket. */ + if (!(i % miscs_per_socket)) { + root_first = next_northbridge(root, root_ids); + root = root_first; + j = 1; + } + + if (get_df_id(misc, &id)) { + err = -ENODEV; + goto err; + } + pr_info("DF ID: %d\n", id); + + if (id < 4) { + /* Add the devices with id<4 from the tail. */ + node_to_amd_nb(misc_count - m - 1)->misc = misc; + node_to_amd_nb(misc_count - m - 1)->link = link; + node_to_amd_nb(misc_count - m - 1)->root = root_first; + m++; + } else { + node_to_amd_nb(n)->misc = misc; + node_to_amd_nb(n)->link = link; + node_to_amd_nb(n)->root = root_first; + n++; + } + + /* Skip the redundant PCI root devices per socket. */ + while (j < roots_per_socket) { + root = next_northbridge(root, root_ids); + j++; + } + } + nb_num = n; + + return 0; + +err: + kfree(nb); + amd_northbridges.nb = NULL; + +ret: + pr_err("Hygon Fam%xh Model%xh northbridge init failed(%d)!\n", + boot_cpu_data.x86, boot_cpu_data.x86_model, err); + return err; +} + int amd_cache_northbridges(void) { const struct pci_device_id *misc_ids = amd_nb_misc_ids; @@ -243,6 +458,11 @@ int amd_cache_northbridges(void) root_ids = hygon_root_ids; misc_ids = hygon_nb_misc_ids; link_ids = hygon_nb_link_ids; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return northbridge_init_f18h_m4h(root_ids, + misc_ids, link_ids); } misc = NULL; diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 30f33b75209a1e771888ea1f4de66c58d7d707da..4e1738a8c331dc8d0fb84c8f89c92a38c5f5777a 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -693,11 +693,30 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) if (!cpuid_edx(0x80000006)) return; - /* - * LLC is at the core complex level. 
- * Core complex ID is ApicId[3] for these processors. - */ - per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + if (c->x86_model < 0x5) { + /* + * LLC is at the core complex level. + * Core complex ID is ApicId[3] for these processors. + */ + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + } else { + /* + * LLC ID is calculated from the number of threads + * sharing the cache. + */ + u32 eax, ebx, ecx, edx, num_sharing_cache = 0; + u32 llc_index = find_num_cache_leaves(c) - 1; + + cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx); + if (eax) + num_sharing_cache = ((eax >> 14) & 0xfff) + 1; + + if (num_sharing_cache) { + int bits = get_count_order(num_sharing_cache); + + per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; + } + } } void init_amd_cacheinfo(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 62e9a982adaf92bad1394142ff58d99d74f81f75..8aeb3ec7cf0b445fb112f10ea5a2c92b81cafdca 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -80,16 +80,29 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) if (smp_num_siblings > 1) c->x86_max_cores /= smp_num_siblings; - /* - * In case leaf B is available, use it to derive - * topology information. - */ - err = detect_extended_topology(c); - if (!err) - c->x86_coreid_bits = get_count_order(c->x86_max_cores); - - /* Socket ID is ApicId[6] for these processors. */ - c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; + switch (c->x86_model) { + case 0x0 ... 0x3: + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + break; + /* Socket ID is ApicId[6] for these processors. */ + c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT; + break; + case 0x4: + case 0x5: + case 0x6: + /* + * In case leaf 0xB is available, use it to derive + * topology information. + */ + err = detect_extended_topology(c); + if (!err) + c->x86_coreid_bits = + get_count_order(c->x86_max_cores); + __max_die_per_package = nodes_per_socket; + break; + default: + break; + } cacheinfo_hygon_init_llc_id(c, cpu); } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) { @@ -234,12 +247,12 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) u32 ecx; ecx = cpuid_ecx(0x8000001e); - nodes_per_socket = ((ecx >> 8) & 7) + 1; + __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1; } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) { u64 value; rdmsrl(MSR_FAM10H_NODE_ID, value); - nodes_per_socket = ((value >> 3) & 7) + 1; + __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1; } if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) && diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 7d76f41591daa32806711ddcc528d95c6bfeb2d0..a72b1a99683a75ec308d37e72fed3299e5449dfe 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -192,10 +192,10 @@ EXPORT_SYMBOL_GPL(smca_banks); static char buf_mcatype[MAX_MCATYPE_NAME_LEN]; static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks); -static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */ +static DEFINE_PER_CPU(u64, bank_map); /* see which banks are on */ /* Map of banks that have more than MCA_MISC0 available. 
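 * With MAX_NR_BANKS raised to 64 in <asm/mce.h> above, both this map and
 * bank_map are widened to u64 and are tested with BIT_ULL(bank) so that
 * banks 32-63 do not overflow a 32-bit mask.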
*/ -static DEFINE_PER_CPU(u32, smca_misc_banks_map); +static DEFINE_PER_CPU(u64, smca_misc_banks_map); static void amd_threshold_interrupt(void); static void amd_deferred_error_interrupt(void); @@ -224,7 +224,7 @@ static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu) return; if (low & MASK_BLKPTR_LO) - per_cpu(smca_misc_banks_map, cpu) |= BIT(bank); + per_cpu(smca_misc_banks_map, cpu) |= BIT_ULL(bank); } @@ -483,7 +483,7 @@ static u32 smca_get_block_address(unsigned int bank, unsigned int block, if (!block) return MSR_AMD64_SMCA_MCx_MISC(bank); - if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank))) + if (!(per_cpu(smca_misc_banks_map, cpu) & BIT_ULL(bank))) return 0; return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); @@ -527,7 +527,7 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, int new; if (!block) - per_cpu(bank_map, cpu) |= (1 << bank); + per_cpu(bank_map, cpu) |= BIT_ULL(bank); memset(&b, 0, sizeof(b)); b.cpu = cpu; @@ -677,13 +677,21 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) u8 cs_mask, cs_id = 0; bool hash_enabled = false; - /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */ - if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp)) + /* Read DramOffset, check if base 1 is used. */ + if (hygon_f18h_m4h() && + amd_df_indirect_read(nid, 0, 0x214, umc, &tmp)) + goto out_err; + else if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp)) goto out_err; /* Remove HiAddrOffset from normalized address, if enabled: */ if (tmp & BIT(0)) { - u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8; + u64 hi_addr_offset; + + if (hygon_f18h_m4h()) + hi_addr_offset = (tmp & GENMASK_ULL(31, 18)) << 8; + else + hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8; if (norm_addr >= hi_addr_offset) { ret_addr -= hi_addr_offset; @@ -702,6 +710,9 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) goto out_err; } + intlv_num_sockets = 0; + if (hygon_f18h_m4h()) + intlv_num_sockets = (tmp >> 2) & 0x3; lgcy_mmio_hole_en = tmp & BIT(1); intlv_num_chan = (tmp >> 4) & 0xF; intlv_addr_sel = (tmp >> 8) & 0x7; @@ -718,7 +729,8 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp)) goto out_err; - intlv_num_sockets = (tmp >> 8) & 0x1; + if (!hygon_f18h_m4h()) + intlv_num_sockets = (tmp >> 8) & 0x1; intlv_num_dies = (tmp >> 10) & 0x3; dram_limit_addr = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); @@ -728,6 +740,10 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) switch (intlv_num_chan) { case 0: intlv_num_chan = 0; break; case 1: intlv_num_chan = 1; break; + case 2: + if (hygon_f18h_m4h()) + intlv_num_chan = 2; + break; case 3: intlv_num_chan = 2; break; case 5: intlv_num_chan = 3; break; case 7: intlv_num_chan = 4; break; @@ -754,8 +770,9 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) /* Add a bit if sockets are interleaved. */ num_intlv_bits += intlv_num_sockets; - /* Assert num_intlv_bits <= 4 */ - if (num_intlv_bits > 4) { + /* Assert num_intlv_bits in the correct range. 
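+	 * Hygon F18h M4h-M6h parts can interleave across more channels, dies
+	 * and sockets than other Zen-based systems, so up to 7 interleave
+	 * address bits are accepted there; the original limit of 4 still
+	 * applies everywhere else.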
*/ + if ((hygon_f18h_m4h() && num_intlv_bits > 7) || + (!hygon_f18h_m4h() && num_intlv_bits > 4)) { pr_err("%s: Invalid interleave bits %d.\n", __func__, num_intlv_bits); goto out_err; @@ -774,7 +791,10 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp)) goto out_err; - cs_fabric_id = (tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) + cs_fabric_id = (tmp >> 8) & 0x7FF; + else + cs_fabric_id = (tmp >> 8) & 0xFF; die_id_bit = 0; /* If interleaved over more than 1 channel: */ @@ -794,8 +814,13 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) /* If interleaved over more than 1 die. */ if (intlv_num_dies) { sock_id_bit = die_id_bit + intlv_num_dies; - die_id_shift = (tmp >> 24) & 0xF; - die_id_mask = (tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) { + die_id_shift = (tmp >> 12) & 0xF; + die_id_mask = tmp & 0x7FF; + } else { + die_id_shift = (tmp >> 24) & 0xF; + die_id_mask = (tmp >> 8) & 0xFF; + } cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; } @@ -803,7 +828,10 @@ int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) /* If interleaved over more than 1 socket. */ if (intlv_num_sockets) { socket_id_shift = (tmp >> 28) & 0xF; - socket_id_mask = (tmp >> 16) & 0xFF; + if (hygon_f18h_m4h()) + socket_id_mask = (tmp >> 16) & 0x7FF; + else + socket_id_mask = (tmp >> 16) & 0xFF; cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; } @@ -1019,7 +1047,7 @@ static void amd_threshold_interrupt(void) unsigned int bank, cpu = smp_processor_id(); for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { - if (!(per_cpu(bank_map, cpu) & (1 << bank))) + if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank))) continue; first_block = per_cpu(threshold_banks, cpu)[bank]->blocks; @@ -1472,7 +1500,7 @@ int mce_threshold_create_device(unsigned int cpu) per_cpu(threshold_banks, cpu) = bp; for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) { - if (!(per_cpu(bank_map, cpu) & (1 << bank))) + if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank))) continue; err = threshold_create_bank(cpu, bank); if (err) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index a0e52bd00ecceeaed98f61b36fd51de6543f140d..6b4dbe41e895144f489881b133ebdf0127f8cef6 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -458,11 +458,14 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) { #ifdef CONFIG_X86_64 - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; - if (family >= 0x15) + if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", family); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", family); return get_builtin_firmware(cp, fw_name); #else @@ -479,11 +482,18 @@ static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret) if (IS_ENABLED(CONFIG_X86_32)) { uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - path = (const char *)__pa_nodebug(ucode_path); + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = (const char *)__pa_nodebug( + "kernel/x86/microcode/HygonGenuine.bin"); + else + path = (const char *)__pa_nodebug(ucode_path); use_pa = true; } else { uci 
= ucode_cpu_info;
-		path = ucode_path;
+		if (x86_cpuid_vendor() == X86_VENDOR_HYGON)
+			path = "kernel/x86/microcode/HygonGenuine.bin";
+		else
+			path = ucode_path;
 		use_pa = false;
 	}
@@ -546,8 +556,14 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 	struct cont_desc desc = { 0 };
 	enum ucode_state ret;
 	struct cpio_data cp;
+	const char *path;
 
-	cp = find_microcode_in_initrd(ucode_path, false);
+	if (x86_cpuid_vendor() == X86_VENDOR_HYGON)
+		path = "kernel/x86/microcode/HygonGenuine.bin";
+	else
+		path = ucode_path;
+
+	cp = find_microcode_in_initrd(path, false);
 	if (!(cp.data && cp.size))
 		return -EINVAL;
@@ -888,7 +904,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 					      bool refresh_fw)
 {
-	char fw_name[36] = "amd-ucode/microcode_amd.bin";
+	char fw_name[40] = "amd-ucode/microcode_amd.bin";
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
 	enum ucode_state ret = UCODE_NFOUND;
@@ -898,8 +914,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 	if (!refresh_fw || !bsp)
 		return UCODE_OK;
 
-	if (c->x86 >= 0x15)
-		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+	if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15)
+		snprintf(fw_name, sizeof(fw_name),
+			 "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+	else if (x86_cpuid_vendor() == X86_VENDOR_HYGON)
+		snprintf(fw_name, sizeof(fw_name),
+			 "hygon-ucode/microcode_hygon_fam%.2xh.bin", c->x86);
 
 	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
 		pr_debug("failed to load file %s\n", fw_name);
@@ -956,6 +976,22 @@ struct microcode_ops * __init init_amd_microcode(void)
 
 	return &microcode_amd_ops;
 }
 
+#ifdef CONFIG_MICROCODE_HYGON
+const struct microcode_ops * __init init_hygon_microcode(void)
+{
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	if (c->x86_vendor != X86_VENDOR_HYGON)
+		return NULL;
+
+	if (ucode_new_rev)
+		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
+			     ucode_new_rev);
+
+	return &microcode_amd_ops;
+}
+#endif
+
 void __exit exit_amd_microcode(void)
 {
 	cleanup();
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 4a4198b806b4e604097c85e783bf24d19a5965f4..edbf8fd2b3c71ec1f4d05c46f60e7f3e3ade3240 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -42,7 +42,11 @@
 #define DRIVER_VERSION	"2.2"
 
+#ifdef CONFIG_MICROCODE_HYGON
+static const struct microcode_ops *microcode_ops;
+#else
 static struct microcode_ops *microcode_ops;
+#endif
 static bool dis_ucode_ldr = true;
 
 bool initrd_gone;
@@ -134,7 +138,8 @@ static bool __init check_loader_disabled_bsp(void)
 	if (native_cpuid_ecx(1) & BIT(31))
 		return *res;
 
-	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
+	if (x86_cpuid_vendor() == X86_VENDOR_AMD ||
+	    x86_cpuid_vendor() == X86_VENDOR_HYGON) {
 		if (amd_check_current_patch_level())
 			return *res;
 	}
@@ -186,6 +191,10 @@ void __init load_ucode_bsp(void)
 		intel = false;
 		break;
 
+	case X86_VENDOR_HYGON:
+		intel = false;
+		break;
+
 	default:
 		return;
 	}
@@ -226,6 +235,9 @@ void load_ucode_ap(void)
 		if (x86_family(cpuid_1_eax) >= 0x10)
 			load_ucode_amd_ap(cpuid_1_eax);
 		break;
+	case X86_VENDOR_HYGON:
+		load_ucode_amd_ap(cpuid_1_eax);
+		break;
 	default:
 		break;
 	}
@@ -245,6 +257,9 @@ static int __init save_microcode_in_initrd(void)
 		if (c->x86 >= 0x10)
 			ret = save_microcode_in_initrd_amd(cpuid_eax(1));
 		break;
+	case X86_VENDOR_HYGON:
+		ret
= save_microcode_in_initrd_amd(cpuid_eax(1)); + break; default: break; } @@ -338,6 +353,9 @@ void reload_early_microcode(void) if (family >= 0x10) reload_ucode_amd(); break; + case X86_VENDOR_HYGON: + reload_ucode_amd(); + break; default: break; } @@ -839,6 +857,8 @@ int __init microcode_init(void) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); + else if (c->x86_vendor == X86_VENDOR_HYGON) + microcode_ops = init_hygon_microcode(); else pr_err("no support for this CPU vendor\n"); diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 24da5ee4f022052788b580578cae78bfd103d49a..ef39abd29b2434ce85937669e3ed59e7d77c9f43 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c) unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width; unsigned int core_select_mask, core_level_siblings; unsigned int die_select_mask, die_level_siblings; + bool die_level_present = false; int leaf; leaf = detect_extended_topology_leaf(c); @@ -126,6 +127,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c) die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } if (LEAFB_SUBTYPE(ecx) == DIE_TYPE) { + die_level_present = true; die_level_siblings = LEVEL_MAX_SIBLINGS(ebx); die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax); } @@ -139,8 +141,12 @@ int detect_extended_topology(struct cpuinfo_x86 *c) c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width) & core_select_mask; - c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, - core_plus_mask_width) & die_select_mask; + + if (die_level_present) { + c->cpu_die_id = apic->phys_pkg_id(c->initial_apicid, + core_plus_mask_width) & die_select_mask; + } + c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, die_plus_mask_width); /* diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 71511ae2dfcd9dc0227ad8cdda2a0424c2237c12..a504d3e1180367b494bae57aa17e1147546ca7ea 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -235,6 +235,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "AMDI0020", APD_ADDR(cz_uart_desc) }, { "AMD0030", }, { "AMD0040", APD_ADDR(st_misc_desc)}, + { "HYGO0010", APD_ADDR(wt_i2c_desc) }, #endif #ifdef CONFIG_ARM64 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index b96d48659114b3559a0e18bf3ca7f38d22e247a6..f6026581f922c69c98528134f498b7dce3e49690 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -16,12 +16,26 @@ module_param(ecc_enable_override, int, 0644); static struct msr __percpu *msrs; +static struct amd64_family_type *fam_type; + +static inline u32 get_umc_reg(u32 reg) +{ + if (!fam_type->flags.zn_regs_v2) + return reg; + + switch (reg) { + case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5; + case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5; + case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5; + } + + WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg); + return 0; +} + /* Per-node stuff */ static struct ecc_settings **ecc_stngs; -/* Number of Unified Memory Controllers */ -static u8 num_umcs; - /* Device for the PCI component */ static struct device *pci_ctl_dev; @@ -87,6 +101,17 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, return err; } +static u32 get_umc_base_f18h_m4h(u16 node, u8 channel) +{ + struct pci_dev *f3 = node_to_amd_nb(node)->misc; + u8 df_id; + + 
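+	/*
+	 * The UMC-bearing DF interfaces report IDs starting at 4 (see the
+	 * id handling in northbridge_init_f18h_m4h()), so rebase the ID to
+	 * 0 and step the SMN aperture by 0x10000000 per DF, starting
+	 * 0x80000000 past the common UMC base.
+	 */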
get_df_id(f3, &df_id); + df_id -= 4; + + return get_umc_base(channel) + (0x80000000 + (0x10000000 * df_id)); +} + /* * Select DCT to which PCI cfg accesses are routed */ @@ -452,7 +477,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, for (i = 0; i < pvt->csels[dct].m_cnt; i++) #define for_each_umc(i) \ - for (i = 0; i < num_umcs; i++) + for (i = 0; i < fam_type->max_mcs; i++) /* * @input_addr is an InputAddr associated with the node given by mci. Return the @@ -838,7 +863,10 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt) u32 i, tmp, umc_base; for_each_umc(i) { - umc_base = get_umc_base(i); + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); umc = &pvt->umc[i]; edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); @@ -863,8 +891,10 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt) edac_dbg(1, "UMC%d x16 DIMMs present: %s\n", i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no"); - if (pvt->dram_type == MEM_LRDDR4) { - amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp); + if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) { + amd_smn_read(pvt->mc_node_id, + umc_base + get_umc_reg(UMCCH_ADDR_CFG), + &tmp); edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n", i, 1 << ((tmp >> 4) & 0x3)); } @@ -939,7 +969,7 @@ static void prep_chip_selects(struct amd64_pvt *pvt) for_each_umc(umc) { pvt->csels[umc].b_cnt = 4; - pvt->csels[umc].m_cnt = 2; + pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2; } } else { @@ -956,11 +986,17 @@ static void read_umc_base_mask(struct amd64_pvt *pvt) u32 mask_reg, mask_reg_sec; u32 *base, *base_sec; u32 *mask, *mask_sec; + u32 umc_base; int cs, umc; for_each_umc(umc) { - umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; - umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC; + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, umc); + else + umc_base = get_umc_base(umc); + + umc_base_reg = umc_base + UMCCH_BASE_ADDR; + umc_base_reg_sec = umc_base + UMCCH_BASE_ADDR_SEC; for_each_chip_select(cs, umc, pvt) { base = &pvt->csels[umc].csbases[cs]; @@ -978,8 +1014,8 @@ static void read_umc_base_mask(struct amd64_pvt *pvt) umc, cs, *base_sec, base_reg_sec); } - umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; - umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC; + umc_mask_reg = umc_base + UMCCH_ADDR_MASK; + umc_mask_reg_sec = umc_base + get_umc_reg(UMCCH_ADDR_MASK_SEC); for_each_chip_select_mask(cs, umc, pvt) { mask = &pvt->csels[umc].csmasks[cs]; @@ -1050,19 +1086,50 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) } } +static void determine_memory_type_df(struct amd64_pvt *pvt) +{ + struct amd64_umc *umc; + u32 i; + + for_each_umc(i) { + umc = &pvt->umc[i]; + + if (!(umc->sdp_ctrl & UMC_SDP_INIT)) { + umc->dram_type = MEM_EMPTY; + continue; + } + + /* + * Check if the system supports the "DDR Type" field in UMC Config + * and has DDR5 DIMMs in use. 
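+		 * UMC_CFG[2:0] == 0x1 is the DDR5 encoding tested below;
+		 * DIMM_CFG bits 5 and 4 then select LRDIMM/RDIMM, mirroring
+		 * the DDR4 decode in the else branch.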
+ */ + if ((fam_type->flags.zn_regs_v2 || hygon_f18h_m4h()) && + ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { + if (umc->dimm_cfg & BIT(5)) + umc->dram_type = MEM_LRDDR5; + else if (umc->dimm_cfg & BIT(4)) + umc->dram_type = MEM_RDDR5; + else + umc->dram_type = MEM_DDR5; + } else { + if (umc->dimm_cfg & BIT(5)) + umc->dram_type = MEM_LRDDR4; + else if (umc->dimm_cfg & BIT(4)) + umc->dram_type = MEM_RDDR4; + else + umc->dram_type = MEM_DDR4; + } + + edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]); + } +} + static void determine_memory_type(struct amd64_pvt *pvt) { u32 dram_ctrl, dcsm; - if (pvt->umc) { - if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) - pvt->dram_type = MEM_LRDDR4; - else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) - pvt->dram_type = MEM_RDDR4; - else - pvt->dram_type = MEM_DDR4; - return; - } + if (pvt->umc) + return determine_memory_type_df(pvt); switch (pvt->fam) { case 0xf: @@ -1583,6 +1650,7 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, { u32 addr_mask_orig, addr_mask_deinterleaved; u32 msb, weight, num_zero_bits; + int cs_mask_nr = csrow_nr; int dimm, size = 0; /* No Chip Selects are enabled. */ @@ -1598,17 +1666,33 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc, return size; /* - * There is one mask per DIMM, and two Chip Selects per DIMM. - * CS0 and CS1 -> DIMM0 - * CS2 and CS3 -> DIMM1 + * Family 17h introduced systems with one mask per DIMM, + * and two Chip Selects per DIMM. + * + * CS0 and CS1 -> MASK0 / DIMM0 + * CS2 and CS3 -> MASK1 / DIMM1 + * + * Family 19h Model 10h introduced systems with one mask per Chip Select, + * and two Chip Selects per DIMM. + * + * CS0 -> MASK0 -> DIMM0 + * CS1 -> MASK1 -> DIMM0 + * CS2 -> MASK2 -> DIMM1 + * CS3 -> MASK3 -> DIMM1 + * + * Keep the mask number equal to the Chip Select number for newer systems, + * and shift the mask number for older systems. */ dimm = csrow_nr >> 1; + if (!fam_type->flags.zn_regs_v2) + cs_mask_nr >>= 1; + /* Asymmetric dual-rank DIMM support. 
*/ if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY)) - addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm]; + addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr]; else - addr_mask_orig = pvt->csels[umc].csmasks[dimm]; + addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr]; /* * The number of zero bits in the mask is equal to the number of bits @@ -2222,6 +2306,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "K8", .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, + .max_mcs = 2, .ops = { .early_channel_count = k8_early_channel_count, .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, @@ -2232,6 +2317,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F10h", .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2242,6 +2328,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F15h", .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2252,6 +2339,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F15h_M30h", .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2262,6 +2350,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F15h_M60h", .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2272,6 +2361,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F16h", .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2282,6 +2372,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F16h_M30h", .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1, .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2, + .max_mcs = 2, .ops = { .early_channel_count = f1x_early_channel_count, .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, @@ -2292,6 +2383,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F17h", .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0, .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6, + .max_mcs = 2, .ops = { .early_channel_count = f17_early_channel_count, .dbam_to_cs = f17_addr_mask_to_cs_size, @@ -2301,6 +2393,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F17h_M10h", .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0, .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6, + .max_mcs = 2, .ops = { .early_channel_count = f17_early_channel_count, .dbam_to_cs = f17_addr_mask_to_cs_size, @@ -2310,6 +2403,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F17h_M30h", .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0, .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6, + .max_mcs = 8, .ops = { .early_channel_count = f17_early_channel_count, .dbam_to_cs = f17_addr_mask_to_cs_size, @@ -2319,6 +2413,7 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F17h_M60h", .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0, .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6, + .max_mcs = 2, .ops = { .early_channel_count = 
f17_early_channel_count, .dbam_to_cs = f17_addr_mask_to_cs_size, @@ -2328,6 +2423,17 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F17h_M70h", .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0, .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6, + .max_mcs = 2, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F18_M06H_CPUS] = { + .ctl_name = "F18h_M06h", + .f0_id = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F0, + .f6_id = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F6, + .max_mcs = 2, .ops = { .early_channel_count = f17_early_channel_count, .dbam_to_cs = f17_addr_mask_to_cs_size, @@ -2337,6 +2443,18 @@ static struct amd64_family_type family_types[] = { .ctl_name = "F19h", .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0, .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6, + .max_mcs = 8, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F19_M10H_CPUS] = { + .ctl_name = "F19h_M10h", + .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6, + .max_mcs = 12, + .flags.zn_regs_v2 = 1, .ops = { .early_channel_count = f17_early_channel_count, .dbam_to_cs = f17_addr_mask_to_cs_size, @@ -2602,6 +2720,9 @@ static inline void decode_bus_error(int node_id, struct mce *m) */ static int find_umc_channel(struct mce *m) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + return (m->ipid & GENMASK(23, 0)) >> 20; return (m->ipid & GENMASK(31, 0)) >> 20; } @@ -2725,6 +2846,14 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt) } } +static void determine_ecc_sym_sz_f18h_m4h(struct amd64_pvt *pvt, int channel) +{ + if (pvt->umc[channel].ecc_ctrl & BIT(8)) + pvt->ecc_sym_sz = 16; + else if (pvt->umc[channel].ecc_ctrl & BIT(7)) + pvt->ecc_sym_sz = 8; +} + static void determine_ecc_sym_sz(struct amd64_pvt *pvt) { pvt->ecc_sym_sz = 4; @@ -2735,6 +2864,15 @@ static void determine_ecc_sym_sz(struct amd64_pvt *pvt) for_each_umc(i) { /* Check enabled channels only: */ if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + (boot_cpu_data.x86_model == 0x4 || + boot_cpu_data.x86_model == 0x5) && + (pvt->umc[i].umc_cfg & GENMASK(2, 0)) == 0x1) { + determine_ecc_sym_sz_f18h_m4h(pvt, i); + return; + } + if (pvt->umc[i].ecc_ctrl & BIT(9)) { pvt->ecc_sym_sz = 16; return; @@ -2769,11 +2907,14 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt) /* Read registers from each UMC */ for_each_umc(i) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); - umc_base = get_umc_base(i); umc = &pvt->umc[i]; - amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg); + amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg); amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg); amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl); amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl); @@ -2857,7 +2998,9 @@ static void read_mc_regs(struct amd64_pvt *pvt) read_dct_base_mask(pvt); determine_memory_type(pvt); - edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); + + if (!pvt->umc) + edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]); determine_ecc_sym_sz(pvt); @@ -2955,7 +3098,7 @@ static int init_csrows_df(struct mem_ctl_info *mci) pvt->mc_node_id, cs); dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs); - dimm->mtype = pvt->dram_type; + dimm->mtype = pvt->umc[umc].dram_type; 
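+		/*
+		 * The DIMM type now comes from the per-UMC cache filled in by
+		 * determine_memory_type_df(), since the type may differ from
+		 * one controller to the next.
+		 */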
dimm->edac_mode = edac_mode; dimm->dtype = dev_type; dimm->grain = 64; @@ -3302,8 +3445,7 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt) } } -static void setup_mci_misc_attrs(struct mem_ctl_info *mci, - struct amd64_family_type *fam) +static void setup_mci_misc_attrs(struct mem_ctl_info *mci) { struct amd64_pvt *pvt = mci->pvt_info; @@ -3322,7 +3464,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci, mci->edac_cap = determine_edac_cap(pvt); mci->mod_name = EDAC_MOD_STR; - mci->ctl_name = fam->ctl_name; + mci->ctl_name = fam_type->ctl_name; mci->dev_name = pci_name(pvt->F3); mci->ctl_page_to_phys = NULL; @@ -3336,8 +3478,6 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci, */ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) { - struct amd64_family_type *fam_type = NULL; - pvt->ext_model = boot_cpu_data.x86_model >> 4; pvt->stepping = boot_cpu_data.x86_stepping; pvt->model = boot_cpu_data.x86_model; @@ -3397,16 +3537,49 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) pvt->ops = &family_types[F17_M70H_CPUS].ops; break; } - /* fall through */ - case 0x18: fam_type = &family_types[F17_CPUS]; pvt->ops = &family_types[F17_CPUS].ops; + break; - if (pvt->fam == 0x18) - family_types[F17_CPUS].ctl_name = "F18h"; + case 0x18: + if (pvt->model == 0x4) { + fam_type = &family_types[F17_M30H_CPUS]; + pvt->ops = &family_types[F17_M30H_CPUS].ops; + family_types[F17_M30H_CPUS].max_mcs = 3; + family_types[F17_M30H_CPUS].ctl_name = "F18h_M04h"; + break; + } else if (pvt->model == 0x5) { + fam_type = &family_types[F17_M30H_CPUS]; + pvt->ops = &family_types[F17_M30H_CPUS].ops; + family_types[F17_M30H_CPUS].max_mcs = 1; + family_types[F17_M30H_CPUS].ctl_name = "F18h_M05h"; + break; + } else if (pvt->model == 0x6) { + fam_type = &family_types[F18_M06H_CPUS]; + pvt->ops = &family_types[F18_M06H_CPUS].ops; + break; + } + fam_type = &family_types[F17_CPUS]; + pvt->ops = &family_types[F17_CPUS].ops; + family_types[F17_CPUS].ctl_name = "F18h"; break; case 0x19: + if (pvt->model >= 0x10 && pvt->model <= 0x1f) { + fam_type = &family_types[F19_M10H_CPUS]; + pvt->ops = &family_types[F19_M10H_CPUS].ops; + break; + } else if (pvt->model >= 0x20 && pvt->model <= 0x2f) { + fam_type = &family_types[F17_M70H_CPUS]; + pvt->ops = &family_types[F17_M70H_CPUS].ops; + fam_type->ctl_name = "F19h_M20h"; + break; + } else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) { + fam_type = &family_types[F19_M10H_CPUS]; + pvt->ops = &family_types[F19_M10H_CPUS].ops; + fam_type->ctl_name = "F19h_MA0h"; + break; + } fam_type = &family_types[F19_CPUS]; pvt->ops = &family_types[F19_CPUS].ops; family_types[F19_CPUS].ctl_name = "F19h"; @@ -3435,51 +3608,15 @@ static const struct attribute_group *amd64_edac_attr_groups[] = { NULL }; -/* Set the number of Unified Memory Controllers in the system. 
*/ -static void compute_num_umcs(void) -{ - u8 model = boot_cpu_data.x86_model; - - if (boot_cpu_data.x86 < 0x17) - return; - - if (model >= 0x30 && model <= 0x3f) - num_umcs = 8; - else - num_umcs = 2; - - edac_dbg(1, "Number of UMCs: %x", num_umcs); -} - -static int init_one_instance(unsigned int nid) +static int hw_info_get(struct amd64_pvt *pvt) { - struct pci_dev *F3 = node_to_amd_nb(nid)->misc; - struct amd64_family_type *fam_type = NULL; - struct mem_ctl_info *mci = NULL; - struct edac_mc_layer layers[2]; - struct amd64_pvt *pvt = NULL; u16 pci_id1, pci_id2; - int err = 0, ret; - - ret = -ENOMEM; - pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); - if (!pvt) - goto err_ret; - - pvt->mc_node_id = nid; - pvt->F3 = F3; - - ret = -EINVAL; - fam_type = per_family_init(pvt); - if (!fam_type) - goto err_free; + int ret = -EINVAL; if (pvt->fam >= 0x17) { - pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL); - if (!pvt->umc) { - ret = -ENOMEM; - goto err_free; - } + pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL); + if (!pvt->umc) + return -ENOMEM; pci_id1 = fam_type->f0_id; pci_id2 = fam_type->f6_id; @@ -3488,21 +3625,37 @@ static int init_one_instance(unsigned int nid) pci_id2 = fam_type->f2_id; } - err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2); - if (err) - goto err_post_init; + ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2); + if (ret) + return ret; read_mc_regs(pvt); + return 0; +} + +static void hw_info_put(struct amd64_pvt *pvt) +{ + if (pvt->F0 || pvt->F1) + free_mc_sibling_devs(pvt); + + kfree(pvt->umc); +} + +static int init_one_instance(struct amd64_pvt *pvt) +{ + struct mem_ctl_info *mci = NULL; + struct edac_mc_layer layers[2]; + int ret = -EINVAL; + /* * We need to determine how many memory channels there are. Then use * that information for calculating the size of the dynamic instance * tables in the 'mci' structure. */ - ret = -EINVAL; pvt->channel_count = pvt->ops->early_channel_count(pvt); if (pvt->channel_count < 0) - goto err_siblings; + return ret; ret = -ENOMEM; layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; @@ -3514,24 +3667,18 @@ static int init_one_instance(unsigned int nid) * Always allocate two channels since we can have setups with DIMMs on * only one channel. Also, this simplifies handling later for the price * of a couple of KBs tops. - * - * On Fam17h+, the number of controllers may be greater than two. So set - * the size equal to the maximum number of UMCs. 
*/ - if (pvt->fam >= 0x17) - layers[1].size = num_umcs; - else - layers[1].size = 2; + layers[1].size = fam_type->max_mcs; layers[1].is_virt_csrow = false; - mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0); + mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0); if (!mci) - goto err_siblings; + return ret; mci->pvt_info = pvt; mci->pdev = &pvt->F3->dev; - setup_mci_misc_attrs(mci, fam_type); + setup_mci_misc_attrs(mci); if (init_csrows(mci)) mci->edac_cap = EDAC_FLAG_NONE; @@ -3539,31 +3686,17 @@ static int init_one_instance(unsigned int nid) ret = -ENODEV; if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) { edac_dbg(1, "failed edac_mc_add_mc()\n"); - goto err_add_mc; + edac_mc_free(mci); + return ret; } return 0; - -err_add_mc: - edac_mc_free(mci); - -err_siblings: - free_mc_sibling_devs(pvt); - -err_post_init: - if (pvt->fam >= 0x17) - kfree(pvt->umc); - -err_free: - kfree(pvt); - -err_ret: - return ret; } static int probe_one_instance(unsigned int nid) { struct pci_dev *F3 = node_to_amd_nb(nid)->misc; + struct amd64_pvt *pvt = NULL; struct ecc_settings *s; int ret; @@ -3574,6 +3707,21 @@ static int probe_one_instance(unsigned int nid) ecc_stngs[nid] = s; + pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); + if (!pvt) + goto err_settings; + + pvt->mc_node_id = nid; + pvt->F3 = F3; + + fam_type = per_family_init(pvt); + if (!fam_type) + goto err_enable; + + ret = hw_info_get(pvt); + if (ret < 0) + goto err_enable; + if (!ecc_enabled(F3, nid)) { ret = 0; @@ -3590,7 +3738,7 @@ static int probe_one_instance(unsigned int nid) goto err_enable; } - ret = init_one_instance(nid); + ret = init_one_instance(pvt); if (ret < 0) { amd64_err("Error probing instance: %d\n", nid); @@ -3603,6 +3751,10 @@ static int probe_one_instance(unsigned int nid) return ret; err_enable: + hw_info_put(pvt); + kfree(pvt); + +err_settings: kfree(s); ecc_stngs[nid] = NULL; @@ -3629,14 +3781,13 @@ static void remove_one_instance(unsigned int nid) restore_ecc_error_reporting(s, nid, F3); - free_mc_sibling_devs(pvt); - kfree(ecc_stngs[nid]); ecc_stngs[nid] = NULL; /* Free the EDAC CORE resources */ mci->pvt_info = NULL; + hw_info_put(pvt); kfree(pvt); edac_mc_free(mci); } @@ -3669,6 +3820,7 @@ static int __init amd64_edac_init(void) { const char *owner; int err = -ENODEV; + u16 instance_num; int i; owner = edac_get_owner(); @@ -3683,8 +3835,13 @@ static int __init amd64_edac_init(void) opstate_init(); + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + err = -ENOMEM; - ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); + ecc_stngs = kcalloc(instance_num, sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) goto err_free; @@ -3692,9 +3849,7 @@ static int __init amd64_edac_init(void) if (!msrs) goto err_free; - compute_num_umcs(); - - for (i = 0; i < amd_nb_num(); i++) { + for (i = 0; i < instance_num; i++) { err = probe_one_instance(i); if (err) { /* unwind properly */ @@ -3744,6 +3899,7 @@ static int __init amd64_edac_init(void) static void __exit amd64_edac_exit(void) { + u16 instance_num; int i; if (pci_ctl) @@ -3757,7 +3913,12 @@ static void __exit amd64_edac_exit(void) else amd_unregister_ecc_decoder(decode_bus_error); - for (i = 0; i < amd_nb_num(); i++) + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + + for (i = 0; i < instance_num; i++) remove_one_instance(i); kfree(ecc_stngs); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 
d3e65745516e9323cefb9638319620a0705f3ad0..3c82cedc4eec9cf0aef9ae99b218869b90edf332 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -96,7 +96,7 @@ /* Hardware limit on ChipSelect rows per MC and processors per system */ #define NUM_CHIPSELECTS 8 #define DRAM_RANGES 8 -#define NUM_CONTROLLERS 8 +#define NUM_CONTROLLERS 12 #define ON true #define OFF false @@ -126,6 +126,11 @@ #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 #define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 #define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 +#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad +#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3 + +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F0 0x14b0 +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F6 0x14b6 /* * Function 1 - Address Map @@ -269,8 +274,11 @@ #define UMCCH_BASE_ADDR_SEC 0x10 #define UMCCH_ADDR_MASK 0x20 #define UMCCH_ADDR_MASK_SEC 0x28 +#define UMCCH_ADDR_MASK_SEC_DDR5 0x30 #define UMCCH_ADDR_CFG 0x30 +#define UMCCH_ADDR_CFG_DDR5 0x40 #define UMCCH_DIMM_CFG 0x80 +#define UMCCH_DIMM_CFG_DDR5 0x90 #define UMCCH_UMC_CFG 0x100 #define UMCCH_SDP_CTRL 0x104 #define UMCCH_ECC_CTRL 0x14C @@ -297,7 +305,9 @@ enum amd_families { F17_M30H_CPUS, F17_M60H_CPUS, F17_M70H_CPUS, + F18_M06H_CPUS, F19_CPUS, + F19_M10H_CPUS, NUM_FAMILIES, }; @@ -338,6 +348,9 @@ struct amd64_umc { u32 sdp_ctrl; /* SDP Control reg */ u32 ecc_ctrl; /* DRAM ECC Control reg */ u32 umc_cap_hi; /* Capabilities High reg */ + + /* cache the dram_type */ + enum mem_type dram_type; }; struct amd64_pvt { @@ -385,7 +398,12 @@ struct amd64_pvt { /* place to store error injection parameters prior to issue */ struct error_injection injection; - /* cache the dram_type */ + /* + * cache the dram_type + * + * NOTE: Don't use this for Family 17h and later. + * Use dram_type in struct amd64_umc instead. + */ enum mem_type dram_type; struct amd64_umc *umc; /* UMC registers */ @@ -482,9 +500,22 @@ struct low_ops { unsigned cs_mode, int cs_mask_nr); }; +struct amd64_family_flags { + /* + * Indicates that the system supports the new register offsets, etc. + * first introduced with Family 19h Model 10h. + */ + __u64 zn_regs_v2 : 1, + + __reserved : 63; +}; + struct amd64_family_type { const char *ctl_name; u16 f0_id, f1_id, f2_id, f6_id; + /* Maximum number of memory controllers per die/node. 
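+	 * NUM_CONTROLLERS near the top of this header is raised to 12 to
+	 * cover the largest value used here (Family 19h Model 10h).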
*/ + u8 max_mcs; + struct amd64_family_flags flags; struct low_ops ops; }; diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index e73ca303f1a7e37ebdfbb7b877986475f585857b..d37fead570396e7b46b610f31c176f6dec370320 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -214,6 +214,9 @@ const char * const edac_mem_types[] = { [MEM_DDR4] = "Unbuffered-DDR4", [MEM_RDDR4] = "Registered-DDR4", [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM", + [MEM_DDR5] = "Unbuffered-DDR5", + [MEM_RDDR5] = "Registered-DDR5", + [MEM_LRDDR5] = "Load-Reduced-DDR5-RAM", [MEM_NVDIMM] = "Non-volatile-RAM", }; EXPORT_SYMBOL_GPL(edac_mem_types); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 4d002121ef5c916337a6656c5f651550c0de19b7..80a1fbcfad068981b2c41017b317b4afb2aff5f5 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1002,8 +1002,13 @@ static void decode_smca_error(struct mce *m) pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]); } - if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc) - decode_dram_ecc(topology_die_id(m->extcpu), m); + if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + decode_dram_ecc(topology_logical_die_id(m->extcpu), m); + else + decode_dram_ecc(topology_die_id(m->extcpu), m); + } } static inline void amd_decode_err_code(u16 ec) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index f96fd8efb45a1b27f4e5be35804d36603b2f4d4b..8c5cc3284d6eda4e7b9257c64cee95c791c007b0 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,13 +1,21 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h + * processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch + * Copyright (c) 2020 Guenter Roeck + * + * Implementation notes: + * - CCD register address information as well as the calculation to + * convert raw register values is from https://github.com/ocerman/zenpower. + * The information is not confirmed from chip datasheets, but experiments + * suggest that it provides reasonable temperature values. 
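+ * - Hygon Family 18h Model 4h and later parts keep CCD temperatures in
+ *   two register banks; struct hygon_private and hygon_read_temp() below
+ *   handle the second bank.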
*/ +#include #include #include -#include #include #include #include @@ -31,22 +39,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #endif /* CPUID function 0x80000001, ebx */ -#define CPUID_PKGTYPE_MASK 0xf0000000 +#define CPUID_PKGTYPE_MASK GENMASK(31, 28) #define CPUID_PKGTYPE_F 0x00000000 #define CPUID_PKGTYPE_AM2R2_AM3 0x10000000 /* DRAM controller (PCI function 2) */ #define REG_DCT0_CONFIG_HIGH 0x094 -#define DDR3_MODE 0x00000100 +#define DDR3_MODE BIT(8) /* miscellaneous (PCI function 3) */ #define REG_HARDWARE_THERMAL_CONTROL 0x64 -#define HTC_ENABLE 0x00000001 +#define HTC_ENABLE BIT(0) #define REG_REPORTED_TEMPERATURE 0xa4 #define REG_NORTHBRIDGE_CAPABILITIES 0xe8 -#define NB_CAP_HTC 0x00000400 +#define NB_CAP_HTC BIT(10) /* * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL @@ -57,8 +65,41 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64 #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 -/* F17h M01h Access througn SMN */ -#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800 +/* Common for Zen CPU families (Family 17h and 18h and 19h) */ +#define ZEN_REPORTED_TEMP_CTRL_BASE 0x00059800 + +#define ZEN_CCD_TEMP(offset, x) (ZEN_REPORTED_TEMP_CTRL_BASE + \ + (offset) + ((x) * 4)) +#define ZEN_CCD_TEMP_VALID BIT(11) +#define ZEN_CCD_TEMP_MASK GENMASK(10, 0) + +#define ZEN_CUR_TEMP_SHIFT 21 +#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19) + +#define ZEN_SVI_BASE 0x0005A000 + +/* F17h thermal registers through SMN */ +#define F17H_M01H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0xc) +#define F17H_M01H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10) +#define F17H_M31H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14) +#define F17H_M31H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10) + +#define F17H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */ +#define F17H_M01H_CFACTOR_ISOC 250000 /* 0.25A / LSB */ +#define F17H_M31H_CFACTOR_ICORE 1000000 /* 1A / LSB */ +#define F17H_M31H_CFACTOR_ISOC 310000 /* 0.31A / LSB */ + +/* F19h thermal registers through SMN */ +#define F19H_M01_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14) +#define F19H_M01_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10) + +#define F19H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */ +#define F19H_M01H_CFACTOR_ISOC 310000 /* 0.31A / LSB */ + +struct hygon_private { + u32 index_2nd; + u32 offset_2nd; +}; struct k10temp_data { struct pci_dev *pdev; @@ -66,9 +107,19 @@ struct k10temp_data { void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); int temp_offset; u32 temp_adjust_mask; - bool show_tdie; + u32 show_temp; + bool is_zen; + u32 ccd_offset; + void *priv; }; +#define TCTL_BIT 0 +#define TDIE_BIT 1 +#define TCCD_BIT(x) ((x) + 2) + +#define HAVE_TEMP(d, channel) ((d)->show_temp & BIT(channel)) +#define HAVE_TDIE(d) HAVE_TEMP(d, TDIE_BIT) + struct tctl_offset { u8 model; char const *id; @@ -117,136 +168,178 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval); } -static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) +static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval) { amd_smn_read(amd_pci_dev_to_node_id(pdev), - F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); + ZEN_REPORTED_TEMP_CTRL_BASE, regval); } -static unsigned int get_raw_temp(struct k10temp_data *data) +static long get_raw_temp(struct k10temp_data *data) { - unsigned int temp; u32 regval; + long temp; data->read_tempreg(data->pdev, ®val); - temp = (regval >> 21) * 125; + temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125; if (regval & data->temp_adjust_mask) temp -= 49000; return temp; } -static ssize_t 
-				struct device_attribute *attr, char *buf)
-{
-	struct k10temp_data *data = dev_get_drvdata(dev);
-	unsigned int temp = get_raw_temp(data);
-
-	if (temp > data->temp_offset)
-		temp -= data->temp_offset;
-	else
-		temp = 0;
-
-	return sprintf(buf, "%u\n", temp);
-}
+static const char *k10temp_temp_label[] = {
+	"Tctl",
+	"Tdie",
+	"Tccd1",
+	"Tccd2",
+	"Tccd3",
+	"Tccd4",
+	"Tccd5",
+	"Tccd6",
+	"Tccd7",
+	"Tccd8",
+};
 
-static ssize_t temp2_input_show(struct device *dev,
-				struct device_attribute *devattr, char *buf)
+static int k10temp_read_labels(struct device *dev,
+			       enum hwmon_sensor_types type,
+			       u32 attr, int channel, const char **str)
 {
-	struct k10temp_data *data = dev_get_drvdata(dev);
-	unsigned int temp = get_raw_temp(data);
-
-	return sprintf(buf, "%u\n", temp);
+	switch (type) {
+	case hwmon_temp:
+		*str = k10temp_temp_label[channel];
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
 }
 
-static ssize_t temp_label_show(struct device *dev,
-			       struct device_attribute *devattr, char *buf)
+static void hygon_read_temp(struct k10temp_data *data, int channel,
+			    u32 *regval)
 {
-	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
-
-	return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
-}
+	struct hygon_private *h_priv;
 
-static ssize_t temp1_max_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%d\n", 70 * 1000);
+	h_priv = (struct hygon_private *)data->priv;
+	if ((channel - 2) < h_priv->index_2nd)
+		amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+			     ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
+			     regval);
+	else
+		amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
+			     ZEN_CCD_TEMP(h_priv->offset_2nd,
					  channel - 2 - h_priv->index_2nd),
+			     regval);
 }
 
-static ssize_t temp_crit_show(struct device *dev,
-			      struct device_attribute *devattr, char *buf)
+static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
+			     long *val)
 {
-	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 	struct k10temp_data *data = dev_get_drvdata(dev);
-	int show_hyst = attr->index;
 	u32 regval;
-	int value;
 
-	data->read_htcreg(data->pdev, &regval);
-	value = ((regval >> 16) & 0x7f) * 500 + 52000;
-	if (show_hyst)
-		value -= ((regval >> 24) & 0xf) * 500;
-	return sprintf(buf, "%d\n", value);
+	switch (attr) {
+	case hwmon_temp_input:
+		switch (channel) {
+		case 0:		/* Tctl */
+			*val = get_raw_temp(data);
+			if (*val < 0)
+				*val = 0;
+			break;
+		case 1:		/* Tdie */
+			*val = get_raw_temp(data) - data->temp_offset;
+			if (*val < 0)
+				*val = 0;
+			break;
+		case 2 ... 9:	/* Tccd{1-8} */
+			if (hygon_f18h_m4h())
+				hygon_read_temp(data, channel, &regval);
+			else
+				amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
					     ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
+					     &regval);
+			*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+		break;
+	case hwmon_temp_max:
+		*val = 70 * 1000;
+		break;
+	case hwmon_temp_crit:
+		data->read_htcreg(data->pdev, &regval);
+		*val = ((regval >> 16) & 0x7f) * 500 + 52000;
+		break;
+	case hwmon_temp_crit_hyst:
+		data->read_htcreg(data->pdev, &regval);
+		*val = (((regval >> 16) & 0x7f)
+			- ((regval >> 24) & 0xf)) * 500 + 52000;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
 }
 
-static DEVICE_ATTR_RO(temp1_input);
-static DEVICE_ATTR_RO(temp1_max);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
-static DEVICE_ATTR_RO(temp2_input);
-static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);
+static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
+			u32 attr, int channel, long *val)
+{
+	switch (type) {
+	case hwmon_temp:
+		return k10temp_read_temp(dev, attr, channel, val);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
 
-static umode_t k10temp_is_visible(struct kobject *kobj,
-				  struct attribute *attr, int index)
+static umode_t k10temp_is_visible(const void *_data,
+				  enum hwmon_sensor_types type,
+				  u32 attr, int channel)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
-	struct k10temp_data *data = dev_get_drvdata(dev);
+	const struct k10temp_data *data = _data;
 	struct pci_dev *pdev = data->pdev;
 	u32 reg;
 
-	switch (index) {
-	case 0 ... 1:	/* temp1_input, temp1_max */
-	default:
-		break;
-	case 2 ... 3:	/* temp1_crit, temp1_crit_hyst */
-		if (!data->read_htcreg)
-			return 0;
-
-		pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
-				      &reg);
-		if (!(reg & NB_CAP_HTC))
-			return 0;
-
-		data->read_htcreg(data->pdev, &reg);
-		if (!(reg & HTC_ENABLE))
-			return 0;
-		break;
-	case 4 ... 6:	/* temp1_label, temp2_input, temp2_label */
-		if (!data->show_tdie)
+	switch (type) {
+	case hwmon_temp:
+		switch (attr) {
+		case hwmon_temp_input:
+			if (!HAVE_TEMP(data, channel))
+				return 0;
+			break;
+		case hwmon_temp_max:
+			if (channel || data->is_zen)
+				return 0;
+			break;
+		case hwmon_temp_crit:
+		case hwmon_temp_crit_hyst:
+			if (channel || !data->read_htcreg)
+				return 0;
+
+			pci_read_config_dword(pdev,
+					      REG_NORTHBRIDGE_CAPABILITIES,
+					      &reg);
+			if (!(reg & NB_CAP_HTC))
+				return 0;
+
+			data->read_htcreg(data->pdev, &reg);
+			if (!(reg & HTC_ENABLE))
+				return 0;
+			break;
+		case hwmon_temp_label:
+			/* Show temperature labels only on Zen CPUs */
+			if (!data->is_zen || !HAVE_TEMP(data, channel))
+				return 0;
+			break;
+		default:
 			return 0;
+		}
 		break;
+	default:
+		return 0;
 	}
-	return attr->mode;
+	return 0444;
 }
 
-static struct attribute *k10temp_attrs[] = {
-	&dev_attr_temp1_input.attr,
-	&dev_attr_temp1_max.attr,
-	&sensor_dev_attr_temp1_crit.dev_attr.attr,
-	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
-	&sensor_dev_attr_temp1_label.dev_attr.attr,
-	&dev_attr_temp2_input.attr,
-	&sensor_dev_attr_temp2_label.dev_attr.attr,
-	NULL
-};
-
-static const struct attribute_group k10temp_group = {
-	.attrs = k10temp_attrs,
-	.is_visible = k10temp_is_visible,
-};
-__ATTRIBUTE_GROUPS(k10temp);
-
 static bool has_erratum_319(struct pci_dev *pdev)
 {
 	u32 pkg_type, reg_dram_cfg;
@@ -281,15 +374,96 @@ static bool has_erratum_319(struct pci_dev *pdev)
 	       (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
 }
 
-static int k10temp_probe(struct pci_dev *pdev,
-			 const struct pci_device_id *id)
+static const struct hwmon_channel_info *k10temp_info[] = {
+	HWMON_CHANNEL_INFO(temp,
+			   HWMON_T_INPUT | HWMON_T_MAX |
+			   HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+			   HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL,
+			   HWMON_T_INPUT | HWMON_T_LABEL),
+	HWMON_CHANNEL_INFO(in,
+			   HWMON_I_INPUT | HWMON_I_LABEL,
+			   HWMON_I_INPUT | HWMON_I_LABEL),
+	HWMON_CHANNEL_INFO(curr,
+			   HWMON_C_INPUT | HWMON_C_LABEL,
+			   HWMON_C_INPUT | HWMON_C_LABEL),
+	NULL
+};
+
+static const struct hwmon_ops k10temp_hwmon_ops = {
+	.is_visible = k10temp_is_visible,
+	.read = k10temp_read,
+	.read_string = k10temp_read_labels,
+};
+
+static const struct hwmon_chip_info k10temp_chip_info = {
+	.ops = &k10temp_hwmon_ops,
+	.info = k10temp_info,
+};
+
+static void k10temp_get_ccd_support(struct pci_dev *pdev,
+				    struct k10temp_data *data, int limit)
+{
+	u32 regval;
+	int i;
+
+	for (i = 0; i < limit; i++) {
+		amd_smn_read(amd_pci_dev_to_node_id(pdev),
+			     ZEN_CCD_TEMP(data->ccd_offset, i), &regval);
+		if (regval & ZEN_CCD_TEMP_VALID)
+			data->show_temp |= BIT(TCCD_BIT(i));
+	}
+}
+
+static void k10temp_get_ccd_support_2nd(struct pci_dev *pdev,
+					struct k10temp_data *data, int limit)
+{
+	struct hygon_private *h_priv;
+	u32 regval;
+	int i;
+
+	h_priv = (struct hygon_private *)data->priv;
+	for (i = h_priv->index_2nd; i < limit; i++) {
+		amd_smn_read(amd_pci_dev_to_node_id(pdev),
+			     ZEN_CCD_TEMP(h_priv->offset_2nd,
					  i - h_priv->index_2nd),
+			     &regval);
+		if (regval & ZEN_CCD_TEMP_VALID)
+			data->show_temp |= BIT(TCCD_BIT(i));
+	}
+}
+
+static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	int unreliable = has_erratum_319(pdev);
 	struct device *dev = &pdev->dev;
+	struct hygon_private *h_priv;
 	struct k10temp_data *data;
 	struct device *hwmon_dev;
+	u8 df_id;
 	int i;
 
+	if (hygon_f18h_m4h()) {
+		if (get_df_id(pdev, &df_id)) {
+			pr_err("Get DF ID failed.\n");
+			return -ENODEV;
+		}
+
+		/*
+		 * The temperature should be read from the devices
+		 * with id < 4.
+		 */
+		if (df_id >= 4)
+			return 0;
+	}
+
 	if (unreliable) {
 		if (!force) {
 			dev_err(dev,
@@ -305,16 +479,64 @@ static int k10temp_probe(struct pci_dev *pdev,
 		return -ENOMEM;
 
 	data->pdev = pdev;
+	data->show_temp |= BIT(TCTL_BIT);	/* Always show Tctl */
 
 	if (boot_cpu_data.x86 == 0x15 &&
 	    ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
 	     (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
 		data->read_htcreg = read_htcreg_nb_f15;
 		data->read_tempreg = read_tempreg_nb_f15;
-	} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
-		data->temp_adjust_mask = 0x80000;
-		data->read_tempreg = read_tempreg_nb_f17;
-		data->show_tdie = true;
+	} else if (boot_cpu_data.x86 == 0x17) {
+		data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
+		data->read_tempreg = read_tempreg_nb_zen;
+		data->is_zen = true;
+
+		switch (boot_cpu_data.x86_model) {
+		case 0x1:	/* Zen */
+		case 0x8:	/* Zen+ */
+		case 0x11:	/* Zen APU */
+		case 0x18:	/* Zen+ APU */
+			data->ccd_offset = 0x154;
+			k10temp_get_ccd_support(pdev, data, 4);
+			break;
+		case 0x31:	/* Zen2 Threadripper */
+		case 0x71:	/* Zen2 */
+			data->ccd_offset = 0x154;
+			k10temp_get_ccd_support(pdev, data, 8);
+			break;
+		}
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+		   boot_cpu_data.x86 == 0x18) {
+		data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
+		data->read_tempreg = read_tempreg_nb_zen;
+		data->is_zen = true;
+
+		switch (boot_cpu_data.x86_model) {
+		case 0x4:
+		case 0x5:
+			data->ccd_offset = 0x154;
+			data->priv = devm_kzalloc(dev, sizeof(*h_priv),
+						  GFP_KERNEL);
+			if (!data->priv)
+				return -ENOMEM;
+			h_priv = (struct hygon_private *)data->priv;
+			h_priv->offset_2nd = 0x2f8;
+			h_priv->index_2nd = 3;
+			k10temp_get_ccd_support(pdev, data, h_priv->index_2nd);
+			k10temp_get_ccd_support_2nd(pdev, data, 8);
+			break;
+		}
+	} else if (boot_cpu_data.x86 == 0x19) {
+		data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
+		data->read_tempreg = read_tempreg_nb_zen;
+		data->is_zen = true;
+
+		switch (boot_cpu_data.x86_model) {
+		case 0x0 ... 0x1:	/* Zen3 */
+			data->ccd_offset = 0x154;
+			k10temp_get_ccd_support(pdev, data, 8);
+			break;
+		}
 	} else {
 		data->read_htcreg = read_htcreg_pci;
 		data->read_tempreg = read_tempreg_pci;
@@ -325,13 +547,15 @@ static int k10temp_probe(struct pci_dev *pdev,
 		if (boot_cpu_data.x86 == entry->model &&
 		    strstr(boot_cpu_data.x86_model_id, entry->id)) {
+			data->show_temp |= BIT(TDIE_BIT);	/* show Tdie */
 			data->temp_offset = entry->offset;
 			break;
 		}
 	}
 
-	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
-							   k10temp_groups);
+	hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data,
+							 &k10temp_chip_info,
+							 NULL);
 	return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
@@ -351,7 +575,10 @@ static const struct pci_device_id k10temp_id_table[] = {
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
 	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index e5fe6fe0f96ae12b8fe0ca1c5688089c2aad0033..b0fb7839a33b36a977a28bf200c17291fd4e9925 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -130,6 +130,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
 	{ "APMC0D0F", 0 },
 	{ "HISI02A1", 0 },
 	{ "HISI02A2", 0 },
+	{ "HYGO0010", ACCESS_INTR_MASK },
 	{ "PHYT0003", 0 },
 	{ }
 };
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 69740a4ff1db2bd7908f4f93ce6c61e384217040..7da713d610738cb3796f62328b8b27269ad16800 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -924,8 +924,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		bool notify_imc = false;
 
 		is_sb800 = true;
 
-		if ((dev->vendor == PCI_VENDOR_ID_AMD ||
-		     dev->vendor == PCI_VENDOR_ID_HYGON) &&
+		if (dev->vendor == PCI_VENDOR_ID_AMD &&
 		    dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
 			u8 imc;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index be098f15131e2a898d68879702084445b7a84da3..6e1732f582a4f9bed76aecde4474255f2197a6ab 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2484,6 +2484,9 @@ static void __init free_iommu_resources(void)
 /* SB IOAPIC is always on this device in AMD systems */
 #define IOAPIC_SB_DEVID		((0x00 << 8) | PCI_DEVFN(0x14, 0))
 
+/* SB IOAPIC for Hygon family 18h model 4h is on device 0xb */
+#define IOAPIC_SB_DEVID_FAM18H_M4H	((0x00 << 8) | PCI_DEVFN(0xb, 0))
+
 static bool __init check_ioapic_information(void)
 {
 	const char *fw_bug = FW_BUG;
@@ -2509,7 +2512,12 @@ static bool __init check_ioapic_information(void)
 			pr_err("%s: IOAPIC[%d] not in IVRS table\n",
 			       fw_bug, id);
 			ret = false;
-		} else if (devid == IOAPIC_SB_DEVID) {
+		} else if (devid == IOAPIC_SB_DEVID ||
+			   (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+			    boot_cpu_data.x86 == 0x18 &&
+			    boot_cpu_data.x86_model >= 0x4 &&
+			    boot_cpu_data.x86_model <= 0xf &&
+			    devid == IOAPIC_SB_DEVID_FAM18H_M4H)) {
 			has_sb_ioapic = true;
 			ret = true;
 		}
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index abb37659de3437fc92d0c2c37c32024792561c5f..03a0f3c25864467bd5190ea65757f30ae2e03118 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1140,6 +1140,7 @@ static const struct ntb_dev_data dev_data[] = {
 static const struct pci_device_id amd_ntb_pci_tbl[] = {
 	{ PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
 	{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
+	{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl);
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index 2ecd8752b088b6cfe1bdb97fd57511d72778c562..9bfd71358f2f9d4cd70d1edd7b8c95945be76b41 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -99,6 +99,17 @@ unsigned int mc146818_get_time(struct rtc_time *time)
 }
 EXPORT_SYMBOL_GPL(mc146818_get_time);
 
+/* AMD systems don't allow access to AltCentury with DV1 */
+static bool apply_amd_register_a_behavior(void)
+{
+#ifdef CONFIG_X86
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		return true;
+#endif
+	return false;
+}
+
 /* Set the current date and time in the real time clock. */
 int mc146818_set_time(struct rtc_time *time)
 {
@@ -172,7 +183,10 @@ int mc146818_set_time(struct rtc_time *time)
 	save_control = CMOS_READ(RTC_CONTROL);
 	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
 	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+	if (apply_amd_register_a_behavior())
+		CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT);
+	else
+		CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
 
 #ifdef CONFIG_MACH_DECSTATION
 	CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
diff --git a/include/linux/edac.h b/include/linux/edac.h
index c19483b900794cd8a478f265a6873bea054ac712..4bdde1a999ce0eb70d4f53b9098488a4aa8e4ac6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -187,6 +187,9 @@ static inline char *mc_event_error_type(const unsigned int err_type)
  * @MEM_RDDR4:		Registered DDR4 RAM
  *			This is a variant of the DDR4 memories.
  * @MEM_LRDDR4:		Load-Reduced DDR4 memory.
+ * @MEM_DDR5:		Unbuffered DDR5 RAM
+ * @MEM_RDDR5:		Registered DDR5 RAM
+ * @MEM_LRDDR5:		Load-Reduced DDR5 memory.
  * @MEM_NVDIMM:		Non-volatile RAM
  */
 enum mem_type {
@@ -211,6 +214,9 @@ enum mem_type {
 	MEM_DDR4,
 	MEM_RDDR4,
 	MEM_LRDDR4,
+	MEM_DDR5,
+	MEM_RDDR5,
+	MEM_LRDDR5,
 	MEM_NVDIMM,
 };
 
@@ -234,6 +240,9 @@ enum mem_type {
 #define MEM_FLAG_DDR4           BIT(MEM_DDR4)
 #define MEM_FLAG_RDDR4          BIT(MEM_RDDR4)
 #define MEM_FLAG_LRDDR4         BIT(MEM_LRDDR4)
+#define MEM_FLAG_DDR5           BIT(MEM_DDR5)
+#define MEM_FLAG_RDDR5          BIT(MEM_RDDR5)
+#define MEM_FLAG_LRDDR5         BIT(MEM_LRDDR5)
 #define MEM_FLAG_NVDIMM         BIT(MEM_NVDIMM)
 
 /**
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 0661af17a75846cdc320f5a41fa0295922859dcf..1e02058113944797069861023e4f58b119835451 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
 /* 2 values for divider stage reset, others for "testing purposes only" */
 # define RTC_DIV_RESET1		0x60
 # define RTC_DIV_RESET2		0x70
+/* In the AMD BKDG, bits 5 and 6 are reserved; bit 4 selects the DV0 bank */
+# define RTC_AMD_BANK_SELECT	0x10
 
 /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
 # define RTC_RATE_SELECT	0x0F
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4145b6c965c397b80211d9f90df92eb884ebd7c5..5771821d63be87eceb68e44f1313e73d6314980a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2595,6 +2595,7 @@
 #define PCI_VENDOR_ID_ZHAOXIN		0x1d17
 
 #define PCI_VENDOR_ID_HYGON		0x1d94
+#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3	0x14b3
 
 #define PCI_VENDOR_ID_HXT		0x1dbf
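
Note on the temperature arithmetic in the k10temp hunks: get_raw_temp() takes Tctl from bit 21 upward of the reported-temperature register in 0.125 degC steps, subtracting 49 degC when the range-select bit is set, and the Tccd path decodes an 11-bit field the same way with the offset always applied. The following stand-alone sketch replays that arithmetic in user space; it is illustrative only, the register values are invented, and the ZEN_* constants simply mirror those added above (the driver obtains real values via amd_smn_read()).

/*
 * Illustrative sketch, not part of the patch: replays the Tctl and
 * Tccd conversions from the k10temp changes with invented samples.
 */
#include <stdint.h>
#include <stdio.h>

#define ZEN_CUR_TEMP_SHIFT		21
#define ZEN_CUR_TEMP_RANGE_SEL_MASK	(1u << 19)
#define ZEN_CCD_TEMP_VALID		(1u << 11)
#define ZEN_CCD_TEMP_MASK		0x7ffu	/* GENMASK(10, 0) */

/* Tctl in millidegrees Celsius */
static long decode_cur_temp(uint32_t regval)
{
	long temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;

	if (regval & ZEN_CUR_TEMP_RANGE_SEL_MASK)
		temp -= 49000;	/* range-select offset */
	return temp;
}

/* Tccd in millidegrees Celsius; meaningful only if the valid bit is set */
static long decode_ccd_temp(uint32_t regval)
{
	return (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
}

int main(void)
{
	uint32_t cur = 0x48a80000;			/* invented sample */
	uint32_t ccd = ZEN_CCD_TEMP_VALID | 0x2e0;	/* invented sample */

	printf("Tctl: %ld mC\n", decode_cur_temp(cur));
	if (ccd & ZEN_CCD_TEMP_VALID)
		printf("Tccd: %ld mC\n", decode_ccd_temp(ccd));
	return 0;
}

With these samples the program prints 23625 and 43000 millidegrees, i.e. about 23.6 degC and 43 degC, matching what the driver would report through sysfs.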
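The conversion from explicit sysfs attributes to devm_hwmon_device_register_with_info() also fixes the channel layout: channel 0 is Tctl, channel 1 Tdie, channels 2..9 are Tccd1..Tccd8, and hwmon exposes visible channel N as temp(N+1)_* in sysfs, with visibility gated by the show_temp bitmask in k10temp_is_visible(). A small user-space sketch of how a reader would walk those files; the hwmon0 path is an assumption for the example (a real tool would match the device by its name attribute).

/*
 * Illustrative sketch, not part of the patch: walks the temperature
 * channels k10temp exposes after the with_info conversion. Channels
 * hidden by k10temp_is_visible() simply have no sysfs files.
 */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/class/hwmon/hwmon0";	/* assumed k10temp */
	char path[128], label[16];
	long temp;
	int ch;

	for (ch = 1; ch <= 10; ch++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/temp%d_label", base, ch);
		f = fopen(path, "r");
		if (!f)
			continue;	/* channel not visible */
		if (fscanf(f, "%15s", label) != 1)
			label[0] = '\0';
		fclose(f);

		snprintf(path, sizeof(path), "%s/temp%d_input", base, ch);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fscanf(f, "%ld", &temp) == 1)
			printf("%-6s %ld.%03ld C\n", label,
			       temp / 1000, temp % 1000);
		fclose(f);
	}
	return 0;
}

On a Hygon family 18h model 4h/5h part, the same loop transparently covers both CCD register banks, since hygon_read_temp() redirects channels at or beyond index_2nd to the second block at offset_2nd.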
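The mc146818 change deserves a note: on AMD and Hygon parts, bit 4 of RTC register A selects the DV0 bank (bits 5 and 6 are reserved), so the traditional divider-reset write would switch banks and break AltCentury access. The sketch below contrasts the two register-A values computed from the same saved byte; it is illustrative only and the sample value is invented.

/*
 * Illustrative sketch, not part of the patch: the two register-A
 * values mc146818_set_time() would write for the same saved byte.
 */
#include <stdint.h>
#include <stdio.h>

#define RTC_DIV_RESET2		0x70
#define RTC_AMD_BANK_SELECT	0x10

int main(void)
{
	uint8_t save_freq_select = 0x36;	/* invented RTC_FREQ_SELECT reading */
	uint8_t amd_hygon = save_freq_select & ~RTC_AMD_BANK_SELECT;
	uint8_t classic   = save_freq_select | RTC_DIV_RESET2;

	printf("AMD/Hygon write: 0x%02x (bank DV0 selected)\n", amd_hygon);
	printf("classic write:   0x%02x (divider reset)\n", classic);
	return 0;
}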