Subject: [PATCH v7 04/12] EDAC/amd64: Move struct fam_type variables into amd64_pvt structure
From: Muralidhara M K <muralimk@amd.com>

On heterogeneous systems, the GPU nodes are probed after the CPU
nodes and would overwrite the family type set by the CPU nodes.

Remove struct fam_type and make all family-specific assignments
dynamic, getting rid of the static definitions of family_types and ops.
This simplifies adding support for future platforms.

Signed-off-by: Muralidhara M K <muralimk@amd.com>
---
Link:
https://lkml.kernel.org/r/20211028130106.15701-5-nchatrad@amd.com

v6->v7:
* Rebased on top of
https://lore.kernel.org/all/20220202144307.2678405-1-yazen.ghannam@amd.com/

v4->v5:
* Added Reviewed-by from Yazen

v1->v4:
* New change in v4


drivers/edac/amd64_edac.c | 384 +++++++++++++-------------------------
drivers/edac/amd64_edac.h | 64 +++----
2 files changed, 153 insertions(+), 295 deletions(-)

diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 782e286d5390..4cac43840ccc 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,11 +13,9 @@ module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

-static struct amd64_family_type *fam_type;
-
-static inline u32 get_umc_reg(u32 reg)
+static inline u32 get_umc_reg(u32 reg, struct amd64_pvt *pvt)
{
- if (!fam_type->flags.zn_regs_v2)
+ if (!pvt->flags.zn_regs_v2)
return reg;

switch (reg) {
@@ -463,7 +461,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
for (i = 0; i < pvt->csels[dct].m_cnt; i++)

#define for_each_umc(i) \
- for (i = 0; i < fam_type->max_mcs; i++)
+ for (i = 0; i < pvt->max_mcs; i++)

/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
@@ -1859,7 +1857,7 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)

if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
amd_smn_read(pvt->mc_node_id,
- umc_base + get_umc_reg(UMCCH_ADDR_CFG),
+ umc_base + get_umc_reg(UMCCH_ADDR_CFG, pvt),
&tmp);
edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
i, 1 << ((tmp >> 4) & 0x3));
@@ -1935,7 +1933,7 @@ static void prep_chip_selects(struct amd64_pvt *pvt)

for_each_umc(umc) {
pvt->csels[umc].b_cnt = 4;
- pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2;
+ pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
}

} else {
@@ -1975,7 +1973,7 @@ static void read_umc_base_mask(struct amd64_pvt *pvt)
}

umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
- umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC);
+ umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC, pvt);

for_each_chip_select_mask(cs, umc, pvt) {
mask = &pvt->csels[umc].csmasks[cs];
@@ -2046,7 +2044,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
}
}

-static void _determine_memory_type_df(struct amd64_umc *umc)
+static void _determine_memory_type_df(struct amd64_pvt *pvt, struct amd64_umc *umc)
{
if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
umc->dram_type = MEM_EMPTY;
@@ -2057,7 +2055,7 @@ static void _determine_memory_type_df(struct amd64_umc *umc)
* Check if the system supports the "DDR Type" field in UMC Config
* and has DDR5 DIMMs in use.
*/
- if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
+ if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
if (umc->dimm_cfg & BIT(5))
umc->dram_type = MEM_LRDDR5;
else if (umc->dimm_cfg & BIT(4))
@@ -2082,7 +2080,7 @@ static void determine_memory_type_df(struct amd64_pvt *pvt)
for_each_umc(i) {
umc = &pvt->umc[i];

- _determine_memory_type_df(umc);
+ _determine_memory_type_df(pvt, umc);
edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
}
}
@@ -2648,7 +2646,7 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
*/
dimm = csrow_nr >> 1;

- if (!fam_type->flags.zn_regs_v2)
+ if (!pvt->flags.zn_regs_v2)
cs_mask_nr >>= 1;

/* Asymmetric dual-rank DIMM support. */
@@ -3268,167 +3266,6 @@ static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
}
}

-static struct amd64_family_type family_types[] = {
- [K8_CPUS] = {
- .ctl_name = "K8",
- .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
- .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = k8_early_channel_count,
- .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
- .dbam_to_cs = k8_dbam_to_chip_select,
- }
- },
- [F10_CPUS] = {
- .ctl_name = "F10h",
- .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
- .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f10_dbam_to_chip_select,
- }
- },
- [F15_CPUS] = {
- .ctl_name = "F15h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f15_dbam_to_chip_select,
- }
- },
- [F15_M30H_CPUS] = {
- .ctl_name = "F15h_M30h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F15_M60H_CPUS] = {
- .ctl_name = "F15h_M60h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f15_m60h_dbam_to_chip_select,
- }
- },
- [F16_CPUS] = {
- .ctl_name = "F16h",
- .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F16_M30H_CPUS] = {
- .ctl_name = "F16h_M30h",
- .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F17_CPUS] = {
- .ctl_name = "F17h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F17_M10H_CPUS] = {
- .ctl_name = "F17h_M10h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F17_M30H_CPUS] = {
- .ctl_name = "F17h_M30h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
- .max_mcs = 8,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F17_M60H_CPUS] = {
- .ctl_name = "F17h_M60h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F17_M70H_CPUS] = {
- .ctl_name = "F17h_M70h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F19_CPUS] = {
- .ctl_name = "F19h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
- .max_mcs = 8,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F19_M10H_CPUS] = {
- .ctl_name = "F19h_M10h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
- .max_mcs = 12,
- .flags.zn_regs_v2 = 1,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
- [F19_M50H_CPUS] = {
- .ctl_name = "F19h_M50h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
- .max_mcs = 2,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
-};
-
/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
@@ -3850,7 +3687,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
umc_base = get_umc_base(i);
umc = &pvt->umc[i];

- amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg);
+ amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG, pvt), &umc->dimm_cfg);
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
@@ -4380,7 +4217,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci)

mci->edac_cap = determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = fam_type->ctl_name;
+ mci->ctl_name = pvt->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;

@@ -4392,7 +4229,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
/*
* returns a pointer to the family descriptor on success, NULL otherwise.
*/
-static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
+static void per_family_init(struct amd64_pvt *pvt)
{
pvt->ext_model = boot_cpu_data.x86_model >> 4;
pvt->stepping = boot_cpu_data.x86_stepping;
@@ -4401,109 +4238,150 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)

switch (pvt->fam) {
case 0xf:
- fam_type = &family_types[K8_CPUS];
- pvt->ops = &family_types[K8_CPUS].ops;
+ pvt->ctl_name = "K8";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
+ pvt->max_mcs = 2;
+ pvt->ops->early_channel_count = k8_early_channel_count;
+ pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow;
+ pvt->ops->dbam_to_cs = k8_dbam_to_chip_select;
break;

case 0x10:
- fam_type = &family_types[F10_CPUS];
- pvt->ops = &family_types[F10_CPUS].ops;
+ pvt->ctl_name = "F10h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
+ pvt->max_mcs = 2;
+ pvt->ops->early_channel_count = f1x_early_channel_count;
+ pvt->ops->map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow;
+ pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
break;

case 0x15:
if (pvt->model == 0x30) {
- fam_type = &family_types[F15_M30H_CPUS];
- pvt->ops = &family_types[F15_M30H_CPUS].ops;
- break;
+ pvt->ctl_name = "F15h_M30h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
+ pvt->ops->dbam_to_cs = f16_dbam_to_chip_select;
} else if (pvt->model == 0x60) {
- fam_type = &family_types[F15_M60H_CPUS];
- pvt->ops = &family_types[F15_M60H_CPUS].ops;
- break;
- /* Richland is only client */
+ pvt->ctl_name = "F15h_M60h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
+ pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
} else if (pvt->model == 0x13) {
- return NULL;
+ /* Richland is only client */
+ return;
} else {
- fam_type = &family_types[F15_CPUS];
- pvt->ops = &family_types[F15_CPUS].ops;
+ pvt->ctl_name = "F15h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
+ pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
}
+ pvt->max_mcs = 2;
+ pvt->ops->early_channel_count = f1x_early_channel_count;
+ pvt->ops->map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow;
break;

case 0x16:
if (pvt->model == 0x30) {
- fam_type = &family_types[F16_M30H_CPUS];
- pvt->ops = &family_types[F16_M30H_CPUS].ops;
- break;
+ pvt->ctl_name = "F16h_M30h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
+ } else {
+ pvt->ctl_name = "F16h";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
}
- fam_type = &family_types[F16_CPUS];
- pvt->ops = &family_types[F16_CPUS].ops;
+ pvt->max_mcs = 2;
+ pvt->ops->early_channel_count = f1x_early_channel_count;
+ pvt->ops->map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow;
+ pvt->ops->dbam_to_cs = f16_dbam_to_chip_select;
break;

case 0x17:
if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
- fam_type = &family_types[F17_M10H_CPUS];
- pvt->ops = &family_types[F17_M10H_CPUS].ops;
- df_ops = &df2_ops;
- break;
+ pvt->ctl_name = "F17h_M10h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6;
+ pvt->max_mcs = 2;
+ df_ops = &df2_ops;
} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
- fam_type = &family_types[F17_M30H_CPUS];
- pvt->ops = &family_types[F17_M30H_CPUS].ops;
- df_ops = &df3_ops;
- break;
+ pvt->ctl_name = "F17h_M30h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6;
+ pvt->max_mcs = 8;
+ df_ops = &df3_ops;
} else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
- fam_type = &family_types[F17_M60H_CPUS];
- pvt->ops = &family_types[F17_M60H_CPUS].ops;
- df_ops = &df3_ops;
- break;
+ pvt->ctl_name = "F17h_M60h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6;
+ pvt->max_mcs = 2;
+ df_ops = &df3_ops;
} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
- fam_type = &family_types[F17_M70H_CPUS];
- pvt->ops = &family_types[F17_M70H_CPUS].ops;
- df_ops = &df3_ops;
- break;
+ pvt->ctl_name = "F17h_M70h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6;
+ pvt->max_mcs = 2;
+ df_ops = &df3_ops;
+ } else {
+ pvt->ctl_name = "F17h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6;
+ pvt->max_mcs = 2;
+ df_ops = &df2_ops;
}
fallthrough;
case 0x18:
- fam_type = &family_types[F17_CPUS];
- pvt->ops = &family_types[F17_CPUS].ops;
- df_ops = &df2_ops;
-
- if (pvt->fam == 0x18)
- family_types[F17_CPUS].ctl_name = "F18h";
+ pvt->ops->early_channel_count = f17_early_channel_count;
+ pvt->ops->dbam_to_cs = f17_addr_mask_to_cs_size;
+
+ if (pvt->fam == 0x18) {
+ pvt->ctl_name = "F18h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6;
+ pvt->max_mcs = 2;
+ df_ops = &df2_ops;
+ }
break;

case 0x19:
if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
- fam_type = &family_types[F19_M10H_CPUS];
- pvt->ops = &family_types[F19_M10H_CPUS].ops;
- break;
+ pvt->ctl_name = "F19h_M10h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6;
+ pvt->max_mcs = 12;
+ pvt->flags.zn_regs_v2 = 1;
} else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
- fam_type = &family_types[F17_M70H_CPUS];
- pvt->ops = &family_types[F17_M70H_CPUS].ops;
- fam_type->ctl_name = "F19h_M20h";
- df_ops = &df3_ops;
- break;
+ pvt->ctl_name = "F19h_M20h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6;
+ pvt->max_mcs = 2;
+ df_ops = &df3_ops;
} else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
- fam_type = &family_types[F19_M50H_CPUS];
- pvt->ops = &family_types[F19_M50H_CPUS].ops;
- fam_type->ctl_name = "F19h_M50h";
- break;
+ pvt->ctl_name = "F19h_M50h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6;
+ pvt->max_mcs = 2;
} else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
- fam_type = &family_types[F19_M10H_CPUS];
- pvt->ops = &family_types[F19_M10H_CPUS].ops;
- fam_type->ctl_name = "F19h_MA0h";
- break;
+ pvt->ctl_name = "F19h_M10h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6;
+ pvt->max_mcs = 2;
+ } else {
+ pvt->ctl_name = "F19h";
+ pvt->f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0;
+ pvt->f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6;
+ pvt->max_mcs = 8;
+ df_ops = &df3_ops;
}
- fam_type = &family_types[F19_CPUS];
- pvt->ops = &family_types[F19_CPUS].ops;
- family_types[F19_CPUS].ctl_name = "F19h";
- df_ops = &df3_ops;
+ pvt->ops->early_channel_count = f17_early_channel_count;
+ pvt->ops->dbam_to_cs = f17_addr_mask_to_cs_size;
break;

default:
amd64_err("Unsupported family!\n");
- return NULL;
+ return;
}
-
- return fam_type;
}

static const struct attribute_group *amd64_edac_attr_groups[] = {
@@ -4520,15 +4398,15 @@ static int hw_info_get(struct amd64_pvt *pvt)
int ret;

if (pvt->fam >= 0x17) {
- pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
+ pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc)
return -ENOMEM;

- pci_id1 = fam_type->f0_id;
- pci_id2 = fam_type->f6_id;
+ pci_id1 = pvt->f0_id;
+ pci_id2 = pvt->f6_id;
} else {
- pci_id1 = fam_type->f1_id;
- pci_id2 = fam_type->f2_id;
+ pci_id1 = pvt->f1_id;
+ pci_id2 = pvt->f2_id;
}

ret = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
@@ -4574,7 +4452,7 @@ static int init_one_instance(struct amd64_pvt *pvt)
* only one channel. Also, this simplifies handling later for the price
* of a couple of KBs tops.
*/
- layers[1].size = fam_type->max_mcs;
+ layers[1].size = pvt->max_mcs;
layers[1].is_virt_csrow = false;

mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
@@ -4604,7 +4482,7 @@ static bool instance_has_memory(struct amd64_pvt *pvt)
bool cs_enabled = false;
int cs = 0, dct = 0;

- for (dct = 0; dct < fam_type->max_mcs; dct++) {
+ for (dct = 0; dct < pvt->max_mcs; dct++) {
for_each_chip_select(cs, dct, pvt)
cs_enabled |= csrow_enabled(cs, dct, pvt);
}
@@ -4633,10 +4511,12 @@ static int probe_one_instance(unsigned int nid)
pvt->mc_node_id = nid;
pvt->F3 = F3;

+ pvt->ops = kzalloc(sizeof(*pvt->ops), GFP_KERNEL);
+ if (!pvt->ops)
+ goto err_out;
+
ret = -ENODEV;
- fam_type = per_family_init(pvt);
- if (!fam_type)
- goto err_enable;
+ per_family_init(pvt);

ret = hw_info_get(pvt);
if (ret < 0)
@@ -4674,8 +4554,8 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}

- amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
- (pvt->fam == 0xf ?
+ amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+ (pvt->fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
: ""), pvt->mc_node_id);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 6a112270a84b..4e3f9755bc73 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -291,25 +291,6 @@

#define UMC_SDP_INIT BIT(31)

-enum amd_families {
- K8_CPUS = 0,
- F10_CPUS,
- F15_CPUS,
- F15_M30H_CPUS,
- F15_M60H_CPUS,
- F16_CPUS,
- F16_M30H_CPUS,
- F17_CPUS,
- F17_M10H_CPUS,
- F17_M30H_CPUS,
- F17_M60H_CPUS,
- F17_M70H_CPUS,
- F19_CPUS,
- F19_M10H_CPUS,
- F19_M50H_CPUS,
- NUM_FAMILIES,
-};
-
/* Error injection control structure */
struct error_injection {
u32 section;
@@ -352,6 +333,16 @@ struct amd64_umc {
enum mem_type dram_type;
};

+struct amd64_family_flags {
+ /*
+ * Indicates that the system supports the new register offsets, etc.
+ * first introduced with Family 19h Model 10h.
+ */
+ __u64 zn_regs_v2 : 1,
+
+ __reserved : 63;
+};
+
struct amd64_pvt {
struct low_ops *ops;

@@ -394,6 +385,12 @@ struct amd64_pvt {
/* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;

+ const char *ctl_name;
+ u16 f0_id, f1_id, f2_id, f6_id;
+ /* Maximum number of memory controllers per die/node. */
+ u8 max_mcs;
+
+ struct amd64_family_flags flags;
/* place to store error injection parameters prior to issue */
struct error_injection injection;

@@ -479,30 +476,11 @@ struct ecc_settings {
* functions and per device encoding/decoding logic.
*/
struct low_ops {
- int (*early_channel_count) (struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
- struct err_info *);
- int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct,
- unsigned cs_mode, int cs_mask_nr);
-};
-
-struct amd64_family_flags {
- /*
- * Indicates that the system supports the new register offsets, etc.
- * first introduced with Family 19h Model 10h.
- */
- __u64 zn_regs_v2 : 1,
-
- __reserved : 63;
-};
-
-struct amd64_family_type {
- const char *ctl_name;
- u16 f0_id, f1_id, f2_id, f6_id;
- /* Maximum number of memory controllers per die/node. */
- u8 max_mcs;
- struct amd64_family_flags flags;
- struct low_ops ops;
+ int (*early_channel_count)(struct amd64_pvt *pvt);
+ void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci, u64 sys_addr,
+ struct err_info *err);
+ int (*dbam_to_cs)(struct amd64_pvt *pvt, u8 dct,
+ unsigned int cs_mode, int cs_mask_nr);
};

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
--
2.25.1