Subject: [PATCH v3 33/33] EDAC/amd64: Add address translation support for DF3.5
Date: 28 Oct 2021
From: Muralidhara M K <muralimk@amd.com>

Add support for address translation on Data Fabric version 3.5.

Add new data fabric ops and interleaving modes. Also, adjust how the
DRAM address maps are found early in the translation for GPU nodes: the
maps are scanned for one whose destination Fabric ID matches the node.

Signed-off-by: Muralidhara M K <muralimk@amd.com>
Co-developed-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
---
Link: https://lkml.kernel.org/r/20210630152828.162659-7-nchatrad@amd.com

v2->v3:
* New in v3. Original version at link above.
* Squashed the following into this patch:
https://lkml.kernel.org/r/20210630152828.162659-8-nchatrad@amd.com
* Dropped "df_regs" use.
* Set "df_ops" during module init.
* Dropped hard-coded Node ID values.
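* For reviewers, a minimal standalone userspace sketch of the DF3.5 CS
  dehash follows. Illustrative only: the test address and the DfGlobalCtrl
  hash-enable values are made up; the in-kernel logic is dehash_addr_df35()
  in the diff below.

#include <stdint.h>
#include <stdio.h>

/* Undo CSSelect[i] = XOR of addr{8+i, 16+i, 21+i, 30+i}, per hash enable. */
static uint64_t dehash_df35(uint64_t addr, unsigned int num_intlv_bits,
			    unsigned int ctl_64k, unsigned int ctl_2M,
			    unsigned int ctl_1G)
{
	unsigned int i;

	for (i = 0; i < num_intlv_bits; i++) {
		uint64_t hashed_bit = (addr >> (8 + i)) ^
				      ((addr >> (16 + i)) & ctl_64k) ^
				      ((addr >> (21 + i)) & ctl_2M) ^
				      ((addr >> (30 + i)) & ctl_1G);

		hashed_bit &= 1;

		/* Flip the bit when the upper address bits contribute a 1 to the hash. */
		if (hashed_bit != ((addr >> (8 + i)) & 1))
			addr ^= 1ULL << (8 + i);
	}

	return addr;
}

int main(void)
{
	/* Made-up example: 8-channel hashing (3 interleave bits), all hash enables set. */
	uint64_t addr = 0x123456789aULL;

	printf("0x%llx -> 0x%llx\n", (unsigned long long)addr,
	       (unsigned long long)dehash_df35(addr, 3, 1, 1, 1));
	return 0;
}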

drivers/edac/amd64_edac.c | 224 +++++++++++++++++++++++++++++++++++++-
1 file changed, 220 insertions(+), 4 deletions(-)

diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index bc1aa6292408..9728571f9d67 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -989,6 +989,7 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
/*
* Glossary of acronyms used in address translation for Zen-based systems
*
+ * CCM = CPU Core Coherent Module
* COD = Cluster-on-Die
* CS = Coherent Slave
* DF = Data Fabric
@@ -1064,9 +1065,14 @@ enum intlv_modes {
NOHASH_2CH = 0x01,
NOHASH_4CH = 0x03,
NOHASH_8CH = 0x05,
+ NOHASH_16CH = 0x07,
+ NOHASH_32CH = 0x08,
HASH_COD4_2CH = 0x0C,
HASH_COD2_4CH = 0x0D,
HASH_COD1_8CH = 0x0E,
+ HASH_8CH = 0x1C,
+ HASH_16CH = 0x1D,
+ HASH_32CH = 0x1E,
DF2_HASH_2CH = 0x21,
};

@@ -1080,6 +1086,7 @@ struct addr_ctx {
u32 reg_limit_addr;
u32 reg_fab_id_mask0;
u32 reg_fab_id_mask1;
+ u32 reg_fab_id_mask2;
u16 cs_fabric_id;
u16 die_id_mask;
u16 socket_id_mask;
@@ -1416,6 +1423,157 @@ struct data_fabric_ops df3_ops = {
.get_component_id_mask = get_component_id_mask_df3,
};

+static int dehash_addr_df35(struct addr_ctx *ctx)
+{
+ u8 hashed_bit, intlv_ctl_64k, intlv_ctl_2M, intlv_ctl_1G;
+ u8 num_intlv_bits = ctx->intlv_num_chan;
+ u32 i;
+
+ /* Read D18F0x3F8 (DfGlobalCtrl). */
+ if (df_indirect_read_broadcast(0, 0, 0x3F8, &ctx->tmp))
+ return -EINVAL;
+
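+ /* Hash enable bits: whether the 64K, 2M, and 1G address bits feed the hash. */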
+ intlv_ctl_64k = !!((ctx->tmp >> 20) & 0x1);
+ intlv_ctl_2M = !!((ctx->tmp >> 21) & 0x1);
+ intlv_ctl_1G = !!((ctx->tmp >> 22) & 0x1);
+
+ /*
+ * CSSelect[0] = XOR of addr{8, 16, 21, 30};
+ * CSSelect[1] = XOR of addr{9, 17, 22, 31};
+ * CSSelect[2] = XOR of addr{10, 18, 23, 32};
+ * CSSelect[3] = XOR of addr{11, 19, 24, 33}; - 16 and 32 channel only
+ * CSSelect[4] = XOR of addr{12, 20, 25, 34}; - 32 channel only
+ */
+ for (i = 0; i < num_intlv_bits; i++) {
+ hashed_bit = ((ctx->ret_addr >> (8 + i)) ^
+ ((ctx->ret_addr >> (16 + i)) & intlv_ctl_64k) ^
+ ((ctx->ret_addr >> (21 + i)) & intlv_ctl_2M) ^
+ ((ctx->ret_addr >> (30 + i)) & intlv_ctl_1G));
+
+ hashed_bit &= BIT(0);
+ if (hashed_bit != ((ctx->ret_addr >> (8 + i)) & BIT(0)))
+ ctx->ret_addr ^= BIT(8 + i);
+ }
+
+ return 0;
+}
+
+static int get_intlv_mode_df35(struct addr_ctx *ctx)
+{
+ ctx->intlv_mode = (ctx->reg_base_addr >> 2) & 0x1F;
+
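+ /*
+ * The COD hash modes reuse the DF3 helpers. All other DF3.5 modes use the
+ * simple CS ID handling and, when hashed, the DF3.5 dehash.
+ */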
+ if (ctx->intlv_mode == HASH_COD4_2CH ||
+ ctx->intlv_mode == HASH_COD2_4CH ||
+ ctx->intlv_mode == HASH_COD1_8CH) {
+ ctx->make_space_for_cs_id = make_space_for_cs_id_cod_hash;
+ ctx->insert_cs_id = insert_cs_id_cod_hash;
+ ctx->dehash_addr = dehash_addr_df3;
+ } else {
+ ctx->make_space_for_cs_id = make_space_for_cs_id_simple;
+ ctx->insert_cs_id = insert_cs_id_simple;
+ ctx->dehash_addr = dehash_addr_df35;
+ }
+
+ return 0;
+}
+
+static void get_intlv_num_dies_df35(struct addr_ctx *ctx)
+{
+ ctx->intlv_num_dies = (ctx->reg_base_addr >> 7) & 0x1;
+}
+
+static u8 get_die_id_shift_df35(struct addr_ctx *ctx)
+{
+ return ctx->node_id_shift;
+}
+
+static u8 get_socket_id_shift_df35(struct addr_ctx *ctx)
+{
+ return (ctx->reg_fab_id_mask1 >> 8) & 0xF;
+}
+
+static int get_masks_df35(struct addr_ctx *ctx)
+{
+ /* Read D18F1x150 (SystemFabricIdMask0). */
+ if (df_indirect_read_broadcast(0, 1, 0x150, &ctx->reg_fab_id_mask0))
+ return -EINVAL;
+
+ /* Read D18F1x154 (SystemFabricIdMask1). */
+ if (df_indirect_read_broadcast(0, 1, 0x154, &ctx->reg_fab_id_mask1))
+ return -EINVAL;
+
+ /* Read D18F1x158 (SystemFabricIdMask2). */
+ if (df_indirect_read_broadcast(0, 1, 0x158, &ctx->reg_fab_id_mask2))
+ return -EINVAL;
+
+ ctx->node_id_shift = ctx->reg_fab_id_mask1 & 0xF;
+
+ ctx->die_id_mask = ctx->reg_fab_id_mask2 & 0xFFFF;
+
+ ctx->socket_id_mask = (ctx->reg_fab_id_mask2 >> 16) & 0xFFFF;
+
+ return 0;
+}
+
+static u16 get_dst_fabric_id_df35(struct addr_ctx *ctx)
+{
+ return ctx->reg_limit_addr & 0xFFF;
+}
+
+/* Aldebaran nodes have an arbitrary UMC to CS mapping based on physical layout. */
+u8 umc_to_cs_mapping_aldebaran[] = { 28, 20, 24, 16, 12, 4, 8, 0,
+ 6, 30, 2, 26, 22, 14, 18, 10,
+ 19, 11, 15, 7, 3, 27, 31, 23,
+ 9, 1, 5, 29, 25, 17, 21, 13};
+
+int get_umc_to_cs_mapping(struct addr_ctx *ctx)
+{
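+ /* Remap the driver's UMC instance ID to its physical CS number. */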
+ if (ctx->inst_id >= sizeof(umc_to_cs_mapping_aldebaran))
+ return -EINVAL;
+
+ ctx->inst_id = umc_to_cs_mapping_aldebaran[ctx->inst_id];
+
+ return 0;
+}
+
+static int get_cs_fabric_id_df35(struct addr_ctx *ctx)
+{
+ u16 nid = ctx->nid;
+
+ /* Special handling for GPU nodes. */
+ if (nid >= amd_cpu_node_count()) {
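+ /* The driver enumerates GPU nodes after all CPU nodes. */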
+ if (get_umc_to_cs_mapping(ctx))
+ return -EINVAL;
+
+ /* Need to convert back to the hardware-provided Node ID. */
+ nid -= amd_cpu_node_count();
+ nid += amd_gpu_node_start_id();
+ }
+
+ ctx->cs_fabric_id = ctx->inst_id | (nid << ctx->node_id_shift);
+
+ return 0;
+}
+
+static u16 get_component_id_mask_df35(struct addr_ctx *ctx)
+{
+ return ctx->reg_fab_id_mask0 & 0xFFFF;
+}
+
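+ /*
+ * DF3.5 reuses the DF3 callbacks for the high address offset, interleave
+ * address select, and socket count; the remaining callbacks are new.
+ */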
+struct data_fabric_ops df3point5_ops = {
+ .get_hi_addr_offset = get_hi_addr_offset_df3,
+ .get_intlv_mode = get_intlv_mode_df35,
+ .get_intlv_addr_sel = get_intlv_addr_sel_df3,
+ .get_intlv_num_dies = get_intlv_num_dies_df35,
+ .get_intlv_num_sockets = get_intlv_num_sockets_df3,
+ .get_masks = get_masks_df35,
+ .get_die_id_shift = get_die_id_shift_df35,
+ .get_socket_id_shift = get_socket_id_shift_df35,
+ .get_dst_fabric_id = get_dst_fabric_id_df35,
+ .get_cs_fabric_id = get_cs_fabric_id_df35,
+ .get_component_id_mask = get_component_id_mask_df35,
+};
+
struct data_fabric_ops *df_ops;

static int get_blk_inst_cnt(struct addr_ctx *ctx)
@@ -1512,8 +1670,17 @@ static void get_intlv_num_chan(struct addr_ctx *ctx)
break;
case NOHASH_8CH:
case HASH_COD1_8CH:
+ case HASH_8CH:
ctx->intlv_num_chan = 3;
break;
+ case NOHASH_16CH:
+ case HASH_16CH:
+ ctx->intlv_num_chan = 4;
+ break;
+ case NOHASH_32CH:
+ case HASH_32CH:
+ ctx->intlv_num_chan = 5;
+ break;
default:
/* Valid interleaving modes were checked earlier. */
break;
@@ -1618,6 +1785,42 @@ static int addr_over_limit(struct addr_ctx *ctx)
return 0;
}

+static int find_ccm_instance_id(struct addr_ctx *ctx)
+{
+ for (ctx->inst_id = 0; ctx->inst_id < ctx->num_blk_instances; ctx->inst_id++) {
+ /* Read D18F0x44 (FabricBlockInstanceInformation0). */
+ if (df_indirect_read_instance(0, 0, 0x44, ctx->inst_id, &ctx->tmp))
+ return -EINVAL;
+
+ if (ctx->tmp == 0)
+ continue;
+
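+ /* InstanceType is in bits [3:0]; CCMs report a type value of 0. */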
+ if ((ctx->tmp & 0xF) == 0)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+#define DF_NUM_DRAM_MAPS_AVAILABLE 16
+static int find_map_reg_by_dstfabricid(struct addr_ctx *ctx)
+{
+ u16 node_id_mask = (ctx->reg_fab_id_mask0 >> 16) & 0xFFFF;
+ u16 dst_fabric_id;
+
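+ /* Scan the DRAM maps for one whose destination is on this CS's node. */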
+ for (ctx->map_num = 0; ctx->map_num < DF_NUM_DRAM_MAPS_AVAILABLE; ctx->map_num++) {
+ if (get_dram_addr_map(ctx))
+ continue;
+
+ dst_fabric_id = df_ops->get_dst_fabric_id(ctx);
+
+ if ((dst_fabric_id & node_id_mask) == (ctx->cs_fabric_id & node_id_mask))
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static int umc_normaddr_to_sysaddr(u64 *addr, u16 nid, u8 df_inst_id)
{
struct addr_ctx ctx;
@@ -1633,6 +1836,9 @@ static int umc_normaddr_to_sysaddr(u64 *addr, u16 nid, u8 df_inst_id)
ctx.nid = nid;
ctx.inst_id = df_inst_id;

+ if (df_ops == &df3point5_ops)
+ ctx.late_hole_remove = true;
+
if (get_blk_inst_cnt(&ctx))
return -EINVAL;

@@ -1642,11 +1848,20 @@ static int umc_normaddr_to_sysaddr(u64 *addr, u16 nid, u8 df_inst_id)
if (df_ops->get_cs_fabric_id(&ctx))
return -EINVAL;

- if (remove_dram_offset(&ctx))
- return -EINVAL;
+ /* Special handling for GPU nodes. */
+ if (ctx.nid >= amd_cpu_node_count()) {
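+ /*
+ * Pick a CCM instance to read through, then locate the DRAM map by its
+ * destination Fabric ID.
+ */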
+ if (find_ccm_instance_id(&ctx))
+ return -EINVAL;

- if (get_dram_addr_map(&ctx))
- return -EINVAL;
+ if (find_map_reg_by_dstfabricid(&ctx))
+ return -EINVAL;
+ } else {
+ if (remove_dram_offset(&ctx))
+ return -EINVAL;
+
+ if (get_dram_addr_map(&ctx))
+ return -EINVAL;
+ }

if (df_ops->get_intlv_mode(&ctx))
return -EINVAL;
@@ -4644,6 +4859,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
pvt->ops = &family_types[F19_CPUS].ops;
family_types[F19_CPUS].ctl_name = "F19h_M30h";
}
+ df_ops = &df3point5_ops;
} else {
pvt->fam_type = &family_types[F19_CPUS];
pvt->ops = &family_types[F19_CPUS].ops;
--
2.25.1