soc/intel/tigerlake: Add support to initialize DDR4 Memory

Add support to configure the DDR4 memory variant:
	- Add support to read SPD data based on the memory topology.
	- Initialize FSP UPDs for DQ and DQS mapping.

BUG=b:151702387

Signed-off-by: Varun Joshi <varun.joshi@intel.corp-partner.google.com>
Change-Id: I47a5dcad3ee316871a6103b9d53ef7f6fc88d7d8
Reviewed-on: https://review.coreboot.org/c/coreboot/+/39847
Reviewed-by: Furquan Shaikh <furquan@google.com>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Reviewed-by: EricR Lai <ericr_lai@compal.corp-partner.google.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Author: Varun Joshi
Date: 2020-03-23 13:24:36 -07:00
Committed by: Furquan Shaikh
Parent: c2e796290a
Commit: 9734325f45
2 changed files with 172 additions and 1 deletion


@@ -87,7 +87,44 @@ struct lpddr4x_cfg {
* the array represents DQ pin# on the memory part.
*/
uint8_t dqs_map[LPDDR4X_CHANNELS][LPDDR4X_BYTES_PER_CHANNEL];
/*
* Early Command Training Enable/Disable Control
* 1 = enable, 0 = disable
*/
uint8_t ect;
};
/* Board-specific memory configuration information for DDR4 memory variant */
struct mb_ddr4_cfg {
/*
* DQ CPU<>DRAM map:
* DDR4 memory interface has 8 DQs per channel. Each DQ consists of 8 bits (1
* byte). Thus, dq_map is represented as DDR[1-0]_DQ[7-0][7:0], where
* DDR[1-0] : DDR4 channel #
* DQ[7-0] : DQ # within the channel
* [7:0] : Bits within the DQ
*
* Index of the array represents DQ pin# on the CPU, whereas value in
* the array represents DQ pin# on the memory part.
*/
uint8_t dq_map[DDR4_CHANNELS][DDR4_BYTES_PER_CHANNEL][BITS_PER_BYTE];
/*
* DQS CPU<>DRAM map:
* DDR4 memory interface has 8 DQS pairs per channel. Thus, dqs_map is represented as
* DDR[1-0]_DQS[7-0], where
* DDR[1-0] : DDR4 channel #
* DQS[7-0] : DQS # within the channel
*
* Index of the array represents DQS pin# on the CPU, whereas value in
* the array represents DQS pin# on the memory part.
*/
uint8_t dqs_map[DDR4_CHANNELS][DDR4_BYTES_PER_CHANNEL];
/*
* Indicates whether memory is interleaved.
* Set to 1 for an interleaved design,
* set to 0 for non-interleaved design.
*/
uint8_t dq_pins_interleaved;
/*
* Early Command Training Enable/Disable Control
* 1 = enable, 0 = disable
@@ -97,5 +134,7 @@ struct lpddr4x_cfg {
void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
const struct spd_info *spd, bool half_populated);
/* Initialize DDR4 memory configurations */
void meminit_ddr4(FSP_M_CONFIG *mem_cfg, const struct mb_ddr4_cfg *board_cfg,
const struct spd_info *spd, const bool half_populated);
#endif /* _SOC_TIGERLAKE_MEMINIT_H_ */
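
For context, a minimal sketch (not part of this change) of how a mainboard variant might fill in the new struct mb_ddr4_cfg. The 1:1 pin routing, the include path and the values below are illustrative assumptions; a real board derives the maps from its schematic.

#include <soc/meminit.h>	/* assumed include path for this header */

static const struct mb_ddr4_cfg board_cfg = {
	/* Hypothetical 1:1 routing: CPU DQ pin N -> DRAM DQ pin N on both channels */
	.dq_map = {
		[0] = { {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7},
			{0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7} },
		[1] = { {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7},
			{0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7}, {0,1,2,3,4,5,6,7} },
	},
	/* Hypothetical 1:1 DQS routing on both channels */
	.dqs_map = {
		[0] = { 0, 1, 2, 3, 4, 5, 6, 7 },
		[1] = { 0, 1, 2, 3, 4, 5, 6, 7 },
	},
	.dq_pins_interleaved = 0,	/* non-interleaved design */
	.ect = 1,			/* enable Early Command Training */
};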


@@ -296,3 +296,135 @@ void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
board_cfg->dqs_map[i][1]);
}
}
static void read_sodimm_spd(const struct spd_info *info, struct spd_block *blk)
{
unsigned int i;
blk->addr_map[0] = info->smbus_info[0].addr_dimm0;
blk->addr_map[1] = info->smbus_info[0].addr_dimm1;
blk->addr_map[2] = info->smbus_info[1].addr_dimm0;
blk->addr_map[3] = info->smbus_info[1].addr_dimm1;
get_spd_smbus(blk);
for (i = 0; i < ARRAY_SIZE(blk->addr_map); i++) {
if (blk->addr_map[i])
print_spd_info((unsigned char *)blk->spd_array[i]);
}
}
static void ddr4_get_spd(unsigned int channel, const uintptr_t *spd_md_data,
const struct spd_block *spd_sodimm_blk,
const struct spd_info *info,
const bool half_populated, uintptr_t *spd_dimm0,
uintptr_t *spd_dimm1)
{
if (channel == 0) {
/* For mixed topology, channel 0 can only be Memory_Down */
if ((info->topology == MEMORY_DOWN) || (info->topology == MIXED)) {
*spd_dimm0 = *spd_md_data;
*spd_dimm1 = 0;
} else if (info->topology == SODIMM) {
*spd_dimm0 = (uintptr_t)spd_sodimm_blk->spd_array[0];
*spd_dimm1 = (uintptr_t)spd_sodimm_blk->spd_array[1];
} else
die("Undefined memory topology on Channel 0.\n");
} else if (channel == 1) {
if (half_populated) {
*spd_dimm0 = *spd_dimm1 = 0;
} else if (info->topology == MEMORY_DOWN) {
*spd_dimm0 = *spd_md_data;
*spd_dimm1 = 0;
/* For mixed topology, channel 1 can only be SODIMM */
} else if ((info->topology == SODIMM) || (info->topology == MIXED)) {
*spd_dimm0 = (uintptr_t)spd_sodimm_blk->spd_array[2];
*spd_dimm1 = (uintptr_t)spd_sodimm_blk->spd_array[3];
} else
die("Undefined memory topology on channel 1.\n");
} else
die("Unsupported channels.\n");
}
/* Initialize DDR4 memory configurations */
void meminit_ddr4(FSP_M_CONFIG *mem_cfg, const struct mb_ddr4_cfg *board_cfg,
const struct spd_info *info, const bool half_populated)
{
uintptr_t spd_md_data;
size_t spd_md_len;
uintptr_t spd_dimm0 = 0;
uintptr_t spd_dimm1 = 0;
struct spd_block spd_sodimm_blk;
unsigned int i;
unsigned int index = 0;
/* Early Command Training Enabled */
mem_cfg->ECT = board_cfg->ect;
mem_cfg->DqPinsInterleaved = board_cfg->dq_pins_interleaved;
if ((info->topology == MEMORY_DOWN) || (info->topology == MIXED)) {
read_md_spd(info, &spd_md_data, &spd_md_len);
mem_cfg->MemorySpdDataLen = spd_md_len;
}
if ((info->topology == SODIMM) || (info->topology == MIXED)) {
read_sodimm_spd(info, &spd_sodimm_blk);
if ((info->topology == MIXED) &&
(mem_cfg->MemorySpdDataLen != spd_sodimm_blk.len))
die("Mixed topology has incorrect length.\n");
else
mem_cfg->MemorySpdDataLen = spd_sodimm_blk.len;
}
for (i = 0; i < DDR4_CHANNELS; i++) {
ddr4_get_spd(i, &spd_md_data, &spd_sodimm_blk, info,
half_populated, &spd_dimm0, &spd_dimm1);
init_spd_upds(mem_cfg, i, spd_dimm0, spd_dimm1);
}
/*
* DDR4 memory interface has 8 DQs per channel. Each DQ consists of 8 bits (1
* byte). However, FSP UPDs for DQ Map expect a DQ pair (i.e. mapping for 2 bytes) in
* each UPD.
*
* Thus, init_dq_upds() needs to be called for every dq pair of each channel.
* DqMapCpu2DramCh0 --> dq_map[CHAN=0][0-1]
* DqMapCpu2DramCh1 --> dq_map[CHAN=0][2-3]
* DqMapCpu2DramCh2 --> dq_map[CHAN=0][4-5]
* DqMapCpu2DramCh3 --> dq_map[CHAN=0][6-7]
* DqMapCpu2DramCh4 --> dq_map[CHAN=1][0-1]
* DqMapCpu2DramCh5 --> dq_map[CHAN=1][2-3]
* DqMapCpu2DramCh6 --> dq_map[CHAN=1][4-5]
* DqMapCpu2DramCh7 --> dq_map[CHAN=1][6-7]
*/
/*
* DDR4 memory interface has 8 DQS pairs per channel. FSP UPDs for DQS Map expect a
* pair in each UPD.
*
* Thus, init_dqs_upds() needs to be called for every dqs pair of each channel.
* DqsMapCpu2DramCh0 --> dqs_map[CHAN=0][0-1]
* DqsMapCpu2DramCh1 --> dqs_map[CHAN=0][2-3]
* DqsMapCpu2DramCh2 --> dqs_map[CHAN=0][4-5]
* DqsMapCpu2DramCh3 --> dqs_map[CHAN=0][6-7]
* DqsMapCpu2DramCh4 --> dqs_map[CHAN=1][0-1]
* DqsMapCpu2DramCh5 --> dqs_map[CHAN=1][2-3]
* DqsMapCpu2DramCh6 --> dqs_map[CHAN=1][4-5]
* DqsMapCpu2DramCh7 --> dqs_map[CHAN=1][6-7]
*/
for (i = 0; i < DDR4_CHANNELS; i++) {
for (int b = 0; b < DDR4_BYTES_PER_CHANNEL; b += 2) {
if (half_populated && (i == 1)) {
init_dq_upds_empty(mem_cfg, index);
init_dqs_upds_empty(mem_cfg, index);
} else {
init_dq_upds(mem_cfg, index, board_cfg->dq_map[i][b],
board_cfg->dq_map[i][b+1]);
init_dqs_upds(mem_cfg, index, board_cfg->dqs_map[i][b],
board_cfg->dqs_map[i][b+1]);
}
index++;
}
}
}
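
Finally, a sketch of how romstage could consume the new API for a dual-SODIMM board. This is not part of the change: the function name mainboard_memory_init_params(), the FspmConfig member, the romstage header and the SMBus addresses are assumptions based on the usual coreboot/FSP 2.0 flow, and board_cfg refers to the struct from the earlier sketch.

#include <soc/meminit.h>
#include <soc/romstage.h>

static const struct spd_info spd = {
	.topology = SODIMM,
	/* Board-specific SPD EEPROM addresses, one pair per channel (illustrative) */
	.smbus_info[0] = { .addr_dimm0 = 0x50, .addr_dimm1 = 0x51 },
	.smbus_info[1] = { .addr_dimm0 = 0x52, .addr_dimm1 = 0x53 },
};

void mainboard_memory_init_params(FSPM_UPD *memupd)
{
	/* Both channels are routed on this hypothetical board, so half_populated is false */
	meminit_ddr4(&memupd->FspmConfig, &board_cfg, &spd, false);
}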