The following patch adds resume (exit from self refresh) support for AMD K8 rev F CPUs. It handles both types of errata on those CPUs.

Signed-off-by: Rudolf Marek <r.marek@assembler.cz>
Acked-by: Peter Stuge <peter@stuge.se>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@4102 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
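For orientation, the resume control flow this patch adds looks roughly as follows (a sketch assembled from the hunks below, not a literal excerpt; names follow the patch):

/*
 * sdram_enable()
 *   suspend = acpi_is_wakeup_early();   // only when ACPI_IS_WAKEUP_EARLY is defined
 *   if (suspend) {
 *       exit_from_self()                // instead of full DRAM init and training
 *         - re-enable ECC/ChipKill per node
 *         - run the errata 125/270 exit-from-self-refresh sequence
 *         - poll DCM_MemClrStatus on every controller
 *         - dqs_restore_MC_NVRAM()      // reload the trained DQS values
 *       return;                         // the cold-boot path is skipped
 *   }
 */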
@@ -73,7 +73,11 @@ static void enable_fid_change(void)
//	dword = 0x00070000; /* enable FID/VID change */
	pci_write_config32(PCI_DEV(0, 0x18+i, 3), 0x80, dword);

#if HAVE_ACPI_RESUME
	dword = 0x21132113;
#else
	dword = 0x00132113;
#endif
	pci_write_config32(PCI_DEV(0, 0x18+i, 3), 0x84, dword);

}
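The only functional change in this hunk is the value written to F3x84 when HAVE_ACPI_RESUME is set. The two constants differ only in the top byte (0x21132113 ^ 0x00132113 == 0x21000000, i.e. bits 29 and 24); these presumably enable the power-state behaviour needed for S3 self refresh (the exact bit meanings are in the BKDG).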
@@ -0,0 +1,189 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2009 Rudolf Marek <r.marek@assembler.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

void exit_from_self(int controllers, const struct mem_controller *ctrl,
		    struct sys_info *sysinfo)
{
	int i;
	u32 dcl, dch;
	u32 pcidev;
	u8 bitmask;
	u8 is_post_rev_g;
	u32 cpuid;

	for (i = 0; i < controllers; i++) {
		if (!sysinfo->ctrl_present[i])
			continue;

		/* Skip everything if I don't have any memory on this controller */
		dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
		if (!(dch & DCH_MemClkFreqVal)) {
			continue;
		}

		cpuid = pci_read_config32(ctrl[i].f3, 0xfc);
		is_post_rev_g = ((cpuid & 0xfff00) > 0x50f00);

		/* ChipKill */
		dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
		if (dcl & DCL_DimmEccEn) {
			u32 mnc;
			printk_spew("ECC enabled\n");
			mnc = pci_read_config32(ctrl[i].f3, MCA_NB_CONFIG);
			mnc |= MNC_ECC_EN;
			if (dcl & DCL_Width128) {
				mnc |= MNC_CHIPKILL_EN;
			}
			pci_write_config32(ctrl[i].f3, MCA_NB_CONFIG, mnc);
		}

		printk_debug("before resume errata #%d\n",
			     (is_post_rev_g) ? 270 : 125);
		/*
		 * 1. Restore memory controller registers as normal.
		 * 2. Set the DisAutoRefresh bit (Dev:2x8C[18]). (270 only)
		 * 3. Set the EnDramInit bit (Dev:2x7C[31]); clear all other bits in the same register.
		 * 4. Wait at least 750 us.
		 * 5. Clear the EnDramInit bit.
		 * 6. Clear the DisAutoRefresh bit. (270 only)
		 * 7. Read the value of Dev:2x80 and write that value back to Dev:2x80.
		 * 8. Set the exit from self refresh bit (Dev:2x90[1]).
		 * 9. Clear the exit from self refresh bit immediately.
		 * Note: Steps 8 and 9 must be executed in a single 64-byte aligned,
		 * uninterrupted instruction stream.
		 */

		enable_lapic();
		init_timer();

		printk_debug("before exit errata - timer enabled\n");

		if (is_post_rev_g) {
			dcl = pci_read_config32(ctrl[i].f2, DRAM_TIMING_HIGH);
			dcl |= (1 << 18);
			pci_write_config32(ctrl[i].f2, DRAM_TIMING_HIGH, dcl);
		}

		dcl = DI_EnDramInit;
		pci_write_config32(ctrl[i].f2, DRAM_INIT, dcl);

		udelay(800);

		printk_debug("before exit errata - after mdelay\n");

		dcl = pci_read_config32(ctrl[i].f2, DRAM_INIT);
		dcl &= ~DI_EnDramInit;
		pci_write_config32(ctrl[i].f2, DRAM_INIT, dcl);

		if (is_post_rev_g) {
			dcl = pci_read_config32(ctrl[i].f2, DRAM_TIMING_HIGH);
			dcl &= ~(1 << 18);
			pci_write_config32(ctrl[i].f2, DRAM_TIMING_HIGH, dcl);
		}

		dcl = pci_read_config32(ctrl[i].f2, DRAM_BANK_ADDR_MAP);
		pci_write_config32(ctrl[i].f2, DRAM_BANK_ADDR_MAP, dcl);

		/* I was unable to do that like: ctrl[i].f2->path.pci.devfn << 8 */
		pcidev = 0x80000000 | ((((ctrl[i].node_id + 0x18) << 3) | 0x2) << 8) | 0x90;
		printk_debug("pcidev is %x\n", pcidev);
		bitmask = 2;
		__asm__ __volatile__("pushl %0\n\t"
				     "movw $0xcf8, %%dx\n\t"
				     "out %%eax, (%%dx)\n\t"
				     "movw $0xcfc, %%dx\n\t"
				     "inl %%dx, %%eax\n\t"
				     "orb %1, %%al\n\t"
				     "not %1\n\t"
				     ".align 64\n\t"
				     "outl %%eax, (%%dx)\n\t"
				     "andb %1, %%al\n\t"
				     "outl %%eax, (%%dx)\n\t"
				     "popl %0\n\t"::"a"(pcidev), "q"(bitmask):"edx");
	}

	printk_debug("after exit errata\n");

	for (i = 0; i < controllers; i++) {
		u32 dcm;
		if (!sysinfo->ctrl_present[i])
			continue;

		/* Skip everything if I don't have any memory on this controller */
		if (sysinfo->meminfo[i].dimm_mask == 0x00)
			continue;

		printk_debug("Exiting memory from self refresh: ");
		int loops = 0;
		do {
			loops++;
			if ((loops & 1023) == 0) {
				printk_debug(".");
			}
			dcm = pci_read_config32(ctrl[i].f2, DRAM_CTRL_MISC);
		} while (((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0) */ );
		if (loops >= TIMEOUT_LOOPS) {
			printk_debug("timeout with cntrl[%d]\n", i);
			continue;
		}
		printk_debug(" done\n");
	}

#if HW_MEM_HOLE_SIZEK != 0
	/* init hw mem hole here */
	/* The DramHoleValid bit can only be set after MemClrStatus is set by hardware */
	set_hw_mem_hole(controllers, ctrl);
#endif

	/* store tom to sysinfo, and it will be used by dqs_timing */
	{
		msr_t msr;
		//[1M, TOM)
		msr = rdmsr(TOP_MEM);
		sysinfo->tom_k = ((msr.hi << 24) | (msr.lo >> 8)) >> 2;

		//[4G, TOM2)
		msr = rdmsr(TOP_MEM2);
		sysinfo->tom2_k = ((msr.hi << 24) | (msr.lo >> 8)) >> 2;
	}

	for (i = 0; i < controllers; i++) {
		if (!sysinfo->ctrl_present[i])
			continue;

		/* Skip everything if I don't have any memory on this controller */
		if (sysinfo->meminfo[i].dimm_mask == 0x00)
			continue;

		dqs_restore_MC_NVRAM((ctrl + i)->f2);
		sysinfo->mem_trained[i] = 1;	// mem was trained
	}
}
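The inline assembly at the end of the per-node loop is the subtle part: it pulses bit 1 of Dev:2x90 through ports 0xCF8/0xCFC directly, with the two config-space writes forced into a single 64-byte aligned run by the .align 64 directive. Logically it is equivalent to the sketch below (assuming DRAM_CONFIG_LOW is offset 0x90, as the pcidev computation suggests); it cannot be written this way in practice because the plain pci_* helpers give no alignment or single-instruction-stream guarantee:

	/* Illustration only - what the asm above does logically. */
	u32 val = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);          /* Dev:2x90 */
	pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, val | (1 << 1));   /* step 8: set exit from self refresh */
	pci_write_config32(ctrl[i].f2, DRAM_CONFIG_LOW, val & ~(1 << 1));  /* step 9: clear it immediately */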
@@ -3009,12 +3009,18 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
}
#endif

#include "exit_from_self.c"

static void sdram_enable(int controllers, const struct mem_controller *ctrl,
			 struct sys_info *sysinfo)
{
	int i;

#ifdef ACPI_IS_WAKEUP_EARLY
	int suspend = acpi_is_wakeup_early();
#else
	int suspend = 0;
#endif

#if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
	unsigned cpu_f0_f1[8];
	/* FIXME: How about 32 node machine later? */
@@ -3060,6 +3066,14 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl,
	printk_debug("\n");
#endif

	/* let's override the rest of the routine */
	if (suspend) {
		printk_debug("Wakeup!\n");
		exit_from_self(controllers, ctrl, sysinfo);
		printk_debug("Mem running !\n");
		return;
	}

	for (i = 0; i < controllers; i++) {
		uint32_t dcl, dch;
		if (!sysinfo->ctrl_present[ i ])
@@ -1821,10 +1821,94 @@ static void set_sysinfo_in_ram(unsigned val)
	set_htic_bit(0, val, 9);
}

#ifdef S3_NVRAM_EARLY
int s3_save_nvram_early(u32 dword, int size, int nvram_pos);
int s3_load_nvram_early(int size, u32 *old_dword, int nvram_pos);
#else
int s3_save_nvram_early(u32 dword, int size, int nvram_pos) {
}

int s3_load_nvram_early(int size, u32 *old_dword, int nvram_pos) {
	die("No memory NVRAM loader for DQS data! Unable to restore memory state\n");
}
#endif

static int save_index_to_pos(unsigned int dev, int size, int index, int nvram_pos) {
	u32 dword = pci_read_config32_index_wait(dev, 0x98, index);

	return s3_save_nvram_early(dword, size, nvram_pos);
}

static int load_index_to_pos(unsigned int dev, int size, int index, int nvram_pos) {

	u32 old_dword = pci_read_config32_index_wait(dev, 0x98, index);
	nvram_pos = s3_load_nvram_early(size, &old_dword, nvram_pos);
	pci_write_config32_index_wait(dev, 0x98, index, old_dword);
	return nvram_pos;
}

static int dqs_load_MC_NVRAM_ch(unsigned int dev, int ch, int pos) {
	/* 30 bytes per channel */
	ch *= 0x20;
	pos = load_index_to_pos(dev, 4, 0x00 + ch, pos);
	pos = load_index_to_pos(dev, 4, 0x01 + ch, pos);
	pos = load_index_to_pos(dev, 4, 0x02 + ch, pos);
	pos = load_index_to_pos(dev, 1, 0x03 + ch, pos);
	pos = load_index_to_pos(dev, 4, 0x04 + ch, pos);
	pos = load_index_to_pos(dev, 4, 0x05 + ch, pos);
	pos = load_index_to_pos(dev, 4, 0x06 + ch, pos);
	pos = load_index_to_pos(dev, 1, 0x07 + ch, pos);
	pos = load_index_to_pos(dev, 1, 0x10 + ch, pos);
	pos = load_index_to_pos(dev, 1, 0x13 + ch, pos);
	pos = load_index_to_pos(dev, 1, 0x16 + ch, pos);
	pos = load_index_to_pos(dev, 1, 0x19 + ch, pos);
	return pos;
}

static int dqs_save_MC_NVRAM_ch(unsigned int dev, int ch, int pos) {
	/* 30 bytes per channel */
	ch *= 0x20;
	pos = save_index_to_pos(dev, 4, 0x00 + ch, pos);
	pos = save_index_to_pos(dev, 4, 0x01 + ch, pos);
	pos = save_index_to_pos(dev, 4, 0x02 + ch, pos);
	pos = save_index_to_pos(dev, 1, 0x03 + ch, pos);
	pos = save_index_to_pos(dev, 4, 0x04 + ch, pos);
	pos = save_index_to_pos(dev, 4, 0x05 + ch, pos);
	pos = save_index_to_pos(dev, 4, 0x06 + ch, pos);
	pos = save_index_to_pos(dev, 1, 0x07 + ch, pos);
	pos = save_index_to_pos(dev, 1, 0x10 + ch, pos);
	pos = save_index_to_pos(dev, 1, 0x13 + ch, pos);
	pos = save_index_to_pos(dev, 1, 0x16 + ch, pos);
	pos = save_index_to_pos(dev, 1, 0x19 + ch, pos);
	return pos;
}

static void dqs_save_MC_NVRAM(unsigned int dev) {
	int pos = 0;
	u32 reg;
	printk_debug("DQS SAVE NVRAM: %x\n", dev);
	pos = dqs_save_MC_NVRAM_ch(dev, 0, pos);
	pos = dqs_save_MC_NVRAM_ch(dev, 1, pos);
	/* save the maxasync lat here */
	reg = pci_read_config32(dev, DRAM_CONFIG_HIGH);
	pos = s3_save_nvram_early(reg, 4, pos);
}
static void dqs_restore_MC_NVRAM(unsigned int dev) {
	int pos = 0;
	u32 reg;

	printk_debug("DQS RESTORE FROM NVRAM: %x\n", dev);
	pos = dqs_load_MC_NVRAM_ch(dev, 0, pos);
	pos = dqs_load_MC_NVRAM_ch(dev, 1, pos);
	/* load the maxasync lat here */
	pos = s3_load_nvram_early(4, &reg, pos);
	reg &= (DCH_MaxAsyncLat_MASK << DCH_MaxAsyncLat_SHIFT);
	reg |= pci_read_config32(dev, DRAM_CONFIG_HIGH);
	pci_write_config32(dev, DRAM_CONFIG_HIGH, reg);
}

#if MEM_TRAIN_SEQ == 0

#if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
static void dqs_timing(int controllers, const struct mem_controller *ctrl, tsc_t *tsc0, struct sys_info *sysinfo)
#else
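For reference, the per-node NVRAM footprint implied by the dqs_*_MC_NVRAM_ch index lists above (it is not spelled out in the patch) works out to:

/*
 * per channel: 6 entries x 4 bytes + 6 entries x 1 byte = 30 bytes
 * two channels:                                            60 bytes
 * MaxAsyncLat dword saved by dqs_save_MC_NVRAM():         + 4 bytes
 * total per memory controller:                             64 bytes
 */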
@@ -1891,6 +1975,7 @@ static void dqs_timing(int controllers, const struct mem_controller *ctrl, struc
		if(train_DqsRcvrEn(ctrl+i, 2, sysinfo)) goto out;
		printk_debug(" done\r\n");
		sysinfo->mem_trained[i]=1;
		dqs_save_MC_NVRAM((ctrl+i)->f2);
	}

 out: