S3 code in vendorcode folder.

Change ExecuteFinalHltInstruction to assembly code, so we can make
sure the code runs stackless.
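
The comment block in the new cahaltasm.S documents the entry point; mirrored
as a C declaration it would read roughly as follows (a sketch -- the
authoritative prototype lives in the AGESA headers):

  VOID
  ExecuteFinalHltInstruction (
    IN       UINT32 HaltFlags,
    IN       AP_MTRR_SETTINGS *ApMtrrSettingsList,
    IN       AMD_CONFIG_PARAMS *StdHeader
    );

The assembly version reads its arguments straight from the caller's stack and
pushes nothing of its own, which is what makes it safe to run after
cache-as-RAM is torn down.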

Change-Id: I783ced6cf7c5bc29c12a37aef29077e610d8957d
Signed-off-by: Zheng Bao <zheng.bao@amd.com>
Signed-off-by: zbao <fishbaozi@gmail.com>
Reviewed-on: http://review.coreboot.org/622
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
commit 3925622638 (parent 9bcdbf8eaa)
Author: zbao, 2012-04-05 13:20:50 +08:00 (committed by Stefan Reinauer)
7 changed files with 325 additions and 234 deletions


@@ -113,6 +113,7 @@ agesa_lib_src += $(AGESA_ROOT)/Proc/GNB/Gfx/GfxInitAtMidPost.c
agesa_lib_src += $(AGESA_ROOT)/Proc/CPU/Family/0x14/cpuF14CacheDefaults.c
agesa_lib_src += $(AGESA_ROOT)/Proc/Mem/Ps/ON/mpuon3.c
agesa_lib_src += $(AGESA_ROOT)/Proc/CPU/cahalt.c
+agesa_lib_src += $(AGESA_ROOT)/Proc/CPU/cahaltasm.S
agesa_lib_src += $(AGESA_ROOT)/Proc/Mem/Tech/mt.c
agesa_lib_src += $(AGESA_ROOT)/Proc/CPU/Family/0x14/ON/F14OnMicrocodePatchTables.c
agesa_lib_src += $(AGESA_ROOT)/Proc/CPU/Family/0x14/ON/F14OnPciTables.c


@@ -305,7 +305,9 @@ static __inline__ __attribute__((always_inline)) unsigned long __readcr0(void)
unsigned long value;
__asm__ __volatile__ (
"mov %%cr0, %[value]"
-: [value] "=a" (value));
+: [value] "=a" (value)
+:
+: "memory");
return value;
}
@@ -379,6 +381,7 @@ static __inline__ __attribute__((always_inline)) void __writecr0(unsigned long Data)
"mov %%eax, %%cr0"
:
: "a" (Data)
: "memory"
);
}
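
The "memory" clobbers added above tell GCC that the asm statement may read or
write memory, so the compiler will not cache values in registers or reorder
loads and stores across the CR0 access. A minimal standalone sketch of the
pattern (illustrative, not AGESA code):

  static __inline__ __attribute__((always_inline)) unsigned long read_cr0_sketch(void)
  {
    unsigned long value;
    /* The "memory" clobber orders this read against surrounding memory
     * accesses, which matters once CR0.CD starts toggling the cache. */
    __asm__ __volatile__ ("mov %%cr0, %[value]"
      : [value] "=r" (value)
      :
      : "memory");
    return value;
  }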
@@ -508,13 +511,16 @@ static __inline__ __attribute__((always_inline)) void __debugbreak(void)
__asm__ __volatile__ ("int3");
}
static __inline__ __attribute__((always_inline)) void __invd(void)
{
__asm__ __volatile__ ("invd");
}
static __inline__ __attribute__((always_inline)) void __wbinvd(void)
{
__asm__ __volatile__ ("wbinvd");
}
static __inline__ __attribute__((always_inline)) void __lidt(void *Source)
{
__asm__ __volatile__("lidt %0" : : "m"(*(short*)Source));


@@ -106,149 +106,16 @@ ExecuteWbinvdInstruction (
*/
//----------------------------------------------------------------------------
STATIC
VOID
PrimaryCoreFunctions (AP_MTRR_SETTINGS *ApMtrrSettingsList)
{
UINT64 data;
UINT32 msrno;
// Configure the MTRRs on the AP so
// when it runs remote code it will execute
// out of RAM instead of ROM.
// Disable MTRRs and turn on modification enable bit
data = __readmsr (0xC0010010); // MTRR_SYS_CFG
data &= ~(1 << 18); // MtrrFixDramEn
data &= ~(1 << 20); // MtrrVarDramEn
data |= (1 << 19); // MtrrFixDramModEn
data |= (1 << 17); // SysUcLockEn
__writemsr (0xC0010010, data);
// Set 7FFFFh-00000h and 9FFFFh-80000h as WB DRAM
__writemsr (0x250, 0x1E1E1E1E1E1E1E1E); // AMD_MTRR_FIX64k_00000
__writemsr (0x258, 0x1E1E1E1E1E1E1E1E); // AMD_MTRR_FIX16k_80000
// Set BFFFFh-A0000h, DFFFFh-C0000h as Uncacheable Memory-mapped IO
__writemsr (0x259, 0); // AMD_AP_MTRR_FIX16k_A0000
__writemsr (0x268, 0); // AMD_MTRR_FIX4k_C0000
__writemsr (0x269, 0); // AMD_MTRR_FIX4k_C8000
__writemsr (0x26A, 0); // AMD_MTRR_FIX4k_D0000
__writemsr (0x26B, 0); // AMD_MTRR_FIX4k_D8000
// Set FFFFFh-E0000h as Uncacheable Memory
for (msrno = 0x26C; msrno <= 0x26F; msrno++)
__writemsr (msrno, 0x1818181818181818);
// If IBV provided settings for Fixed-Sized MTRRs,
// overwrite the default settings.
if ((uintptr_t) ApMtrrSettingsList != 0 && (uintptr_t) ApMtrrSettingsList != 0xFFFFFFFF)
{
int index;
for (index = 0; ApMtrrSettingsList [index].MsrAddr != CPU_LIST_TERMINAL; index++)
__writemsr (ApMtrrSettingsList [index].MsrAddr, ApMtrrSettingsList [index].MsrData);
}
// restore variable MTRR6 and MTRR7 to default states
for (msrno = 0x20F; msrno >= 0x20C; msrno--) // decrement so that the pair is disabled before the base is cleared
__writemsr (msrno, 0);
// Enable fixed-range and variable-range MTRRs
// Set Fixed-Range Enable (FE) and MTRR Enable (E) bits
__writemsr (0x2FF, __readmsr (0x2FF) | 0xC00);
// Enable Top-of-Memory setting
// Enable use of RdMem/WrMem bits attributes
data = __readmsr (0xC0010010); // MTRR_SYS_CFG
data |= (1 << 18); // MtrrFixDramEn
data |= (1 << 20); // MtrrVarDramEn
data &= ~(1 << 19); // MtrrFixDramModEn
__writemsr (0xC0010010, data);
}
//----------------------------------------------------------------------------
/* see cahaltasm.S
VOID
ExecuteFinalHltInstruction (
IN UINT32 SharedCore,
IN UINT32 HaltFlags,
IN AP_MTRR_SETTINGS *ApMtrrSettingsList,
IN AMD_CONFIG_PARAMS *StdHeader
)
{
int abcdRegs [4];
UINT32 cr0val;
UINT64 data;
cr0val = __readcr0 ();
if (SharedCore & 2)
{
// set CombineCr0Cd and enable cache in CR0
__writemsr (MSR_CU_CFG3, __readmsr (MSR_CU_CFG3) | 1ULL << 49);
__writecr0 (cr0val & ~0x60000000);
}
else
__writecr0 (cr0val | 0x60000000);
if (SharedCore & 1) PrimaryCoreFunctions (ApMtrrSettingsList);
// Make sure not to touch any Shared MSR from this point on
// Restore settings that were temporarily overridden for the cache as ram phase
data = __readmsr (0xC0011022); // MSR_DC_CFG
data &= ~(1 << 4); // DC_DIS_SPEC_TLB_RLD
data &= ~(1 << 8); // DIS_CLR_WBTOL2_SMC_HIT
data &= ~(1 << 13); // DIS_HW_PF
__writemsr (0xC0011022, data);
data = __readmsr (0xC0011021); // MSR_IC_CFG - C001_1021
data &= ~(1 << 9); // IC_DIS_SPEC_TLB_RLD
__writemsr (0xC0011021, data);
// AMD_DISABLE_STACK_FAMILY_HOOK
__cpuid (abcdRegs, 1);
if ((abcdRegs [0] >> 20) == 1) //-----family 10h (Hydra) only-----
{
data = __readmsr (0xC0011022);
data &= ~(1 << 4);
data &= ~(1 << 8);
data &= ~(1 << 13);
__writemsr (0xC0011022, data);
data = __readmsr (0xC0011021);
data &= ~(1 << 14);
data &= ~(1 << 9);
__writemsr (0xC0011021, data);
data = __readmsr (0xC001102A);
data &= ~(1 << 15);
data &= ~(1ull << 35);
__writemsr (0xC001102A, data);
}
else if ((abcdRegs [0] >> 20) == 6) //-----family 15h (Orochi) only-----
{
data = __readmsr (0xC0011020);
data &= ~(1 << 28);
__writemsr (0xC0011020, data);
data = __readmsr (0xC0011021);
data &= ~(1 << 9);
__writemsr (0xC0011021, data);
data = __readmsr (0xC0011022);
data &= ~(1 << 4);
data &= ~(1l << 13);
__writemsr (0xC0011022, data);
}
for (;;)
{
_disable ();
__halt ();
}
}
*/
//----------------------------------------------------------------------------
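
Each 64-bit fixed-range MTRR packs eight one-byte memory-type fields, one per
sub-range, so the 0x1E1E1E1E1E1E1E1E value above is simply the type 0x1E
repeated eight times; 0x1E is WB (type 6) with the AMD RdMem/WrMem bits set,
which is why MtrrFixDramModEn is enabled around these writes. An illustrative
helper, not part of the commit:

  /* Build a fixed-range MTRR value with the same one-byte memory type
   * in all eight sub-ranges. */
  static UINT64 FixedMtrrFill (UINT8 MemType)
  {
    UINT64 Value = 0;
    int i;
    for (i = 0; i < 8; i++) {
      Value |= (UINT64) MemType << (8 * i);  /* one byte per sub-range */
    }
    return Value;  /* FixedMtrrFill (0x1E) == 0x1E1E1E1E1E1E1E1E */
  }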


@@ -0,0 +1,203 @@
/*
* Copyright (c) 2011, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
.include "src/vendorcode/amd/agesa/f14/gcccar.inc"
.code32
.align 4
.globl ExecuteFinalHltInstruction
.type ExecuteFinalHltInstruction, @function
/* ExecuteFinalHltInstruction (
IN UINT32 HaltFlags,
IN AP_MTRR_SETTINGS *ApMtrrSettingsList,
IN AMD_CONFIG_PARAMS *StdHeader
)
*/
/* This function disables CAR. We don't care about the stack on this CPU */
ExecuteFinalHltInstruction:
movl 4(%esp), %esi /* HaltFlags*/
movl 8(%esp), %edi /* ApMtrrSettingList */
/* Do these special steps in case the core is part of a compute unit.
* Note: The following bits are family-specific flags that get set at build time
* and indicate things like "family cache control methodology", etc.
* esi bit0 = 0 -> not a Primary core
* esi bit0 = 1 -> Primary core
* esi bit1 = 0 -> Cache disable
* esi bit1 = 1 -> Cache enable
*/
bt $1, %esi /* .if (esi & 2h) */
jz 0f
/* Set CombineCr0Cd bit */
movl $CU_CFG3, %ecx
rdmsr
bts $(COMBINE_CR0_CD - 32), %edx
wrmsr
/* Clear the CR0.CD bit */
movl %cr0, %eax /* Make sure cache is enabled for all APs */
btr $CR0_CD, %eax
btr $CR0_NW, %eax
mov %eax, %cr0 /* Write back to CR0 */
jmp 1f /* .else */
0:
movl %cr0, %eax /* Make sure cache is disabled for all APs */
bts $CR0_CD, %eax /* Disable cache */
bts $CR0_NW, %eax
movl %eax, %cr0 /* Write back to CR0 */
1: /* .endif */
bt $0, %esi /* .if (esi & 1h) */
jz 2f
/* This core is a primary core and needs to do all the MTRRs, including shared MTRRs. */
movl %edi, %esi /* Get ApMtrrSettingList */
/* Configure the MTRRs on the AP so
* when it runs remote code it will execute
* out of RAM instead of ROM.
*/
/* Disable MTRRs and turn on modification enable bit */
movl $MTRR_SYS_CFG, %ecx
rdmsr
btr $MTRR_VAR_DRAM_EN, %eax /* Disable */
bts $MTRR_FIX_DRAM_MOD_EN, %eax /* Enable */
btr $MTRR_FIX_DRAM_EN, %eax /* Disable */
bts $SYS_UC_LOCK_EN, %eax
wrmsr
/* Setup default values for Fixed-Sized MTRRs */
/* Set 7FFFFh-00000h as WB */
movl $AMD_AP_MTRR_FIX64k_00000, %ecx
movl $0x1E1E1E1E, %eax
movl %eax, %edx
wrmsr
/* Set 9FFFFh-80000h also as WB */
movl $AMD_AP_MTRR_FIX16k_80000, %ecx
wrmsr
/* Set BFFFFh-A0000h as Uncacheable Memory-mapped IO */
movl $AMD_AP_MTRR_FIX16k_A0000, %ecx
xorl %eax, %eax
xorl %edx, %edx
wrmsr
/* Set DFFFFh-C0000h as Uncacheable Memory-mapped IO */
xorl %eax, %eax
xorl %edx, %edx
movl $AMD_AP_MTRR_FIX4k_C0000, %ecx
CDLoop:
wrmsr
inc %ecx
cmp $AMD_AP_MTRR_FIX4k_D8000, %ecx
jbe CDLoop
/* Set FFFFFh-E0000h as Uncacheable Memory */
movl $0x18181818, %eax
movl %eax, %edx
mov $AMD_AP_MTRR_FIX4k_E0000, %ecx
EFLoop:
wrmsr
inc %ecx
cmp $AMD_AP_MTRR_FIX4k_F8000, %ecx
jbe EFLoop
/* If IBV provided settings for Fixed-Sized MTRRs,
* overwrite the default settings. */
cmp $0, %esi /*.if ((esi != 0) && (esi != 0FFFFFFFFh)) */
jz 4f
cmp $0xFFFFFFFF, %esi
jz 4f
5:
mov (%esi), %ecx /* (AP_MTRR_SETTINGS ptr [esi]).MsrAddr */
/* While we are not at the end of the list */
cmp $CPU_LIST_TERMINAL, %ecx /* .while (ecx != CPU_LIST_TERMINAL)*/
je 4f
/* TODO - coreboot isn't checking for valid data.
* Ensure that the MSR address is valid for Fixed-Sized MTRRs */
/*.if ( ((ecx >= AMD_AP_MTRR_FIX4k_C0000) && (ecx <= AMD_AP_MTRR_FIX4k_F8000)) || \
(ecx == AMD_AP_MTRR_FIX64k_00000) || (ecx == AMD_AP_MTRR_FIX16k_80000 ) || \
(ecx == AMD_AP_MTRR_FIX16k_A0000))
*/
mov 4(%esi), %eax /* MsrData */
mov 8(%esi), %edx /* MsrData */
wrmsr
/* .endif */
add $12, %esi /* sizeof (AP_MTRR_SETTINGS) */
jmp 5b /* .endw */
4: /* .endif */
/* restore variable MTRR6 and MTRR7 to default states */
movl $AMD_MTRR_VARIABLE_BASE6, %ecx /* clear MTRRPhysBase6 MTRRPhysMask6 */
xor %eax, %eax /* and MTRRPhysBase7 MTRRPhysMask7 */
xor %edx, %edx
6:
cmp $0x10, %cl /* .while (cl < 010h) */
jae 6f
wrmsr
inc %ecx
jmp 6b
6: /* .endw */
/* Enable fixed-range and variable-range MTRRs */
mov $AMD_MTRR_DEFTYPE, %ecx
rdmsr
bts $MTRR_DEF_TYPE_EN, %eax /* MtrrDefTypeEn */
bts $MTRR_DEF_TYPE_FIX_EN, %eax /* MtrrDefTypeFixEn */
wrmsr
/* Enable Top-of-Memory setting */
/* Enable use of RdMem/WrMem bits attributes */
mov $MTRR_SYS_CFG, %ecx
rdmsr
bts $MTRR_VAR_DRAM_EN, %eax /* Enable */
btr $MTRR_FIX_DRAM_MOD_EN, %eax /* Disable */
bts $MTRR_FIX_DRAM_EN, %eax /* Enable */
wrmsr
bts $FLAG_IS_PRIMARY, %esi
jmp 3f /* .else ; end if primary core */
2:
xor %esi, %esi
3: /* .endif*/
/* Make sure not to touch any Shared MSR from this point on */
AMD_DISABLE_STACK_FAMILY_HOOK
xor %eax, %eax
7:
cli
hlt
jmp 7b /* ExecuteHltInstruction */
.size ExecuteFinalHltInstruction, .-ExecuteFinalHltInstruction
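
The list walk above (mov (%esi), 4(%esi), 8(%esi), then add $12) assumes
twelve-byte entries. The matching structure, sketched from those comments
rather than copied from AGESA.h, where the canonical definition lives:

  /* On the 32-bit ABI the UINT64 member is 4-byte aligned, so
   * sizeof (AP_MTRR_SETTINGS) == 12, matching the add $12 stride. */
  typedef struct {
    UINT32 MsrAddr;  /* MSR number; CPU_LIST_TERMINAL (0xFFFFFFFF) ends the list */
    UINT64 MsrData;  /* low dword goes to eax, high dword to edx for wrmsr */
  } AP_MTRR_SETTINGS;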


@@ -245,7 +245,7 @@ AmdS3Save (
HeapStatus = AmdS3SaveParams->StdHeader.HeapStatus;
AmdS3SaveParams->StdHeader.HeapStatus = HEAP_S3_RESUME;
-AmdS3SaveParams->StdHeader.HeapBasePtr = (UINT64) HeapPtr;
+AmdS3SaveParams->StdHeader.HeapBasePtr = (VOID *) HeapPtr;
for (i = 0; i < S3LATE_TABLE_SIZE; i++) {
if (HeapPtrs[i] != NULL) {


@@ -259,30 +259,30 @@ MemFS3GetDeviceList (
(*DeviceBlockHdrPtr)->RelativeOrMaskOffset = (UINT16) AllocHeapParams.RequestedBufferSize;
// Copy device list on the stack to the heap.
-BufferOffset = sizeof (DEVICE_BLOCK_HEADER) + (UINT64) AllocHeapParams.BufferPtr;
+BufferOffset = sizeof (DEVICE_BLOCK_HEADER) + (UINT64) (UINT32) AllocHeapParams.BufferPtr;
for (Die = 0; Die < DieCount; Die ++) {
for (i = PRESELFREF; i <= POSTSELFREF; i ++) {
// Copy PCI device descriptor to the heap if it exists.
if (DeviceDescript[Die].PCIDevice[i].RegisterListID != 0xFFFFFFFF) {
-LibAmdMemCopy ((VOID *) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (PCI_DEVICE_DESCRIPTOR), StdHeader);
+LibAmdMemCopy ((VOID *)(UINT32) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (PCI_DEVICE_DESCRIPTOR), StdHeader);
(*DeviceBlockHdrPtr)->NumDevices ++;
BufferOffset += sizeof (PCI_DEVICE_DESCRIPTOR);
}
// Copy conditional PCI device descriptor to the heap if it exists.
if (DeviceDescript[Die].CPCIDevice[i].RegisterListID != 0xFFFFFFFF) {
-LibAmdMemCopy ((VOID *) BufferOffset, &(DeviceDescript[Die].CPCIDevice[i]), sizeof (CONDITIONAL_PCI_DEVICE_DESCRIPTOR), StdHeader);
+LibAmdMemCopy ((VOID *)(UINT32) BufferOffset, &(DeviceDescript[Die].CPCIDevice[i]), sizeof (CONDITIONAL_PCI_DEVICE_DESCRIPTOR), StdHeader);
(*DeviceBlockHdrPtr)->NumDevices ++;
BufferOffset += sizeof (CONDITIONAL_PCI_DEVICE_DESCRIPTOR);
}
// Copy MSR device descriptor to the heap if it exists.
if (DeviceDescript[Die].MSRDevice[i].RegisterListID != 0xFFFFFFFF) {
-LibAmdMemCopy ((VOID *) BufferOffset, &(DeviceDescript[Die].MSRDevice[i]), sizeof (MSR_DEVICE_DESCRIPTOR), StdHeader);
+LibAmdMemCopy ((VOID *)(UINT32) BufferOffset, &(DeviceDescript[Die].MSRDevice[i]), sizeof (MSR_DEVICE_DESCRIPTOR), StdHeader);
(*DeviceBlockHdrPtr)->NumDevices ++;
BufferOffset += sizeof (MSR_DEVICE_DESCRIPTOR);
}
// Copy conditional MSR device descriptor to the heap if it exists.
if (DeviceDescript[Die].CMSRDevice[i].RegisterListID != 0xFFFFFFFF) {
-LibAmdMemCopy ((VOID *) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (CONDITIONAL_MSR_DEVICE_DESCRIPTOR), StdHeader);
+LibAmdMemCopy ((VOID *)(UINT32) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (CONDITIONAL_MSR_DEVICE_DESCRIPTOR), StdHeader);
(*DeviceBlockHdrPtr)->NumDevices ++;
BufferOffset += sizeof (CONDITIONAL_MSR_DEVICE_DESCRIPTOR);
}
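
The added (UINT32) casts are the substance of this hunk: BufferOffset is a
UINT64, and on a 32-bit build GCC will not quietly convert an integer of a
different width to a pointer. A minimal sketch of the idiom, assuming an
ILP32 target:

  /* Truncate the 64-bit offset to the native pointer width explicitly,
   * then convert; avoids the int-to-pointer-size warning. */
  static VOID *OffsetToPointer (UINT64 BufferOffset)
  {
    return (VOID *) (UINT32) BufferOffset;
  }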


@@ -61,6 +61,20 @@ AMD_MTRR_FIX4k_E8000 = 0x026D
AMD_MTRR_FIX4k_F0000 = 0x026E
AMD_MTRR_FIX4k_F8000 = 0x026F
/* Reproduced from AGESA.h */
AMD_AP_MTRR_FIX64k_00000 = 0x00000250
AMD_AP_MTRR_FIX16k_80000 = 0x00000258
AMD_AP_MTRR_FIX16k_A0000 = 0x00000259
AMD_AP_MTRR_FIX4k_C0000 = 0x00000268
AMD_AP_MTRR_FIX4k_C8000 = 0x00000269
AMD_AP_MTRR_FIX4k_D0000 = 0x0000026A
AMD_AP_MTRR_FIX4k_D8000 = 0x0000026B
AMD_AP_MTRR_FIX4k_E0000 = 0x0000026C
AMD_AP_MTRR_FIX4k_E8000 = 0x0000026D
AMD_AP_MTRR_FIX4k_F0000 = 0x0000026E
AMD_AP_MTRR_FIX4k_F8000 = 0x0000026F
CPU_LIST_TERMINAL = 0xFFFFFFFF
AMD_MTRR_DEFTYPE = 0x02FF
WB_DRAM_TYPE = 0x1E /* MemType - memory type */
MTRR_DEF_TYPE_EN = 11 /* MtrrDefTypeEn - variable and fixed MTRRs default enabled */
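
MTRR_DEF_TYPE_EN is bit 11 (E) of the MTRRdefType register at 0x2FF; together
with the fixed-range enable FE (bit 10, used as MTRR_DEF_TYPE_FIX_EN in
cahaltasm.S) it forms the 0xC00 mask the old C code in cahalt.c ORed in. A
quick illustrative check, assuming FE is bit 10 as in the architectural
register layout:

  /* Bits 11 (E) and 10 (FE) of MTRRdefType give the 0xC00 mask. */
  static UINT32 MtrrDefTypeEnableMask (void)
  {
    return (1UL << 11) | (1UL << 10);  /* == 0xC00 */
  }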