Whitespace and other code cleanup in preparation for AMD Barcelona support.

Signed-off-by: Marc Jones <marc.jones@amd.com>
Reviewed-by: Jordan Crouse <jordan.crouse@amd.com>
Acked-by: Myles Watson <myles@pel.cs.byu.edu>



git-svn-id: svn://svn.coreboot.org/coreboot/trunk@3013 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
commit 2006b38fed (parent 244dd82fd6)
Marc Jones, 2007-12-19 00:47:09 +00:00
2 changed files with 205 additions and 209 deletions


@@ -1,8 +1,25 @@
-/* by yhlu 6.2005 */
-/* yhlu 2005.12 make it support HDT Memory Debuggers with Disassmbly, please select the PCI Bus mem for Phys Type*/
-/* yhlu 2006.3 copy data from cache to ram and reserve 0x1000 for global variables */
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2005-2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
 #define CacheSize DCACHE_RAM_SIZE
 #define CacheBase (0xd0000 - CacheSize)
 /* leave some space for global variable to pass to RAM stage */
 #define GlobalVarSize DCACHE_RAM_GLOBAL_VAR_SIZE
@@ -10,187 +27,187 @@
 #include <cpu/amd/mtrr.h>
 /* Save the BIST result */
 movl %eax, %ebp
-/*for normal part %ebx already contain cpu_init_detected from fallback call */
+/* for normal part %ebx already contain cpu_init_detected from fallback call */
 cache_as_ram_setup:
 /* hope we can skip the double set for normal part */
-#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
+#if ((HAVE_FAILOVER_BOOT == 1) && (USE_FAILOVER_IMAGE == 1)) || ((HAVE_FAILOVER_BOOT == 0) && (USE_FALLBACK_IMAGE == 1))
 /* check if cpu_init_detected */
 movl $MTRRdefType_MSR, %ecx
 rdmsr
-andl $0x00000800, %eax
+andl $(1 << 11), %eax
 movl %eax, %ebx /* We store the status */
 /* Set MtrrFixDramModEn for clear fixed mtrr */
 enable_fixed_mtrr_dram_modify:
 movl $SYSCFG_MSR, %ecx
 rdmsr
-andl $(~(SYSCFG_MSR_MtrrFixDramEn|SYSCFG_MSR_MtrrVarDramEn)), %eax
+andl $(~(SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_MtrrVarDramEn)), %eax
 orl $SYSCFG_MSR_MtrrFixDramModEn, %eax
 wrmsr
-/*Clear all MTRRs */
+/* Clear all MTRRs */
 xorl %edx, %edx
 movl $fixed_mtrr_msr, %esi
 clear_fixed_var_mtrr:
 lodsl (%esi), %eax
 testl %eax, %eax
 jz clear_fixed_var_mtrr_out
 movl %eax, %ecx
 xorl %eax, %eax
 wrmsr
 jmp clear_fixed_var_mtrr
 clear_fixed_var_mtrr_out:
 #if CacheSize == 0x10000
 /* enable caching for 64K using fixed mtrr */
-movl $0x268, %ecx /* fix4k_c0000*/
+movl $0x268, %ecx /* fix4k_c0000 */
 movl $0x06060606, %eax /* WB IO type */
 movl %eax, %edx
 wrmsr
 movl $0x269, %ecx
 wrmsr
 #endif
 #if CacheSize == 0xc000
 /* enable caching for 16K using fixed mtrr */
-movl $0x268, %ecx /* fix4k_c4000*/
+movl $0x268, %ecx /* fix4k_c4000 */
 movl $0x06060606, %edx /* WB IO type */
 xorl %eax, %eax
 wrmsr
 /* enable caching for 32K using fixed mtrr */
-movl $0x269, %ecx /* fix4k_c8000*/
+movl $0x269, %ecx /* fix4k_c8000 */
 movl $0x06060606, %eax /* WB IO type */
 movl %eax, %edx
 wrmsr
 #endif
 #if CacheSize == 0x8000
 /* enable caching for 32K using fixed mtrr */
-movl $0x269, %ecx /* fix4k_c8000*/
+movl $0x269, %ecx /* fix4k_c8000 */
 movl $0x06060606, %eax /* WB IO type */
 movl %eax, %edx
 wrmsr
 #endif
 #if CacheSize < 0x8000
 /* enable caching for 16K/8K/4K using fixed mtrr */
 movl $0x269, %ecx /* fix4k_cc000*/
 #if CacheSize == 0x4000
 movl $0x06060606, %edx /* WB IO type */
 #endif
 #if CacheSize == 0x2000
 movl $0x06060000, %edx /* WB IO type */
 #endif
 #if CacheSize == 0x1000
 movl $0x06000000, %edx /* WB IO type */
 #endif
 xorl %eax, %eax
 wrmsr
 #endif
 /* enable memory access for first MBs using top_mem */
 movl $TOP_MEM, %ecx
 xorl %edx, %edx
 movl $(((CONFIG_LB_MEM_TOPK << 10) + TOP_MEM_MASK) & ~TOP_MEM_MASK) , %eax
 wrmsr
 #endif /* USE_FAILOVER_IMAGE == 1*/
-#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE == 0)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==0))
+#if ((HAVE_FAILOVER_BOOT == 1) && (USE_FAILOVER_IMAGE == 0)) || ((HAVE_FAILOVER_BOOT == 0) && (USE_FALLBACK_IMAGE == 0))
 /* disable cache */
 movl %cr0, %eax
-orl $(0x1<<30),%eax
+orl $(1 << 30),%eax
 movl %eax, %cr0
 #endif
 #if defined(XIP_ROM_SIZE) && defined(XIP_ROM_BASE)
 /* enable write base caching so we can do execute in place
  * on the flash rom.
  */
 movl $0x202, %ecx
 xorl %edx, %edx
 movl $(XIP_ROM_BASE | MTRR_TYPE_WRBACK), %eax
 wrmsr
 movl $0x203, %ecx
-movl $((1<<(CPU_ADDR_BITS-32))-1), %edx /* AMD 40 bit */
+movl $((1 << (CPU_ADDR_BITS - 32)) - 1), %edx /* AMD 40 bit for K8, 48 bit for GH */
 movl $(~(XIP_ROM_SIZE - 1) | 0x800), %eax
 wrmsr
 #endif /* XIP_ROM_SIZE && XIP_ROM_BASE */
-#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
+#if ((HAVE_FAILOVER_BOOT == 1) && (USE_FAILOVER_IMAGE == 1)) || ((HAVE_FAILOVER_BOOT == 0) && (USE_FALLBACK_IMAGE == 1))
 /* Set the default memory type and enable fixed and variable MTRRs */
 movl $MTRRdefType_MSR, %ecx
 xorl %edx, %edx
 /* Enable Variable and Fixed MTRRs */
 movl $0x00000c00, %eax
 wrmsr
 /* Enable the MTRRs and IORRs in SYSCFG */
 movl $SYSCFG_MSR, %ecx
 rdmsr
 orl $(SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn), %eax
 wrmsr
 #endif
 /* enable cache */
 movl %cr0, %eax
-andl $0x9fffffff,%eax
+andl $0x9fffffff, %eax
 movl %eax, %cr0
-#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
+#if ((HAVE_FAILOVER_BOOT == 1) && (USE_FAILOVER_IMAGE == 1)) || ((HAVE_FAILOVER_BOOT == 0) && (USE_FALLBACK_IMAGE == 1))
 /* Read the range with lodsl*/
 cld
 movl $CacheBase, %esi
-movl $(CacheSize>>2), %ecx
-rep
-lodsl
+movl $(CacheSize >> 2), %ecx
+rep lodsl
 /* Clear the range */
 movl $CacheBase, %edi
-movl $(CacheSize>>2), %ecx
+movl $(CacheSize >> 2), %ecx
 xorl %eax, %eax
-rep
-stosl
+rep stosl
 #endif /*USE_FAILOVER_IMAGE == 1*/
 /* set up the stack pointer */
-movl $(CacheBase+CacheSize - GlobalVarSize), %eax
+movl $(CacheBase + CacheSize - GlobalVarSize), %eax
 movl %eax, %esp
 /* Restore the BIST result */
 movl %ebp, %eax
 /* We need to set ebp ? No need */
 movl %esp, %ebp
 pushl %ebx /* init detected */
 pushl %eax /* bist */
 call cache_as_ram_main
 /* We will not go back */
 fixed_mtrr_msr:
 .long 0x250, 0x258, 0x259
 .long 0x268, 0x269, 0x26A
 .long 0x26B, 0x26C, 0x26D
 .long 0x26E, 0x26F
 var_mtrr_msr:
 .long 0x200, 0x201, 0x202, 0x203
 .long 0x204, 0x205, 0x206, 0x207
 .long 0x208, 0x209, 0x20A, 0x20B
 .long 0x20C, 0x20D, 0x20E, 0x20F
 var_iorr_msr:
 .long 0xC0010016, 0xC0010017, 0xC0010018, 0xC0010019
 mem_top:
 .long 0xC001001A, 0xC001001D
 .long 0x000 /* NULL, end of table */
 cache_as_ram_setup_out:
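To make the address arithmetic in the assembly above easier to follow, here is a small stand-alone C sketch of the cache-as-RAM layout those #defines produce. The two DCACHE_RAM_* values are example settings only (the assembly supports several sizes via its #if ladder); they are not taken from any particular board.

/* Stand-alone sketch of the CAR layout implied by the assembly above.
 * The two DCACHE_RAM_* values are examples; real boards set their own. */
#include <stdio.h>

#define DCACHE_RAM_SIZE            0x8000  /* example: 32 KiB of cache-as-RAM */
#define DCACHE_RAM_GLOBAL_VAR_SIZE 0x1000  /* example: 4 KiB reserved for globals */

#define CacheSize     DCACHE_RAM_SIZE
#define CacheBase     (0xd0000 - CacheSize)  /* region ends just below 0xd0000 */
#define GlobalVarSize DCACHE_RAM_GLOBAL_VAR_SIZE

int main(void)
{
	/* The assembly sets %esp to CacheBase + CacheSize - GlobalVarSize, so
	 * the stack grows down from just below the global-variable area that
	 * is later handed to the RAM stage. */
	unsigned int car_start = CacheBase;
	unsigned int car_end   = CacheBase + CacheSize;   /* exclusive */
	unsigned int stack_top = car_end - GlobalVarSize; /* initial %esp */

	printf("CAR region : 0x%05x - 0x%05x\n", car_start, car_end - 1);
	printf("globals    : 0x%05x - 0x%05x\n", stack_top, car_end - 1);
	printf("initial esp: 0x%05x\n", stack_top);
	return 0;
}

The second file touched by this commit, the K8 microcode update support, follows.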


@@ -1,43 +1,22 @@
-/*============================================================================
-Copyright 2005 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
-This software and any related documentation (the "Materials") are the
-confidential proprietary information of AMD. Unless otherwise provided in a
-software agreement specifically licensing the Materials, the Materials are
-provided in confidence and may not be distributed, modified, or reproduced in
-whole or in part by any means.
-LIMITATION OF LIABILITY: THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY
-EXPRESS OR IMPLIED WARRANTY OF ANY KIND, INCLUDING BUT NOT LIMITED TO
-WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, TITLE, FITNESS FOR ANY
-PARTICULAR PURPOSE, OR WARRANTIES ARISING FROM CONDUCT, COURSE OF DEALING, OR
-USAGE OF TRADE. IN NO EVENT SHALL AMD OR ITS LICENSORS BE LIABLE FOR ANY
-DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF PROFITS,
-BUSINESS INTERRUPTION, OR LOSS OF INFORMATION) ARISING OUT OF THE USE OF OR
-INABILITY TO USE THE MATERIALS, EVEN IF AMD HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES. BECAUSE SOME JURISDICTIONS PROHIBIT THE EXCLUSION
-OR LIMITATION OF LIABILITY FOR CONSEQUENTIAL OR INCIDENTAL DAMAGES, THE ABOVE
-LIMITATION MAY NOT APPLY TO YOU.
-AMD does not assume any responsibility for any errors which may appear in the
-Materials nor any responsibility to support or update the Materials. AMD
-retains the right to modify the Materials at any time, without notice, and is
-not obligated to provide such modified Materials to you.
-NO SUPPORT OBLIGATION: AMD is not obligated to furnish, support, or make any
-further information, software, technical information, know-how, or show-how
-available to you.
-U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED
-RIGHTS." Use, duplication, or disclosure by the Government is subject to the
-restrictions as set forth in FAR 52.227-14 and DFAR 252.227-7013, et seq., or
-its successor. Use of the Materials by the Government constitutes
-acknowledgement of AMD's proprietary rights in them.
-============================================================================*/
-//@DOC
-// microcode.c
-/*
-$1.0$
-*/
-// Description: microcode patch support for k8
-// by yhlu
-//
-//============================================================================
+/*
+ * This file is part of the LinuxBIOS project.
+ *
+ * Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
 #include <stdint.h>
 #include <console/console.h>
 #include <cpu/cpu.h>
@@ -46,64 +25,64 @@ $1.0$
 #include <cpu/x86/cache.h>
 struct microcode {
-uint32_t date_code;
-uint32_t patch_id;
-uint16_t m_patch_data_id;
-uint8_t m_patch_data_len;
-uint8_t init_flag;
-uint32_t m_patch_data_cksum;
-uint32_t nb_dev_id;
-uint32_t ht_io_hub_dev_id;
-uint16_t processor_rev_id;
-uint8_t ht_io_hub_rev_id;
-uint8_t nb_rev_id;
-uint8_t bios_api_rev;
-uint8_t resv1[3];
-uint32_t match_reg[8];
-uint8_t m_patch_data[896];
-uint8_t resv2[896];
-uint8_t x86_code_present;
-uint8_t x86_code_entry[191];
+u32 date_code;
+u32 patch_id;
+u16 m_patch_data_id;
+u8 m_patch_data_len;
+u8 init_flag;
+u32 m_patch_data_cksum;
+u32 nb_dev_id;
+u32 ht_io_hub_dev_id;
+u16 processor_rev_id;
+u8 ht_io_hub_rev_id;
+u8 nb_rev_id;
+u8 bios_api_rev;
+u8 resv1[3];
+u32 match_reg[8];
+u8 m_patch_data[896];
+u8 resv2[896];
+u8 x86_code_present;
+u8 x86_code_entry[191];
 };
-static int need_apply_patch(struct microcode *m, unsigned equivalent_processor_rev_id)
+static int need_apply_patch(struct microcode *m, u32 equivalent_processor_rev_id)
 {
 if (m->processor_rev_id != equivalent_processor_rev_id) return 0;
 if (m->nb_dev_id) {
 //look at the device id, if not found return;
 //if(m->nb_rev_id != installed_nb_rev_id) return 0;
 }
 if (m->ht_io_hub_dev_id) {
 //look at the device id, if not found return;
 //if(m->ht_io_hub_rev_id != installed_ht_io_bub_rev_id) return 0;
 }
 if (m->x86_code_present) {
 //if(!x86_code_execute()) return 0;
 }
 return 1;
 }
-void amd_update_microcode(void *microcode_updates, unsigned equivalent_processor_rev_id)
+void amd_update_microcode(void *microcode_updates, u32 equivalent_processor_rev_id)
 {
-unsigned int patch_id, new_patch_id;
+u32 patch_id, new_patch_id;
 struct microcode *m;
 char *c;
 msr_t msr;
 msr = rdmsr(0x8b);
 patch_id = msr.lo;
@@ -117,15 +96,15 @@ void amd_update_microcode(void *microcode_updates, unsigned equivalent_processor
 //apply patch
 msr.hi = 0;
-msr.lo = (uint32_t)m;
+msr.lo = (u32)m;
 wrmsr(0xc0010020, msr);
 printk_debug("microcode: patch id that want to apply= 0x%08x\n", m->patch_id);
 //read the patch_id again
 msr = rdmsr(0x8b);
 new_patch_id = msr.lo;
 printk_debug("microcode: updated to patch id = 0x%08x %s\r\n", new_patch_id , (new_patch_id == m->patch_id)?" success":" fail" );
 break;