Whitespace and other code cleanup in preparation for AMD Barcelona support.

Signed-off-by: Marc Jones <marc.jones@amd.com>
Reviewed-by: Jordan Crouse <jordan.crouse@amd.com>
Acked-by: Myles Watson <myles@pel.cs.byu.edu>



git-svn-id: svn://svn.coreboot.org/coreboot/trunk@3013 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
This commit is contained in:
Marc Jones 2007-12-19 00:47:09 +00:00
parent 244dd82fd6
commit 2006b38fed
2 changed files with 205 additions and 209 deletions

View File

@@ -1,8 +1,25 @@
/* by yhlu 6.2005 */
/* yhlu 2005.12 make it support HDT Memory Debuggers with Disassembly, please select the PCI Bus mem for Phys Type */
/* yhlu 2006.3 copy data from cache to ram and reserve 0x1000 for global variables */
/*
* This file is part of the LinuxBIOS project.
*
* Copyright (C) 2005-2007 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define CacheSize DCACHE_RAM_SIZE
#define CacheBase (0xd0000 - CacheSize)
/* leave some space for global variable to pass to RAM stage */
#define GlobalVarSize DCACHE_RAM_GLOBAL_VAR_SIZE
@@ -12,30 +29,31 @@
/* Save the BIST result */
movl %eax, %ebp
/*for normal part %ebx already contain cpu_init_detected from fallback call */
/* for normal part %ebx already contain cpu_init_detected from fallback call */
cache_as_ram_setup:
/* hope we can skip the double set for normal part */
#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
#if ((HAVE_FAILOVER_BOOT == 1) && (USE_FAILOVER_IMAGE == 1)) || ((HAVE_FAILOVER_BOOT == 0) && (USE_FALLBACK_IMAGE == 1))
/* check if cpu_init_detected */
movl $MTRRdefType_MSR, %ecx
rdmsr
andl $0x00000800, %eax
andl $(1 << 11), %eax
movl %eax, %ebx /* We store the status */
/* Set MtrrFixDramModEn for clear fixed mtrr */
enable_fixed_mtrr_dram_modify:
movl $SYSCFG_MSR, %ecx
rdmsr
andl $(~(SYSCFG_MSR_MtrrFixDramEn|SYSCFG_MSR_MtrrVarDramEn)), %eax
andl $(~(SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_MtrrVarDramEn)), %eax
orl $SYSCFG_MSR_MtrrFixDramModEn, %eax
wrmsr
/*Clear all MTRRs */
/* Clear all MTRRs */
xorl %edx, %edx
movl $fixed_mtrr_msr, %esi
clear_fixed_var_mtrr:
lodsl (%esi), %eax
testl %eax, %eax
@@ -50,7 +68,7 @@ clear_fixed_var_mtrr_out:
#if CacheSize == 0x10000
/* enable caching for 64K using fixed mtrr */
movl $0x268, %ecx /* fix4k_c0000*/
movl $0x268, %ecx /* fix4k_c0000 */
movl $0x06060606, %eax /* WB IO type */
movl %eax, %edx
wrmsr
@@ -60,22 +78,21 @@ clear_fixed_var_mtrr_out:
#if CacheSize == 0xc000
/* enable caching for 16K using fixed mtrr */
movl $0x268, %ecx /* fix4k_c4000*/
movl $0x268, %ecx /* fix4k_c4000 */
movl $0x06060606, %edx /* WB IO type */
xorl %eax, %eax
wrmsr
/* enable caching for 32K using fixed mtrr */
movl $0x269, %ecx /* fix4k_c8000*/
movl $0x269, %ecx /* fix4k_c8000 */
movl $0x06060606, %eax /* WB IO type */
movl %eax, %edx
wrmsr
#endif
#if CacheSize == 0x8000
/* enable caching for 32K using fixed mtrr */
movl $0x269, %ecx /* fix4k_c8000*/
movl $0x269, %ecx /* fix4k_c8000 */
movl $0x06060606, %eax /* WB IO type */
movl %eax, %edx
wrmsr
@@ -84,15 +101,15 @@ clear_fixed_var_mtrr_out:
#if CacheSize < 0x8000
/* enable caching for 16K/8K/4K using fixed mtrr */
movl $0x269, %ecx /* fix4k_cc000*/
#if CacheSize == 0x4000
#if CacheSize == 0x4000
movl $0x06060606, %edx /* WB IO type */
#endif
#if CacheSize == 0x2000
#endif
#if CacheSize == 0x2000
movl $0x06060000, %edx /* WB IO type */
#endif
#if CacheSize == 0x1000
#endif
#if CacheSize == 0x1000
movl $0x06000000, %edx /* WB IO type */
#endif
#endif
xorl %eax, %eax
wrmsr
#endif
@@ -105,10 +122,10 @@ clear_fixed_var_mtrr_out:
#endif /* USE_FAILOVER_IMAGE == 1*/
#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE == 0)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==0))
#if ((HAVE_FAILOVER_BOOT == 1) && (USE_FAILOVER_IMAGE == 0)) || ((HAVE_FAILOVER_BOOT == 0) && (USE_FALLBACK_IMAGE == 0))
/* disable cache */
movl %cr0, %eax
orl $(0x1<<30),%eax
orl $(1 << 30),%eax
movl %eax, %cr0
#endif
@@ -123,12 +140,12 @@ clear_fixed_var_mtrr_out:
wrmsr
movl $0x203, %ecx
movl $((1<<(CPU_ADDR_BITS-32))-1), %edx /* AMD 40 bit */
movl $((1 << (CPU_ADDR_BITS - 32)) - 1), %edx /* AMD 40 bit for K8, 48 bit for GH */
movl $(~(XIP_ROM_SIZE - 1) | 0x800), %eax
wrmsr
#endif /* XIP_ROM_SIZE && XIP_ROM_BASE */
#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
#if ((HAVE_FAILOVER_BOOT == 1) && (USE_FAILOVER_IMAGE == 1)) || ((HAVE_FAILOVER_BOOT == 0) && (USE_FALLBACK_IMAGE == 1))
/* Set the default memory type and enable fixed and variable MTRRs */
movl $MTRRdefType_MSR, %ecx
xorl %edx, %edx
@@ -145,32 +162,31 @@ clear_fixed_var_mtrr_out:
/* enable cache */
movl %cr0, %eax
andl $0x9fffffff,%eax
andl $0x9fffffff, %eax
movl %eax, %cr0
#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
#if ((HAVE_FAILOVER_BOOT == 1) && (USE_FAILOVER_IMAGE == 1)) || ((HAVE_FAILOVER_BOOT == 0) && (USE_FALLBACK_IMAGE == 1))
/* Read the range with lodsl*/
cld
movl $CacheBase, %esi
movl $(CacheSize>>2), %ecx
rep
lodsl
movl $(CacheSize >> 2), %ecx
rep lodsl
/* Clear the range */
movl $CacheBase, %edi
movl $(CacheSize>>2), %ecx
movl $(CacheSize >> 2), %ecx
xorl %eax, %eax
rep
stosl
rep stosl
#endif /*USE_FAILOVER_IMAGE == 1*/
/* set up the stack pointer */
movl $(CacheBase+CacheSize - GlobalVarSize), %eax
movl $(CacheBase + CacheSize - GlobalVarSize), %eax
movl %eax, %esp
/* Restore the BIST result */
movl %ebp, %eax
/* We need to set ebp ? No need */
movl %esp, %ebp
pushl %ebx /* init detected */
@@ -193,4 +209,5 @@ var_iorr_msr:
mem_top:
.long 0xC001001A, 0xC001001D
.long 0x000 /* NULL, end of table */
cache_as_ram_setup_out:
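
For reference, the fix4k constants in this file decode as follows: MSR 0x268 (MTRRfix4K_C0000) and MSR 0x269 (MTRRfix4K_C8000) each hold one memory-type byte per 4 KiB of the 0xC0000-0xC7FFF and 0xC8000-0xCFFFF windows, and type 0x06 is write-back, so CacheBase/CacheSize select which bytes become 0x06. A minimal C sketch of that derivation (the helper name is made up for illustration, not part of this commit):

#include <stdint.h>
#include <stdio.h>

#define MTRR_TYPE_WB 0x06

static void car_fixed_mtrr_values(uint32_t cache_base, uint32_t cache_size)
{
	uint32_t msr;

	for (msr = 0x268; msr <= 0x269; msr++) {
		uint64_t value = 0;
		uint32_t msr_base = 0xc0000 + (msr - 0x268) * 0x8000;
		uint32_t addr;

		/* One memory-type byte per 4 KiB range covered by this MSR. */
		for (addr = msr_base; addr < msr_base + 0x8000; addr += 0x1000) {
			if (addr >= cache_base && addr < cache_base + cache_size)
				value |= (uint64_t)MTRR_TYPE_WB
					 << (((addr - msr_base) / 0x1000) * 8);
		}
		printf("MSR 0x%03x: edx = 0x%08x, eax = 0x%08x\n", (unsigned)msr,
		       (uint32_t)(value >> 32), (uint32_t)value);
	}
}

int main(void)
{
	/* CacheBase = 0xd0000 - CacheSize; a 32 KiB CAR yields
	 * MSR 0x269 = eax/edx 0x06060606, matching the code above. */
	car_fixed_mtrr_values(0xd0000 - 0x8000, 0x8000);
	return 0;
}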

View File

@@ -1,43 +1,22 @@
/*============================================================================
Copyright 2005 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
This software and any related documentation (the "Materials") are the
confidential proprietary information of AMD. Unless otherwise provided in a
software agreement specifically licensing the Materials, the Materials are
provided in confidence and may not be distributed, modified, or reproduced in
whole or in part by any means.
LIMITATION OF LIABILITY: THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY
EXPRESS OR IMPLIED WARRANTY OF ANY KIND, INCLUDING BUT NOT LIMITED TO
WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, TITLE, FITNESS FOR ANY
PARTICULAR PURPOSE, OR WARRANTIES ARISING FROM CONDUCT, COURSE OF DEALING, OR
USAGE OF TRADE. IN NO EVENT SHALL AMD OR ITS LICENSORS BE LIABLE FOR ANY
DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF PROFITS,
BUSINESS INTERRUPTION, OR LOSS OF INFORMATION) ARISING OUT OF THE USE OF OR
INABILITY TO USE THE MATERIALS, EVEN IF AMD HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES. BECAUSE SOME JURISDICTIONS PROHIBIT THE EXCLUSION
OR LIMITATION OF LIABILITY FOR CONSEQUENTIAL OR INCIDENTAL DAMAGES, THE ABOVE
LIMITATION MAY NOT APPLY TO YOU.
AMD does not assume any responsibility for any errors which may appear in the
Materials nor any responsibility to support or update the Materials. AMD
retains the right to modify the Materials at any time, without notice, and is
not obligated to provide such modified Materials to you.
NO SUPPORT OBLIGATION: AMD is not obligated to furnish, support, or make any
further information, software, technical information, know-how, or show-how
available to you.
U.S. GOVERNMENT RESTRICTED RIGHTS: The Materials are provided with "RESTRICTED
RIGHTS." Use, duplication, or disclosure by the Government is subject to the
restrictions as set forth in FAR 52.227-14 and DFAR 252.227-7013, et seq., or
its successor. Use of the Materials by the Government constitutes
acknowledgement of AMD's proprietary rights in them.
============================================================================*/
//@DOC
// microcode.c
/*
$1.0$
*/
// Description: microcode patch support for k8
// by yhlu
//
//============================================================================
* This file is part of the LinuxBIOS project.
*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <console/console.h>
#include <cpu/cpu.h>
@@ -46,35 +25,35 @@ $1.0$
#include <cpu/x86/cache.h>
struct microcode {
uint32_t date_code;
uint32_t patch_id;
u32 date_code;
u32 patch_id;
uint16_t m_patch_data_id;
uint8_t m_patch_data_len;
uint8_t init_flag;
u16 m_patch_data_id;
u8 m_patch_data_len;
u8 init_flag;
uint32_t m_patch_data_cksum;
u32 m_patch_data_cksum;
uint32_t nb_dev_id;
uint32_t ht_io_hub_dev_id;
u32 nb_dev_id;
u32 ht_io_hub_dev_id;
uint16_t processor_rev_id;
uint8_t ht_io_hub_rev_id;
uint8_t nb_rev_id;
u16 processor_rev_id;
u8 ht_io_hub_rev_id;
u8 nb_rev_id;
uint8_t bios_api_rev;
uint8_t resv1[3];
u8 bios_api_rev;
u8 resv1[3];
uint32_t match_reg[8];
u32 match_reg[8];
uint8_t m_patch_data[896];
uint8_t resv2[896];
u8 m_patch_data[896];
u8 resv2[896];
uint8_t x86_code_present;
uint8_t x86_code_entry[191];
u8 x86_code_present;
u8 x86_code_entry[191];
};
static int need_apply_patch(struct microcode *m, unsigned equivalent_processor_rev_id)
static int need_apply_patch(struct microcode *m, u32 equivalent_processor_rev_id)
{
if (m->processor_rev_id != equivalent_processor_rev_id) return 0;
@@ -97,9 +76,9 @@ static int need_apply_patch(struct microcode *m, unsigned equivalent_processor_r
}
void amd_update_microcode(void *microcode_updates, unsigned equivalent_processor_rev_id)
void amd_update_microcode(void *microcode_updates, u32 equivalent_processor_rev_id)
{
unsigned int patch_id, new_patch_id;
u32 patch_id, new_patch_id;
struct microcode *m;
char *c;
msr_t msr;
@@ -117,7 +96,7 @@ void amd_update_microcode(void *microcode_updates, unsigned equivalent_processor
//apply patch
msr.hi = 0;
msr.lo = (uint32_t)m;
msr.lo = (u32)m;
wrmsr(0xc0010020, msr);
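
amd_update_microcode() is handed a blob of concatenated struct microcode images plus the equivalent processor revision ID from the patch header. A hedged sketch of a typical caller, assuming a table-driven CPUID-to-equivalence-ID lookup (the table entries, the blob stand-in, and the helper names are illustrative, not taken from this commit):

#include <stdint.h>

typedef uint16_t u16;
typedef uint32_t u32;

/* From the file above. */
void amd_update_microcode(void *microcode_updates, u32 equivalent_processor_rev_id);

/* Stand-in for the real blob of concatenated struct microcode images. */
static unsigned char microcode_updates[] = { 0 };

struct id_mapping {
	u32 orig_id;	/* CPUID Fn0000_0001 EAX (family/model/stepping) */
	u16 new_id;	/* equivalent_processor_rev_id from the patch header */
};

static u16 get_equivalent_processor_rev_id(u32 orig_id)
{
	/* Placeholder entries; a real table maps each supported
	 * processor revision to its microcode equivalence ID. */
	static const struct id_mapping id_mapping_table[] = {
		{ 0x00040f12, 0x0000 },	/* hypothetical entry */
		{ 0x00000000, 0x0000 },	/* end of table */
	};
	int i;

	for (i = 0; id_mapping_table[i].orig_id; i++)
		if (id_mapping_table[i].orig_id == orig_id)
			return id_mapping_table[i].new_id;

	return 0;	/* no equivalent ID -> no patch to apply */
}

void update_microcode(u32 cpu_deviceid)
{
	u16 equivalent_rev_id = get_equivalent_processor_rev_id(cpu_deviceid);

	if (equivalent_rev_id)
		amd_update_microcode(microcode_updates, equivalent_rev_id);
}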