Applying YhLu's patch from issue 37.
a. APIC ID lifting: lift the APIC IDs the way the kernel likes, and let the BSP stay at 0.
b. HW memhole: handle the case where hole_startk is equal to some node's basek.
This, together with the previous one, will break most of the tree, but Yinghai Lu is really good at fixing things, so...
git-svn-id: svn://svn.coreboot.org/coreboot/trunk@2116 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
parent 806e146e75, commit f5183cfa19
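For reference, a minimal stand-alone sketch of fix (b), mirroring the hoist_memory() logic added further down in this diff. The helper name plan_mem_hole() and its pure-function form are illustrative only and not part of the patch; the register field layout is taken from the patch itself. When the hole start coincides with a node's DRAM base, the hole offset would be 0, so the node's base register is raised to 4 GB instead of programming the DRAM hole register at F1 0xf0:

#include <stdint.h>

/* Decide how to program the K8 DRAM memory hole for one node.
 * 'base' is the node's F1 0x40+(i<<3) DRAM base register value and
 * 'hole_startk' the hole start in KB.  Returns a new base register
 * value when the base itself must be moved, or 0 when the DRAM hole
 * register (F1 0xf0) should be written with *hoist instead.
 */
uint32_t plan_mem_hole(uint32_t base, uint32_t hole_startk, uint32_t *hoist)
{
	uint32_t basek = (base & 0xffff0000) >> 2;             /* base in KB */
	uint32_t carry_over = (4 * 1024 * 1024) - hole_startk; /* hoisted KB */

	if (basek == hole_startk) {
		/* The hole offset would be 0 (overflow), so move this node's
		 * base up to 4 GB instead of enabling the hole. */
		base &= 0x0000ffff;
		base |= (4 * 1024 * 1024) << 2;
		return base;
	}

	/* Normal case: hole start address, memory-controller offset, enable bit. */
	*hoist = ((hole_startk << 10) & 0xff000000) |
	         (((basek + carry_over) >> 6) & 0x0000ff00) |
	         1;
	return 0;
}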
@ -188,37 +188,37 @@ define FALLBACK_SIZE
|
|||
default 65536
|
||||
format "0x%x"
|
||||
export used
|
||||
comment "ROM_SECTION_SIZE to use for the fallback build."
|
||||
comment "Default fallback image size"
|
||||
end
|
||||
define ROM_SIZE
|
||||
default none
|
||||
format "0x%x"
|
||||
export used
|
||||
comment "Total number of bytes allocated for normal and fallback LinuxBIOS images and payloads. Note that the fallback image goes at the end of the ROM, and the normal image at the beginning."
|
||||
comment "Size of your ROM"
|
||||
end
|
||||
define ROM_IMAGE_SIZE
|
||||
default 65535
|
||||
format "0x%x"
|
||||
export always
|
||||
comment "Maximum number of bytes allowed for a LinuxBIOS image. Does not include the payload."
|
||||
comment "Default image size"
|
||||
end
|
||||
define ROM_SECTION_SIZE
|
||||
default {FALLBACK_SIZE}
|
||||
format "0x%x"
|
||||
export used
|
||||
comment "Default rom section size. Normally, this is calculated in mainboard Config.lb and varies between the normal and fallback builds."
|
||||
comment "Default rom section size"
|
||||
end
|
||||
define ROM_SECTION_OFFSET
|
||||
default {ROM_SIZE - FALLBACK_SIZE}
|
||||
format "0x%x"
|
||||
export used
|
||||
comment "Number of bytes from the beginning of the ROM to the start of the section containing this build (normal or fallback). Normally, this is calculated in mainboard Config.lb."
|
||||
comment "Default rom section offset"
|
||||
end
|
||||
define PAYLOAD_SIZE
|
||||
default {ROM_SECTION_SIZE - ROM_IMAGE_SIZE}
|
||||
format "0x%x"
|
||||
export always
|
||||
comment "Maximum number of bytes allowed for a payload. Normally, this is calculated as above."
|
||||
comment "Default payload size"
|
||||
end
|
||||
define _ROMBASE
|
||||
default {PAYLOAD_SIZE}
|
||||
|
@ -373,6 +373,11 @@ define CONFIG_CONSOLE_VGA
|
|||
export always
|
||||
comment "Log messages to VGA"
|
||||
end
|
||||
define CONFIG_CONSOLE_VGA_MULTI
|
||||
default 0
|
||||
export always
|
||||
comment "Multi VGA console"
|
||||
end
|
||||
define CONFIG_CONSOLE_BTEXT
|
||||
default 0
|
||||
export always
|
||||
|
@ -479,7 +484,6 @@ define CONFIG_SYS_CLK_FREQ
|
|||
export used
|
||||
comment "System clock frequency in MHz"
|
||||
end
|
||||
|
||||
###############################################
|
||||
# SMP options
|
||||
###############################################
|
||||
|
@ -514,7 +518,21 @@ define SERIAL_CPU_INIT
|
|||
export always
|
||||
comment "Serialize CPU init"
|
||||
end
|
||||
|
||||
define APIC_ID_OFFSET
|
||||
default 0
|
||||
export always
|
||||
comment "We need to share this value between cache_as_ram_auto.c and northbridge.c"
|
||||
end
|
||||
define ENABLE_APIC_EXT_ID
|
||||
default 0
|
||||
export always
|
||||
comment "Enable APIC ext id mode 8 bit"
|
||||
end
|
||||
define LIFT_BSP_APIC_ID
|
||||
default 0
|
||||
export always
|
||||
comment "decide if we lift bsp apic id while ap apic id"
|
||||
end
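As a rough sketch of how these three options interact, distilled from the cpu_bus_scan() changes later in this diff (the stand-alone helper and its name below are illustrative, not part of the patch): with extended APIC IDs enabled, APs are lifted by APIC_ID_OFFSET, while the BSP either stays at 0 or is lifted too, depending on LIFT_BSP_APIC_ID.

/* How an APIC ID is adjusted when extended APIC ID mode is enabled,
 * following the APIC_ID_OFFSET / LIFT_BSP_APIC_ID handling in this patch. */
unsigned lift_apic_id(unsigned apic_id, unsigned apicid_offset,
                      int enable_apic_ext_id, int lift_bsp_apicid)
{
	if (!enable_apic_ext_id)
		return apic_id;                 /* extended APIC IDs disabled */
	if (lift_bsp_apicid)
		return apic_id + apicid_offset; /* lift everything, BSP included */
	if (apic_id != 0)
		return apic_id + apicid_offset; /* lift only the APs */
	return apic_id;                         /* BSP stays at 0 */
}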
|
||||
###############################################
|
||||
# Boot options
|
||||
###############################################
|
||||
|
@ -533,7 +551,7 @@ define CONFIG_ROM_STREAM_START
|
|||
default {0xffffffff - ROM_SIZE + ROM_SECTION_OFFSET + 1}
|
||||
format "0x%x"
|
||||
export always
|
||||
comment "Memory address of this (normal or fallback) build's payload in ROM. Normally, this is calculated as above."
|
||||
comment "ROM stream start location"
|
||||
end
|
||||
define CONFIG_FS_STREAM
|
||||
default 0
|
||||
|
@ -797,10 +815,22 @@ define CK804_DEVN_BASE
|
|||
comment "CK804 device count from 0 or 1"
|
||||
end
|
||||
|
||||
define K8_E0_MEM_HOLE_SIZEK
|
||||
define K8_HW_MEM_HOLE_SIZEK
|
||||
default 0
|
||||
export always
|
||||
comment "Opteron E0 later memory hole size in K"
|
||||
comment "Opteron E0 later memory hole size in K, 0 mean disable"
|
||||
end
|
||||
|
||||
define K8_HW_MEM_HOLE_SIZE_AUTO_INC
|
||||
default 0
|
||||
export always
|
||||
comment "Opteron E0 later memory hole size auto increase to avoid hole startk equal to basek"
|
||||
end
|
||||
|
||||
define K8_HT_FREQ_1G_SUPPORT
|
||||
default 0
|
||||
export always
|
||||
comment "Optern E0 later could support 1G HT, but still depends MB design"
|
||||
end
|
||||
|
||||
define CONFIG_PCI_ROM_RUN
|
||||
|
@ -809,6 +839,12 @@ define CONFIG_PCI_ROM_RUN
|
|||
comment "Init PCI device option rom"
|
||||
end
|
||||
|
||||
define CONFIG_PCI_64BIT_PREF_MEM
|
||||
default 0
|
||||
export always
|
||||
comment "allow PCI device get 4G above Region as pref mem"
|
||||
end
|
||||
|
||||
|
||||
###############################################
|
||||
# Board specific options
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
/* 2004.12 yhlu add dual core support */
|
||||
/* 24 June 2005 Cleaned up dual core support Eric Biederman */
|
||||
|
||||
#include <console/console.h>
|
||||
#include <cpu/cpu.h>
|
||||
|
@ -10,100 +9,13 @@
|
|||
#include <pc80/mc146818rtc.h>
|
||||
#include <smp/spinlock.h>
|
||||
#include <cpu/x86/mtrr.h>
|
||||
#include "../model_fxx/model_fxx_msr.h"
|
||||
#include "../../../northbridge/amd/amdk8/cpu_rev.c"
|
||||
#include <cpu/amd/model_fxx_msr.h>
|
||||
#include <cpu/amd/model_fxx_rev.h>
|
||||
|
||||
static int first_time = 1;
|
||||
static int disable_siblings = !CONFIG_LOGICAL_CPUS;
|
||||
|
||||
void amd_sibling_init(device_t cpu, struct node_core_id id)
|
||||
{
|
||||
unsigned long i;
|
||||
unsigned siblings, max_siblings;
|
||||
|
||||
/* On the bootstrap processor see if I want sibling cpus enabled */
|
||||
if (first_time) {
|
||||
first_time = 0;
|
||||
get_option(&disable_siblings, "dual_core");
|
||||
}
|
||||
|
||||
siblings = cpuid_ecx(0x80000008) & 0xff;
|
||||
printk_debug("%d Sibling Cores found\n", siblings);
|
||||
|
||||
/* For now assume all cpus have the same number of siblings */
|
||||
max_siblings = siblings + 1;
|
||||
|
||||
/* Wishlist? make dual cores look like hyperthreading */
|
||||
|
||||
/* See if I am a sibling cpu */
|
||||
if (disable_siblings && (id.coreid != 0)) {
|
||||
cpu->enabled = 0;
|
||||
}
|
||||
|
||||
if (id.coreid == 0) {
|
||||
/* On the primary cpu find the siblings */
|
||||
for (i = 1; i <= siblings; i++) {
|
||||
struct device_path cpu_path;
|
||||
device_t new;
|
||||
/* Build the cpu device path */
|
||||
cpu_path.type = DEVICE_PATH_APIC;
|
||||
cpu_path.u.apic.apic_id =
|
||||
(0x10 + i*0x10 + id.nodeid);
|
||||
|
||||
new = alloc_dev(cpu->bus, &cpu_path);
|
||||
if (!new) {
|
||||
continue;
|
||||
}
|
||||
|
||||
new->path.u.apic.node_id = cpu->path.u.apic.node_id;
|
||||
new->path.u.apic.core_id = i;
|
||||
/* Report what I have done */
|
||||
printk_debug("CPU: %s %s\n",
|
||||
dev_path(new), new->enabled?"enabled":"disabled");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct node_core_id get_node_core_id(void)
|
||||
{
|
||||
struct node_core_id id;
|
||||
unsigned siblings;
|
||||
/* Get the apicid at reset */
|
||||
id.nodeid = (cpuid_ebx(1) >> 24) & 0xff;
|
||||
id.coreid = 0;
|
||||
/* Find out how many siblings we have */
|
||||
siblings = cpuid_ecx(0x80000008) & 0xff;
|
||||
if (siblings) {
|
||||
unsigned bits;
|
||||
msr_t msr;
|
||||
bits = 0;
|
||||
while ((1 << bits) <= siblings)
|
||||
bits++;
|
||||
|
||||
msr = rdmsr(NB_CFG_MSR);
|
||||
if ((msr.hi >> (54-32)) & 1) {
|
||||
// when NB_CFG[54] is set, nodeid = ebx[27:25], coreid = ebx[24]
|
||||
id.coreid = id.nodeid & ((1 << bits) - 1);
|
||||
id.nodeid >>= bits;
|
||||
} else {
|
||||
// when NB_CFG[54] is clear, nodeid = ebx[26:24], coreid = ebx[27]
|
||||
id.coreid = id.nodeid >> 3;
|
||||
id.nodeid &= 7;
|
||||
}
|
||||
} else {
|
||||
if (!is_cpu_pre_e0()) {
|
||||
id.nodeid >>= 1;
|
||||
}
|
||||
}
|
||||
return id;
|
||||
}
|
||||
|
||||
unsigned int read_nb_cfg_54(void)
|
||||
{
|
||||
msr_t msr;
|
||||
msr = rdmsr(NB_CFG_MSR);
|
||||
return ( ( msr.hi >> (54-32)) & 1);
|
||||
}
|
||||
#include "dualcore_id.c"
|
||||
|
||||
static int get_max_siblings(int nodes)
|
||||
{
|
||||
|
@ -161,14 +73,27 @@ unsigned get_apicid_base(unsigned ioapic_num)
|
|||
siblings = get_max_siblings(nodes);
|
||||
|
||||
if(bsp_apic_id > 0) { // io apic could start from 0
|
||||
return 0;
|
||||
return 0;
|
||||
} else if(pci_read_config32(dev, 0x68) & ( (1<<17) | (1<<18)) ) { // enabled ext id but bsp = 0
|
||||
if(!disable_siblings) { return siblings + 1; }
|
||||
else { return 1; }
|
||||
return 1;
|
||||
}
|
||||
|
||||
nb_cfg_54 = read_nb_cfg_54();
|
||||
|
||||
#if 0
|
||||
//This is for all E0 single-core parts where nb_cfg_54 is set, but in the auto.c stage we do not set that bit for them.
|
||||
if(nb_cfg_54 && (!disable_siblings) && (siblings == 0)) {
|
||||
//we need to check if e0 single core is there
|
||||
int i;
|
||||
for(i=0; i<nodes; i++) {
|
||||
if(is_e0_later_in_bsp(i)) {
|
||||
siblings = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
//construct apicid_base
|
||||
|
||||
if((!disable_siblings) && (siblings>0) ) {
|
||||
|
@ -193,4 +118,78 @@ unsigned get_apicid_base(unsigned ioapic_num)
|
|||
|
||||
return apicid_base;
|
||||
}
|
||||
#if 0
|
||||
void amd_sibling_init(device_t cpu)
|
||||
{
|
||||
unsigned i, siblings;
|
||||
struct cpuid_result result;
|
||||
unsigned nb_cfg_54;
|
||||
struct node_core_id id;
|
||||
|
||||
/* On the bootstrap processor see if I want sibling cpus enabled */
|
||||
if (first_time) {
|
||||
first_time = 0;
|
||||
get_option(&disable_siblings, "dual_core");
|
||||
}
|
||||
result = cpuid(0x80000008);
|
||||
/* See how many sibling cpus we have */
|
||||
/* Is dualcore supported */
|
||||
siblings = (result.ecx & 0xff);
|
||||
if ( siblings < 1) {
|
||||
return;
|
||||
}
|
||||
|
||||
#if 1
|
||||
printk_debug("CPU: %u %d siblings\n",
|
||||
cpu->path.u.apic.apic_id,
|
||||
siblings);
|
||||
#endif
|
||||
|
||||
nb_cfg_54 = read_nb_cfg_54();
|
||||
#if 1
|
||||
id = get_node_core_id(nb_cfg_54); // pre e0 nb_cfg_54 can not be set
|
||||
|
||||
/* See if I am a sibling cpu */
|
||||
//if ((cpu->path.u.apic.apic_id>>(nb_cfg_54?0:3)) & siblings ) { // siblings = 1, 3, 7, 15,....
|
||||
//if ( ( (cpu->path.u.apic.apic_id>>(nb_cfg_54?0:3)) % (siblings+1) ) != 0 ) {
|
||||
if(id.coreid != 0) {
|
||||
if (disable_siblings) {
|
||||
cpu->enabled = 0;
|
||||
}
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* I am the primary cpu start up my siblings */
|
||||
|
||||
for(i = 1; i <= siblings; i++) {
|
||||
struct device_path cpu_path;
|
||||
device_t new;
|
||||
/* Build the cpu device path */
|
||||
cpu_path.type = DEVICE_PATH_APIC;
|
||||
cpu_path.u.apic.apic_id = cpu->path.u.apic.apic_id + i * (nb_cfg_54?1:8);
|
||||
|
||||
/* See if I can find the cpu */
|
||||
new = find_dev_path(cpu->bus, &cpu_path);
|
||||
/* Allocate the new cpu device structure */
|
||||
if(!new) {
|
||||
new = alloc_dev(cpu->bus, &cpu_path);
|
||||
new->enabled = 1;
|
||||
new->initialized = 0;
|
||||
}
|
||||
|
||||
new->path.u.apic.node_id = cpu->path.u.apic.node_id;
|
||||
new->path.u.apic.core_id = i;
|
||||
|
||||
#if 1
|
||||
printk_debug("CPU: %u has sibling %u\n",
|
||||
cpu->path.u.apic.apic_id,
|
||||
new->path.u.apic.apic_id);
|
||||
#endif
|
||||
|
||||
if(new->enabled && !new->initialized)
|
||||
start_cpu(new);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/* 2004.12 yhlu add dual core support */
|
||||
|
||||
#include <arch/cpu.h>
|
||||
#include "cpu/amd/model_fxx/model_fxx_msr.h"
|
||||
#include <cpu/amd/model_fxx_msr.h>
|
||||
|
||||
static inline unsigned int read_nb_cfg_54(void)
|
||||
{
|
||||
|
|
|
@ -5,30 +5,55 @@
|
|||
* 2005.02 yhlu add e0 memory hole support
|
||||
|
||||
* Copyright 2005 AMD
|
||||
* 2005.08 yhlu add microcode support
|
||||
|
||||
* 2005.08 yhlu add microcode support
|
||||
*/
|
||||
#include <console/console.h>
|
||||
#include <cpu/x86/msr.h>
|
||||
#include <cpu/amd/mtrr.h>
|
||||
#include <device/device.h>
|
||||
#include <device/device.h>
|
||||
#include <device/pci.h>
|
||||
#include <string.h>
|
||||
#include <cpu/x86/msr.h>
|
||||
#include <cpu/x86/pae.h>
|
||||
#include <pc80/mc146818rtc.h>
|
||||
#include <cpu/x86/lapic.h>
|
||||
|
||||
#include "../../../northbridge/amd/amdk8/amdk8.h"
|
||||
#include "../../../northbridge/amd/amdk8/cpu_rev.c"
|
||||
|
||||
#include <cpu/amd/model_fxx_rev.h>
|
||||
#include <cpu/cpu.h>
|
||||
#include <cpu/amd/microcode.h>
|
||||
#include <cpu/x86/cache.h>
|
||||
#include <cpu/x86/mtrr.h>
|
||||
#include <cpu/x86/mem.h>
|
||||
|
||||
#include <cpu/amd/dualcore.h>
|
||||
|
||||
#include "model_fxx_msr.h"
|
||||
#include <cpu/amd/model_fxx_msr.h>
|
||||
|
||||
int is_e0_later_in_bsp(int nodeid)
|
||||
{
|
||||
uint32_t val;
|
||||
uint32_t val_old;
|
||||
int e0_later;
|
||||
if(nodeid==0) { // we don't need to do that for node 0 in core0/node0
|
||||
return !is_cpu_pre_e0();
|
||||
}
|
||||
// D0 will be treated as E0 by this method, but on D0 nb_cfg_54 is always 0
|
||||
device_t dev;
|
||||
dev = dev_find_slot(0, PCI_DEVFN(0x18+nodeid,2));
|
||||
if(!dev) return 0;
|
||||
val_old = pci_read_config32(dev, 0x80);
|
||||
val = val_old;
|
||||
val |= (1<<3);
|
||||
pci_write_config32(dev, 0x80, val);
|
||||
val = pci_read_config32(dev, 0x80);
|
||||
e0_later = !!(val & (1<<3));
|
||||
if(e0_later) { // on pre-E0, bit 3 is always 0 and cannot be changed
|
||||
pci_write_config32(dev, 0x80, val_old); // restore it
|
||||
}
|
||||
|
||||
return e0_later;
|
||||
}
|
||||
|
||||
#define MCI_STATUS 0x401
|
||||
|
||||
|
@ -53,7 +78,6 @@ static inline void wrmsr_amd(unsigned index, msr_t msr)
|
|||
}
|
||||
|
||||
|
||||
|
||||
#define MTRR_COUNT 8
|
||||
#define ZERO_CHUNK_KB 0x800UL /* 2M */
|
||||
#define TOLM_KB 0x400000UL
|
||||
|
@ -151,9 +175,11 @@ static void set_init_ecc_mtrrs(void)
|
|||
enable_cache();
|
||||
}
|
||||
|
||||
|
||||
static void init_ecc_memory(unsigned node_id)
|
||||
{
|
||||
unsigned long startk, begink, endk;
|
||||
unsigned long hole_startk = 0;
|
||||
unsigned long basek;
|
||||
struct mtrr_state mtrr_state;
|
||||
device_t f1_dev, f2_dev, f3_dev;
|
||||
|
@ -198,6 +224,18 @@ static void init_ecc_memory(unsigned node_id)
|
|||
startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
|
||||
endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
if (!is_cpu_pre_e0())
|
||||
{
|
||||
|
||||
uint32_t val;
|
||||
val = pci_read_config32(f1_dev, 0xf0);
|
||||
if(val & 1) {
|
||||
hole_startk = ((val & (0xff<<24)) >> 10);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/* Don't start too early */
|
||||
begink = startk;
|
||||
|
@ -221,8 +259,15 @@ static void init_ecc_memory(unsigned node_id)
|
|||
unsigned long size;
|
||||
void *addr;
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
if ( hole_startk != 0 ) {
|
||||
if ((basek >= hole_startk) && (basek < 4*1024*1024)) continue;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Report every 64M */
|
||||
if ((basek % (64*1024)) == 0) {
|
||||
|
||||
/* Restore the normal state */
|
||||
map_2M_page(0);
|
||||
restore_mtrr_state(&mtrr_state);
|
||||
|
@ -234,6 +279,7 @@ static void init_ecc_memory(unsigned node_id)
|
|||
/* Return to the initialization state */
|
||||
set_init_ecc_mtrrs();
|
||||
disable_lapic();
|
||||
|
||||
}
|
||||
|
||||
limitk = (basek + ZERO_CHUNK_KB) & ~(ZERO_CHUNK_KB - 1);
|
||||
|
@ -304,7 +350,7 @@ static inline void k8_errata(void)
|
|||
* FIXME this is only needed if ECC is enabled.
|
||||
*/
|
||||
msr.hi |= 1 << (36 - 32);
|
||||
}
|
||||
}
|
||||
wrmsr(NB_CFG_MSR, msr);
|
||||
}
|
||||
|
||||
|
@ -324,7 +370,6 @@ static inline void k8_errata(void)
|
|||
|
||||
/* Erratum 91 prefetch miss is handled in the kernel */
|
||||
|
||||
|
||||
/* Erratum 106 ... */
|
||||
msr = rdmsr_amd(LS_CFG_MSR);
|
||||
msr.lo |= 1 << 25;
|
||||
|
@ -335,49 +380,39 @@ static inline void k8_errata(void)
|
|||
msr.hi |= 1 << (43 - 32);
|
||||
wrmsr_amd(BU_CFG_MSR, msr);
|
||||
|
||||
if (is_cpu_pre_e0() && !is_cpu_pre_d0()) {
|
||||
if(is_cpu_d0()) {
|
||||
/* Erratum 110 ...*/
|
||||
msr = rdmsr_amd(CPU_ID_HYPER_EXT_FEATURES);
|
||||
msr.hi |=1;
|
||||
wrmsr_amd(CPU_ID_HYPER_EXT_FEATURES, msr);
|
||||
}
|
||||
|
||||
if (!is_cpu_pre_e0()) {
|
||||
if (!is_cpu_pre_e0())
|
||||
{
|
||||
/* Erratum 110 ... */
|
||||
msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
|
||||
msr.hi |=1;
|
||||
wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
|
||||
|
||||
/* Erratum 113 ... */
|
||||
msr = rdmsr_amd(BU_CFG_MSR);
|
||||
msr.hi |= (1 << 16);
|
||||
wrmsr_amd(BU_CFG_MSR, msr);
|
||||
}
|
||||
|
||||
/* Erratum 122 */
|
||||
if (!is_cpu_pre_c0()) {
|
||||
msr = rdmsr(HWCR_MSR);
|
||||
msr.lo |= 1 << 6;
|
||||
wrmsr(HWCR_MSR, msr);
|
||||
}
|
||||
msr = rdmsr(HWCR_MSR);
|
||||
msr.lo |= 1 << 6;
|
||||
wrmsr(HWCR_MSR, msr);
|
||||
|
||||
/* Erratum 123? dual core deadlock? */
|
||||
|
||||
/* Erratum 131 */
|
||||
msr = rdmsr(NB_CFG_MSR);
|
||||
msr.lo |= 1 << 20;
|
||||
wrmsr(NB_CFG_MSR, msr);
|
||||
|
||||
}
|
||||
|
||||
|
||||
extern void model_fxx_update_microcode(unsigned cpu_deviceid);
|
||||
|
||||
void model_fxx_init(device_t cpu)
|
||||
void model_fxx_init(device_t dev)
|
||||
{
|
||||
unsigned long i;
|
||||
msr_t msr;
|
||||
struct node_core_id id;
|
||||
unsigned equivalent_processor_rev_id;
|
||||
#if CONFIG_LOGICAL_CPUS == 1
|
||||
unsigned siblings;
|
||||
#endif
|
||||
|
||||
/* Turn on caching if we haven't already */
|
||||
x86_enable_cache();
|
||||
|
@ -385,8 +420,8 @@ void model_fxx_init(device_t cpu)
|
|||
x86_mtrr_check();
|
||||
|
||||
/* Update the microcode */
|
||||
model_fxx_update_microcode(cpu->device);
|
||||
|
||||
model_fxx_update_microcode(dev->device);
|
||||
|
||||
disable_cache();
|
||||
|
||||
/* zero the machine check error status registers */
|
||||
|
@ -403,18 +438,37 @@ void model_fxx_init(device_t cpu)
|
|||
/* Enable the local cpu apics */
|
||||
setup_lapic();
|
||||
|
||||
/* Find our node and core */
|
||||
id = get_node_core_id();
|
||||
#if CONFIG_LOGICAL_CPUS == 1
|
||||
siblings = cpuid_ecx(0x80000008) & 0xff;
|
||||
|
||||
/* Is this a bad location? In particular can another node prefetch
|
||||
if(siblings>0) {
|
||||
msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
|
||||
msr.lo |= 1 << 28;
|
||||
wrmsr_amd(CPU_ID_FEATURES_MSR, msr);
|
||||
|
||||
msr = rdmsr_amd(LOGICAL_CPUS_NUM_MSR);
|
||||
msr.lo = (siblings+1)<<16;
|
||||
wrmsr_amd(LOGICAL_CPUS_NUM_MSR, msr);
|
||||
|
||||
msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
|
||||
msr.hi |= 1<<(33-32);
|
||||
wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
id = get_node_core_id(read_nb_cfg_54()); // pre e0 nb_cfg_54 can not be set
|
||||
|
||||
/* Is this a bad location? In particular can another node prefetch
|
||||
* data from this node before we have initialized it?
|
||||
*/
|
||||
if (id.coreid == 0) {
|
||||
init_ecc_memory(id.nodeid); // only do it for core 0
|
||||
}
|
||||
if (id.coreid == 0) init_ecc_memory(id.nodeid); // only do it for core 0
|
||||
|
||||
#if CONFIG_LOGICAL_CPUS==1
|
||||
/* Start up my cpu siblings */
|
||||
// if(id.coreid==0) amd_sibling_init(dev); // Not needed: core1 is already put on the CPU bus in cpu_bus_scan
|
||||
#endif
|
||||
|
||||
/* Deal with sibling cpus */
|
||||
amd_sibling_init(cpu, id);
|
||||
}
|
||||
|
||||
static struct device_operations cpu_dev_ops = {
|
||||
|
|
|
@ -1,21 +0,0 @@
|
|||
#ifndef CPU_AMD_MODEL_FXX_MSR_H
|
||||
#define CPU_AMD_MODEL_FXX_MSR_H
|
||||
|
||||
#define HWCR_MSR 0xC0010015
|
||||
#define NB_CFG_MSR 0xC001001f
|
||||
#define LS_CFG_MSR 0xC0011020
|
||||
#define IC_CFG_MSR 0xC0011021
|
||||
#define DC_CFG_MSR 0xC0011022
|
||||
#define BU_CFG_MSR 0xC0011023
|
||||
|
||||
|
||||
#define CPU_ID_FEATURES_MSR 0xc0011004
|
||||
|
||||
/* D0 only */
|
||||
#define CPU_ID_HYPER_EXT_FEATURES 0xc001100d
|
||||
/* E0 only */
|
||||
#define LOGICAL_CPUS_NUM_MSR 0xc001100d
|
||||
|
||||
#define CPU_ID_EXT_FEATURES_MSR 0xc0011005
|
||||
|
||||
#endif /* CPU_AMD_MODEL_FXX_MSR_H */
|
|
@ -0,0 +1,21 @@
|
|||
#ifndef CPU_AMD_MODEL_FXX_MSR_H
|
||||
#define CPU_AMD_MODEL_FXX_MSR_H
|
||||
|
||||
#define HWCR_MSR 0xC0010015
|
||||
#define NB_CFG_MSR 0xC001001f
|
||||
#define LS_CFG_MSR 0xC0011020
|
||||
#define IC_CFG_MSR 0xC0011021
|
||||
#define DC_CFG_MSR 0xC0011022
|
||||
#define BU_CFG_MSR 0xC0011023
|
||||
|
||||
|
||||
#define CPU_ID_FEATURES_MSR 0xc0011004
|
||||
|
||||
/* D0 only */
|
||||
#define CPU_ID_HYPER_EXT_FEATURES 0xc001100d
|
||||
/* E0 only */
|
||||
#define LOGICAL_CPUS_NUM_MSR 0xc001100d
|
||||
|
||||
#define CPU_ID_EXT_FEATURES_MSR 0xc0011005
|
||||
|
||||
#endif /* CPU_AMD_MODEL_FXX_MSR_H */
|
|
@ -0,0 +1,78 @@
|
|||
#include <arch/cpu.h>
|
||||
|
||||
static inline int is_cpu_rev_a0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfffef) == 0x0f00;
|
||||
}
|
||||
static inline int is_cpu_pre_c0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfffef) < 0x0f48;
|
||||
}
|
||||
|
||||
static inline int is_cpu_c0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfffef) == 0x0f48;
|
||||
}
|
||||
|
||||
static inline int is_cpu_pre_b3(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfffef) < 0x0f41;
|
||||
}
|
||||
|
||||
static inline int is_cpu_b3(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfffef) == 0x0f41;
|
||||
}
|
||||
//AMD_D0_SUPPORT
|
||||
static inline int is_cpu_pre_d0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfff0f) < 0x10f00;
|
||||
}
|
||||
|
||||
static inline int is_cpu_d0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfff0f) == 0x10f00;
|
||||
}
|
||||
|
||||
//AMD_E0_SUPPORT
|
||||
static inline int is_cpu_pre_e0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfff0f) < 0x20f00;
|
||||
}
|
||||
|
||||
static inline int is_cpu_e0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfff00) == 0x20f00;
|
||||
}
|
||||
|
||||
|
||||
#ifdef __ROMCC__
|
||||
static int is_e0_later_in_bsp(int nodeid)
|
||||
{
|
||||
uint32_t val;
|
||||
uint32_t val_old;
|
||||
int e0_later;
|
||||
if(nodeid==0) { // we don't need to do that for node 0 in core0/node0
|
||||
return !is_cpu_pre_e0();
|
||||
}
|
||||
// D0 will be treated as E0 by this method, but on D0 nb_cfg_54 is always 0
|
||||
device_t dev;
|
||||
dev = PCI_DEV(0, 0x18+nodeid,2);
|
||||
val_old = pci_read_config32(dev, 0x80);
|
||||
val = val_old;
|
||||
val |= (1<<3);
|
||||
pci_write_config32(dev, 0x80, val);
|
||||
val = pci_read_config32(dev, 0x80);
|
||||
e0_later = !!(val & (1<<3));
|
||||
if(e0_later) { // on pre-E0, bit 3 is always 0 and cannot be changed
|
||||
pci_write_config32(dev, 0x80, val_old); // restore it
|
||||
}
|
||||
|
||||
return e0_later;
|
||||
}
|
||||
#else
|
||||
int is_e0_later_in_bsp(int nodeid); //defined in model_fxx_init.c
|
||||
#endif
|
||||
|
||||
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
#include <arch/cpu.h>
|
||||
static int is_cpu_rev_a0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfffef) == 0x0f00;
|
||||
}
|
||||
|
||||
static int is_cpu_pre_b3(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfffef) < 0x0f41;
|
||||
}
|
||||
|
||||
static int is_cpu_pre_c0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfffef) < 0x0f48;
|
||||
}
|
||||
|
||||
static int is_cpu_pre_d0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfff0f) < 0x10000;
|
||||
}
|
||||
|
||||
static int is_cpu_pre_e0(void)
|
||||
{
|
||||
return (cpuid_eax(1) & 0xfff0f) < 0x20f00;
|
||||
}
|
|
@ -17,17 +17,21 @@
|
|||
#include <cpu/cpu.h>
|
||||
|
||||
#include <cpu/x86/lapic.h>
|
||||
#include <cpu/amd/dualcore.h>
|
||||
|
||||
#if CONFIG_LOGICAL_CPUS==1
|
||||
#include <cpu/amd/dualcore.h>
|
||||
#include <pc80/mc146818rtc.h>
|
||||
#endif
|
||||
|
||||
#include "chip.h"
|
||||
#include "root_complex/chip.h"
|
||||
#include "northbridge.h"
|
||||
|
||||
#include "amdk8.h"
|
||||
#include "cpu_rev.c"
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
#include <cpu/amd/model_fxx_rev.h>
|
||||
#endif
|
||||
|
||||
#define FX_DEVS 8
|
||||
static device_t __f0_dev[FX_DEVS];
|
||||
|
@ -446,6 +450,10 @@ static void amdk8_set_resource(device_t dev, struct resource *resource, unsigned
|
|||
* I tried to reuse the resource allocation code in amdk8_set_resource()
|
||||
* but it is too difficult to deal with the resource allocation magic.
|
||||
*/
|
||||
#if CONFIG_CONSOLE_VGA_MULTI == 1
|
||||
extern device_t vga_pri; // the primary vga device, defined in device.c
|
||||
#endif
|
||||
|
||||
static void amdk8_create_vga_resource(device_t dev, unsigned nodeid)
|
||||
{
|
||||
struct resource *resource;
|
||||
|
@ -457,18 +465,30 @@ static void amdk8_create_vga_resource(device_t dev, unsigned nodeid)
|
|||
* we only deal with the 'first' vga card */
|
||||
for (link = 0; link < dev->links; link++) {
|
||||
if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
|
||||
#if CONFIG_CONSOLE_VGA_MULTI == 1
|
||||
printk_debug("VGA: vga_pri bus num = %d dev->link[link] bus range [%d,%d]\n", vga_pri->bus->secondary,
|
||||
dev->link[link].secondary,dev->link[link].subordinate);
|
||||
/* We need to make sure the vga_pri is under the link */
|
||||
if((vga_pri->bus->secondary >= dev->link[link].secondary ) &&
|
||||
(vga_pri->bus->secondary <= dev->link[link].subordinate )
|
||||
)
|
||||
#endif
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
printk_spew("%s: link %d has VGA device\n", __func__, link);
|
||||
|
||||
/* no VGA card installed */
|
||||
if (link == dev->links)
|
||||
return;
|
||||
|
||||
printk_debug("VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link);
|
||||
|
||||
/* allocate a temp resource for the legacy VGA buffer */
|
||||
resource = amdk8_find_mempair(dev, nodeid, link);
|
||||
if(!resource){
|
||||
printk_debug("VGA: Can not find free mmio reg for legacy VGA buffer\n");
|
||||
return;
|
||||
}
|
||||
resource->base = 0xa0000;
|
||||
resource->size = 0x20000;
|
||||
|
||||
|
@ -585,17 +605,42 @@ static void pci_domain_read_resources(device_t dev)
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if CONFIG_PCI_64BIT_PREF_MEM == 0
|
||||
/* Initialize the system wide io space constraints */
|
||||
resource = new_resource(dev, IOINDEX_SUBTRACTIVE(0, 0));
|
||||
resource->base = 0x400;
|
||||
resource->limit = 0xffffUL;
|
||||
resource->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
|
||||
|
||||
/* Initialize the system wide memory resources constraints */
|
||||
resource = new_resource(dev, IOINDEX_SUBTRACTIVE(1, 0));
|
||||
resource->limit = 0xfcffffffffULL;
|
||||
resource->flags = IORESOURCE_MEM | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
|
||||
/* Initialize the system wide memory resources constraints */
|
||||
resource = new_resource(dev, IOINDEX_SUBTRACTIVE(1, 0));
|
||||
resource->limit = 0xfcffffffffULL;
|
||||
resource->flags = IORESOURCE_MEM | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
|
||||
#else
|
||||
/* Initialize the system wide io space constraints */
|
||||
resource = new_resource(dev, 0);
|
||||
resource->base = 0x400;
|
||||
resource->limit = 0xffffUL;
|
||||
resource->flags = IORESOURCE_IO;
|
||||
compute_allocate_resource(&dev->link[0], resource,
|
||||
IORESOURCE_IO, IORESOURCE_IO);
|
||||
|
||||
/* Initialize the system wide prefetchable memory resources constraints */
|
||||
resource = new_resource(dev, 1);
|
||||
resource->limit = 0xfcffffffffULL;
|
||||
resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
|
||||
compute_allocate_resource(&dev->link[0], resource,
|
||||
IORESOURCE_MEM | IORESOURCE_PREFETCH,
|
||||
IORESOURCE_MEM | IORESOURCE_PREFETCH);
|
||||
|
||||
/* Initialize the system wide memory resources constraints */
|
||||
resource = new_resource(dev, 2);
|
||||
resource->limit = 0xfcffffffffULL;
|
||||
resource->flags = IORESOURCE_MEM;
|
||||
compute_allocate_resource(&dev->link[0], resource,
|
||||
IORESOURCE_MEM | IORESOURCE_PREFETCH,
|
||||
IORESOURCE_MEM);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void ram_resource(device_t dev, unsigned long index,
|
||||
|
@ -637,66 +682,304 @@ static uint32_t find_pci_tolm(struct bus *bus)
|
|||
return tolm;
|
||||
}
|
||||
|
||||
static uint32_t hoist_memory(unsigned long mmio_basek, int i)
|
||||
#if CONFIG_PCI_64BIT_PREF_MEM == 1
|
||||
#define BRIDGE_IO_MASK (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH)
|
||||
#endif
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
|
||||
struct hw_mem_hole_info {
|
||||
unsigned hole_startk;
|
||||
int node_id;
|
||||
};
|
||||
|
||||
static struct hw_mem_hole_info get_hw_mem_hole_info(void)
|
||||
{
|
||||
int ii;
|
||||
uint32_t carry_over;
|
||||
device_t dev;
|
||||
uint32_t base, limit;
|
||||
uint32_t basek;
|
||||
uint32_t hoist;
|
||||
struct hw_mem_hole_info mem_hole;
|
||||
int i;
|
||||
|
||||
carry_over = (4*1024*1024) - mmio_basek;
|
||||
for(ii=7;ii>i;ii--) {
|
||||
mem_hole.hole_startk = K8_HW_MEM_HOLE_SIZEK;
|
||||
mem_hole.node_id = -1;
|
||||
|
||||
base = f1_read_config32(0x40 + (ii << 3));
|
||||
limit = f1_read_config32(0x44 + (ii << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
f1_write_config32(0x44 + (ii << 3),limit + (carry_over << 2));
|
||||
f1_write_config32(0x40 + (ii << 3),base + (carry_over << 2));
|
||||
}
|
||||
limit = f1_read_config32(0x44 + (i << 3));
|
||||
f1_write_config32(0x44 + (i << 3),limit + (carry_over << 2));
|
||||
dev = __f1_dev[i];
|
||||
base = pci_read_config32(dev, 0x40 + (i << 3));
|
||||
basek = (pci_read_config32(dev, 0x40 + (i << 3)) & 0xffff0000) >> 2;
|
||||
hoist = /* hole start address */
|
||||
((mmio_basek << 10) & 0xff000000) +
|
||||
/* hole address to memory controller address */
|
||||
(((basek + carry_over) >> 6) & 0x0000ff00) +
|
||||
/* enable */
|
||||
1;
|
||||
pci_write_config32(dev, 0xf0, hoist);
|
||||
return carry_over;
|
||||
for (i = 0; i < 8; i++) {
|
||||
uint32_t base;
|
||||
uint32_t hole;
|
||||
base = f1_read_config32(0x40 + (i << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
hole = pci_read_config32(__f1_dev[i], 0xf0);
|
||||
if(hole & 1) { // we find the hole
|
||||
mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
|
||||
mem_hole.node_id = i; // record the node No with hole
|
||||
break; // only one hole
|
||||
}
|
||||
}
|
||||
|
||||
//Double-check: if the base and limit registers are set up non-contiguously (a special configuration rather than a hole register), derive hole_startk from that gap.
|
||||
if(mem_hole.node_id!=-1) {
|
||||
uint32_t limitk_pri = 0;
|
||||
for(i=0; i<8; i++) {
|
||||
uint32_t base, limit;
|
||||
unsigned base_k, limit_k;
|
||||
base = f1_read_config32(0x40 + (i << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
base_k = (base & 0xffff0000) >> 2;
|
||||
if(limitk_pri != base_k) { // we find the hole
|
||||
mem_hole.hole_startk = limitk_pri;
|
||||
mem_hole.node_id = i;
|
||||
break; //only one hole
|
||||
}
|
||||
|
||||
limit = f1_read_config32(0x44 + (i << 3));
|
||||
limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
|
||||
limitk_pri = limit_k;
|
||||
}
|
||||
}
|
||||
|
||||
return mem_hole;
|
||||
|
||||
}
|
||||
static void disable_hoist_memory(unsigned long hole_startk, int i)
|
||||
{
|
||||
int ii;
|
||||
device_t dev;
|
||||
uint32_t base, limit;
|
||||
uint32_t hoist;
|
||||
uint32_t hole_sizek;
|
||||
|
||||
|
||||
//1. find which node has hole
|
||||
//2. change limit in that node.
|
||||
//3. change base and limit in later node
|
||||
//4. clear that node f0
|
||||
|
||||
//if no mem hole is enabled, we need to change the node's base instead
|
||||
|
||||
hole_sizek = (4*1024*1024) - hole_startk;
|
||||
|
||||
for(ii=7;ii>i;ii--) {
|
||||
|
||||
base = f1_read_config32(0x40 + (ii << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
limit = f1_read_config32(0x44 + (ii << 3));
|
||||
f1_write_config32(0x44 + (ii << 3),limit - (hole_sizek << 2));
|
||||
f1_write_config32(0x40 + (ii << 3),base - (hole_sizek << 2));
|
||||
}
|
||||
limit = f1_read_config32(0x44 + (i << 3));
|
||||
f1_write_config32(0x44 + (i << 3),limit - (hole_sizek << 2));
|
||||
dev = __f1_dev[i];
|
||||
hoist = pci_read_config32(dev, 0xf0);
|
||||
if(hoist & 1) {
|
||||
pci_write_config32(dev, 0xf0, 0);
|
||||
}
|
||||
else {
|
||||
base = pci_read_config32(dev, 0x40 + (i << 3));
|
||||
f1_write_config32(0x40 + (i << 3),base - (hole_sizek << 2));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static uint32_t hoist_memory(unsigned long hole_startk, int i)
|
||||
{
|
||||
int ii;
|
||||
uint32_t carry_over;
|
||||
device_t dev;
|
||||
uint32_t base, limit;
|
||||
uint32_t basek;
|
||||
uint32_t hoist;
|
||||
|
||||
carry_over = (4*1024*1024) - hole_startk;
|
||||
|
||||
for(ii=7;ii>i;ii--) {
|
||||
|
||||
base = f1_read_config32(0x40 + (ii << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
limit = f1_read_config32(0x44 + (ii << 3));
|
||||
f1_write_config32(0x44 + (ii << 3),limit + (carry_over << 2));
|
||||
f1_write_config32(0x40 + (ii << 3),base + (carry_over << 2));
|
||||
}
|
||||
limit = f1_read_config32(0x44 + (i << 3));
|
||||
f1_write_config32(0x44 + (i << 3),limit + (carry_over << 2));
|
||||
dev = __f1_dev[i];
|
||||
base = pci_read_config32(dev, 0x40 + (i << 3));
|
||||
basek = (base & 0xffff0000) >> 2;
|
||||
if(basek == hole_startk) {
|
||||
//No need to set the memhole register here: the hole offset would be 0 (overflow),
//so change the base register instead; the new basek will be 4*1024*1024.
|
||||
base &= 0x0000ffff;
|
||||
base |= (4*1024*1024)<<2;
|
||||
f1_write_config32(0x40 + (i<<3), base);
|
||||
}
|
||||
else
|
||||
{
|
||||
hoist = /* hole start address */
|
||||
((hole_startk << 10) & 0xff000000) +
|
||||
/* hole address to memory controller address */
|
||||
(((basek + carry_over) >> 6) & 0x0000ff00) +
|
||||
/* enable */
|
||||
1;
|
||||
|
||||
pci_write_config32(dev, 0xf0, hoist);
|
||||
}
|
||||
|
||||
return carry_over;
|
||||
}
|
||||
#endif
|
||||
|
||||
static void pci_domain_set_resources(device_t dev)
|
||||
{
|
||||
#if CONFIG_PCI_64BIT_PREF_MEM == 1
|
||||
struct resource *io, *mem1, *mem2;
|
||||
struct resource *resource, *last;
|
||||
#endif
|
||||
unsigned long mmio_basek;
|
||||
uint32_t pci_tolm;
|
||||
int i, idx;
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
struct hw_mem_hole_info mem_hole;
|
||||
unsigned reset_memhole = 1;
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
/* Place the IO devices somewhere safe */
|
||||
io = find_resource(dev, 0);
|
||||
io->base = DEVICE_IO_START;
|
||||
#endif
|
||||
#if CONFIG_PCI_64BIT_PREF_MEM == 1
|
||||
/* Now reallocate the pci resources memory with the
|
||||
* highest addresses I can manage.
|
||||
*/
|
||||
mem1 = find_resource(dev, 1);
|
||||
mem2 = find_resource(dev, 2);
|
||||
|
||||
#if 1
|
||||
printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
|
||||
mem1->base, mem1->limit, mem1->size, mem1->align);
|
||||
printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
|
||||
mem2->base, mem2->limit, mem2->size, mem2->align);
|
||||
#endif
|
||||
|
||||
/* See if both resources have roughly the same limits */
|
||||
if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
|
||||
((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
|
||||
{
|
||||
/* If so place the one with the most stringent alignment first
|
||||
*/
|
||||
if (mem2->align > mem1->align) {
|
||||
struct resource *tmp;
|
||||
tmp = mem1;
|
||||
mem1 = mem2;
|
||||
mem2 = tmp;
|
||||
}
|
||||
/* Now place the memory as high up as it will go */
|
||||
mem2->base = resource_max(mem2);
|
||||
mem1->limit = mem2->base - 1;
|
||||
mem1->base = resource_max(mem1);
|
||||
}
|
||||
else {
|
||||
/* Place the resources as high up as they will go */
|
||||
mem2->base = resource_max(mem2);
|
||||
mem1->base = resource_max(mem1);
|
||||
}
|
||||
|
||||
#if 1
|
||||
printk_debug("base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
|
||||
mem1->base, mem1->limit, mem1->size, mem1->align);
|
||||
printk_debug("base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
|
||||
mem2->base, mem2->limit, mem2->size, mem2->align);
|
||||
#endif
|
||||
|
||||
last = &dev->resource[dev->resources];
|
||||
for(resource = &dev->resource[0]; resource < last; resource++)
|
||||
{
|
||||
#if 1
|
||||
resource->flags |= IORESOURCE_ASSIGNED;
|
||||
resource->flags &= ~IORESOURCE_STORED;
|
||||
#endif
|
||||
compute_allocate_resource(&dev->link[0], resource,
|
||||
BRIDGE_IO_MASK, resource->flags & BRIDGE_IO_MASK);
|
||||
|
||||
resource->flags |= IORESOURCE_STORED;
|
||||
report_resource_stored(dev, resource, "");
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
pci_tolm = find_pci_tolm(&dev->link[0]);
|
||||
|
||||
/* Work around for NUMA bug in all kernels before 2.6.13.
|
||||
If pci memory hole is too small, the kernel memory to NUMA
|
||||
node mapping will fail to initialize and system will run in
|
||||
non-NUMA mode.
|
||||
*/
|
||||
if(pci_tolm > 0xf8000000) pci_tolm = 0xf8000000;
|
||||
|
||||
#warning "FIXME handle interleaved nodes"
|
||||
mmio_basek = pci_tolm >> 10;
|
||||
/* Round mmio_basek to something the processor can support */
|
||||
mmio_basek &= ~((1 << 6) -1);
|
||||
|
||||
#if 1
|
||||
#warning "FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M MMIO hole"
|
||||
/* Round the mmio hole to 64M */
|
||||
mmio_basek &= ~((64*1024) - 1);
|
||||
#endif
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
/* If the hw mem hole was already set in the raminit stage, compare mmio_basek and hole_startk here.
 * If mmio_basek is bigger than hole_startk, use hole_startk as mmio_basek and the hole does not need to be reset.
 * Otherwise, reset the hole to mmio_basek.
 */
|
||||
if (!is_cpu_pre_e0()) {
|
||||
|
||||
mem_hole = get_hw_mem_hole_info();
|
||||
|
||||
if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) { //We will use hole_basek as mmio_basek, and we don't need to reset hole anymore
|
||||
mmio_basek = mem_hole.hole_startk;
|
||||
reset_memhole = 0;
|
||||
}
|
||||
|
||||
//mmio_basek = 3*1024*1024; // for debug to meet boundary
|
||||
|
||||
if(reset_memhole) {
|
||||
if(mem_hole.node_id!=-1) { // K8_HW_MEM_HOLE_SIZEK must be selected for raminit so that hole_startk does not end up equal to some node's basek!
// We need to reset our mem hole, because we want a bigger hole than the one already set.
//Before that we need to disable the mem hole first, because the memhole could already be set on node i+1 instead.
|
||||
disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id);
|
||||
}
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZE_AUTO_INC == 1
|
||||
//Double-check that mmio_basek is valid for the hole setting; if it is equal to a node's basek, we need to decrease it a bit.
|
||||
uint32_t basek_pri;
|
||||
for (i = 0; i < 8; i++) {
|
||||
uint32_t base;
|
||||
uint32_t basek;
|
||||
base = f1_read_config32(0x40 + (i << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
|
||||
basek = (base & 0xffff0000) >> 2;
|
||||
if(mmio_basek == basek) {
|
||||
mmio_basek -= (basek - basek_pri)>>1; // increase the mem hole size to make sure it lands in the middle of the previous node
|
||||
break;
|
||||
}
|
||||
basek_pri = basek;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
} // is_cpu_pre_e0
|
||||
|
||||
#endif
|
||||
|
||||
idx = 0x10;
|
||||
for(i = 0; i < 8; i++) {
|
||||
uint32_t base, limit;
|
||||
unsigned basek, limitk, sizek;
|
||||
|
||||
base = f1_read_config32(0x40 + (i << 3));
|
||||
limit = f1_read_config32(0x44 + (i << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
|
@ -715,18 +998,25 @@ static void pci_domain_set_resources(device_t dev)
|
|||
|
||||
}
|
||||
|
||||
|
||||
|
||||
// printk_debug("node %d : mmio_basek=%08x, basek=%08x, limitk=%08x\n", i, mmio_basek, basek, limitk); //yhlu
|
||||
|
||||
/* See if I need to split the region to accommodate pci memory space */
|
||||
if ((basek < mmio_basek) && (limitk > mmio_basek)) {
|
||||
if (basek < mmio_basek) {
|
||||
if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
|
||||
if (basek <= mmio_basek) {
|
||||
unsigned pre_sizek;
|
||||
pre_sizek = mmio_basek - basek;
|
||||
ram_resource(dev, (idx | i), basek, pre_sizek);
|
||||
idx += 0x10;
|
||||
sizek -= pre_sizek;
|
||||
if(! is_cpu_pre_e0() ) {
|
||||
sizek += hoist_memory(mmio_basek,i);
|
||||
}
|
||||
if(pre_sizek>0) {
|
||||
ram_resource(dev, (idx | i), basek, pre_sizek);
|
||||
idx += 0x10;
|
||||
sizek -= pre_sizek;
|
||||
}
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
if(reset_memhole)
|
||||
if(!is_cpu_pre_e0() )
|
||||
sizek += hoist_memory(mmio_basek,i);
|
||||
#endif
|
||||
|
||||
basek = mmio_basek;
|
||||
}
|
||||
if ((basek + sizek) <= 4*1024*1024) {
|
||||
|
@ -791,16 +1081,59 @@ static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
|
|||
{
|
||||
struct bus *cpu_bus;
|
||||
device_t dev_mc;
|
||||
int bsp_apicid;
|
||||
int apicid_offset;
|
||||
int i,j;
|
||||
int nodes;
|
||||
unsigned nb_cfg_54;
|
||||
int enable_apic_ext_id;
|
||||
unsigned siblings;
|
||||
int e0_later_single_core;
|
||||
int disable_siblings;
|
||||
unsigned lift_bsp_apicid;
|
||||
|
||||
nb_cfg_54 = 0;
|
||||
enable_apic_ext_id = 0;
|
||||
lift_bsp_apicid = 0;
|
||||
siblings = 0;
|
||||
|
||||
/* Find the bootstrap processors apicid */
|
||||
bsp_apicid = lapicid();
|
||||
apicid_offset = bsp_apicid;
|
||||
|
||||
disable_siblings = !CONFIG_LOGICAL_CPUS;
|
||||
#if CONFIG_LOGICAL_CPUS == 1
|
||||
get_option(&disable_siblings, "dual_core");
|
||||
#endif
|
||||
|
||||
// For pre-E0, nb_cfg_54 can not be set (even if set, it still reads back as 0).
// How can we get nb_cfg_54 for every node from the BSP, and tell D0 and E0 single-core apart?
|
||||
|
||||
nb_cfg_54 = read_nb_cfg_54();
|
||||
|
||||
dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
|
||||
if (!dev_mc) {
|
||||
die("0:18.0 not found?");
|
||||
}
|
||||
|
||||
nodes = ((pci_read_config32(dev_mc, 0x60)>>4) & 7) + 1;
|
||||
|
||||
if (pci_read_config32(dev_mc, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
|
||||
{
|
||||
enable_apic_ext_id = 1;
|
||||
if(bsp_apicid == 0) {
|
||||
/* bsp apic id is not changed */
|
||||
apicid_offset = APIC_ID_OFFSET;
|
||||
} else
|
||||
{
|
||||
lift_bsp_apicid = 1;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Find which cpus are present */
|
||||
cpu_bus = &dev->link[0];
|
||||
for(i = 0; i < 8; i++) {
|
||||
for(i = 0; i < nodes; i++) {
|
||||
device_t dev, cpu;
|
||||
struct device_path cpu_path;
|
||||
|
||||
|
@ -811,43 +1144,111 @@ static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
|
|||
* ensure all of the cpu's pci devices are found.
|
||||
*/
|
||||
int j;
|
||||
device_t dev_f0;
|
||||
for(j = 0; j <= 3; j++) {
|
||||
dev = pci_probe_dev(NULL, dev_mc->bus,
|
||||
PCI_DEVFN(0x18 + i, j));
|
||||
}
|
||||
/* Ok, We need to set the links for that device.
|
||||
* otherwise the device under it will not be scanned
|
||||
*/
|
||||
dev_f0 = dev_find_slot(0, PCI_DEVFN(0x18+i,0));
|
||||
if(dev_f0) {
|
||||
dev_f0->links = 3;
|
||||
for(j=0;j<3;j++) {
|
||||
dev_f0->link[j].link = j;
|
||||
dev_f0->link[j].dev = dev_f0;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
e0_later_single_core = 0;
|
||||
if (dev && dev->enabled) {
|
||||
j = pci_read_config32(dev, 0xe8);
|
||||
j = (j >> 12) & 3; // dev is func 3
|
||||
printk_debug(" %s siblings=%d\r\n", dev_path(dev), j);
|
||||
|
||||
if(nb_cfg_54) {
|
||||
// For e0 single core if nb_cfg_54 is set, apicid will be 0, 2, 4....
|
||||
// ----> you can mix single-core E0 and dual-core E0 in any sequence
|
||||
// That is the typical case
|
||||
|
||||
if(j == 0 ){
|
||||
e0_later_single_core = is_e0_later_in_bsp(i); // single core
|
||||
} else {
|
||||
e0_later_single_core = 0;
|
||||
}
|
||||
if(e0_later_single_core) {
|
||||
printk_debug("\tFound Rev E or Rev F later single core\r\n");
|
||||
|
||||
j=1;
|
||||
}
|
||||
|
||||
if(siblings > j ) {
|
||||
}
|
||||
else {
|
||||
siblings = j;
|
||||
}
|
||||
} else {
|
||||
siblings = j;
|
||||
}
|
||||
}
|
||||
|
||||
/* Build the cpu device path */
|
||||
cpu_path.type = DEVICE_PATH_APIC;
|
||||
cpu_path.u.apic.apic_id = 0x10 + i;
|
||||
|
||||
/* See if I can find the cpu */
|
||||
cpu = find_dev_path(cpu_bus, &cpu_path);
|
||||
|
||||
/* Enable the cpu if I have the processor */
|
||||
if (dev && dev->enabled) {
|
||||
if (!cpu) {
|
||||
cpu = alloc_dev(cpu_bus, &cpu_path);
|
||||
unsigned jj;
|
||||
if(e0_later_single_core || disable_siblings) {
|
||||
jj = 0;
|
||||
} else
|
||||
{
|
||||
jj = siblings;
|
||||
}
|
||||
#if 0
|
||||
jj = 0; // if create cpu core1 path in amd_siblings by core0
|
||||
#endif
|
||||
|
||||
for (j = 0; j <=jj; j++ ) {
|
||||
|
||||
/* Build the cpu device path */
|
||||
cpu_path.type = DEVICE_PATH_APIC;
|
||||
cpu_path.u.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
|
||||
|
||||
/* See if I can find the cpu */
|
||||
cpu = find_dev_path(cpu_bus, &cpu_path);
|
||||
|
||||
/* Enable the cpu if I have the processor */
|
||||
if (dev && dev->enabled) {
|
||||
if (!cpu) {
|
||||
cpu = alloc_dev(cpu_bus, &cpu_path);
|
||||
}
|
||||
if (cpu) {
|
||||
cpu->enabled = 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Disable the cpu if I don't have the processor */
|
||||
if (cpu && (!dev || !dev->enabled)) {
|
||||
cpu->enabled = 0;
|
||||
}
|
||||
|
||||
/* Report what I have done */
|
||||
if (cpu) {
|
||||
cpu->enabled = 1;
|
||||
cpu->path.u.apic.node_id = i;
|
||||
cpu->path.u.apic.core_id = j;
|
||||
if(enable_apic_ext_id) {
|
||||
if(lift_bsp_apicid) {
|
||||
cpu->path.u.apic.apic_id += apicid_offset;
|
||||
} else
|
||||
{
|
||||
if (cpu->path.u.apic.apic_id != 0)
|
||||
cpu->path.u.apic.apic_id += apicid_offset;
|
||||
}
|
||||
}
|
||||
printk_debug("CPU: %s %s\n",
|
||||
dev_path(cpu), cpu->enabled?"enabled":"disabled");
|
||||
}
|
||||
}
|
||||
|
||||
/* Disable the cpu if I don't have the processor */
|
||||
if (cpu && (!dev || !dev->enabled)) {
|
||||
cpu->enabled = 0;
|
||||
}
|
||||
|
||||
/* Report what I have done */
|
||||
if (cpu) {
|
||||
cpu->path.u.apic.node_id = i;
|
||||
cpu->path.u.apic.core_id = 0;
|
||||
printk_debug("CPU: %s %s\n",
|
||||
dev_path(cpu), cpu->enabled?"enabled":"disabled");
|
||||
}
|
||||
}
|
||||
|
||||
} //j
|
||||
}
|
||||
return max;
|
||||
}
|
||||
|
||||
|
|
|
@ -585,16 +585,6 @@ static void hw_enable_ecc(const struct mem_controller *ctrl)
|
|||
|
||||
}
|
||||
|
||||
static void e_step_cpu(const struct mem_controller *ctrl)
|
||||
{
|
||||
uint32_t dcl,data32;
|
||||
|
||||
/* set bit 29 (upper cs map) of function 2 offset 0x90 */
|
||||
dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
|
||||
dcl |= DCL_UpperCSMap;
|
||||
pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
|
||||
}
|
||||
|
||||
static int is_dual_channel(const struct mem_controller *ctrl)
|
||||
{
|
||||
uint32_t dcl;
|
||||
|
@ -724,6 +714,7 @@ hw_err:
|
|||
return sz;
|
||||
}
|
||||
|
||||
|
||||
static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
|
||||
{
|
||||
uint32_t base0, base1;
|
||||
|
@ -732,7 +723,7 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
|
|||
if (sz.side1 != sz.side2) {
|
||||
sz.side2 = 0;
|
||||
}
|
||||
|
||||
|
||||
/* For each base register.
|
||||
* Place the dimm size in 32 MB quantities in the bits 31 - 21.
|
||||
* The initialize dimm size is in bits.
|
||||
|
@ -784,51 +775,50 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
static void set_dimm_map(const struct mem_controller *ctrl,
|
||||
struct dimm_size sz, unsigned index)
|
||||
static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
|
||||
{
|
||||
static const unsigned cs_map_aa[15] = {
|
||||
static const unsigned cs_map_aa[] = {
|
||||
/* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
|
||||
0, 1, 3, 6, 0,
|
||||
0, 2, 4, 7, 9,
|
||||
0, 0, 5, 8,10,
|
||||
};
|
||||
|
||||
uint32_t map;
|
||||
int row,col;
|
||||
uint32_t dch;
|
||||
|
||||
map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
|
||||
map &= ~(0xf << (index * 4));
|
||||
|
||||
#if K8_4RANK_DIMM_SUPPORT == 1
|
||||
if(sz.rank == 4) {
|
||||
map &= ~(0xf << ( (index + 2) * 4));
|
||||
}
|
||||
#endif
|
||||
|
||||
if (is_cpu_pre_d0()) {
|
||||
map |= (sz.side1 - (25 + 3)) << (index *4);
|
||||
|
||||
/* Make certain side1 of the dimm is at least 32MB */
|
||||
if (sz.side1 >= (25 +3)) {
|
||||
if(is_cpu_pre_d0()) {
|
||||
map |= (sz.side1 - (25 + 3)) << (index *4);
|
||||
#if K8_4RANK_DIMM_SUPPORT == 1
|
||||
if(sz.rank == 4) {
|
||||
map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
unsigned val;
|
||||
val = cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ];
|
||||
if(val == 0) {
|
||||
print_err("Invalid Column or Row count\r\n");
|
||||
val = 7;
|
||||
}
|
||||
map |= val << (index*4);
|
||||
else {
|
||||
map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
|
||||
#if K8_4RANK_DIMM_SUPPORT == 1
|
||||
if(sz.rank == 4) {
|
||||
map |= val << ( (index + 2) * 4);
|
||||
}
|
||||
if(sz.rank == 4) {
|
||||
map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
|
||||
{
|
||||
|
@ -844,7 +834,7 @@ static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
|
|||
return -1; /* Report SPD error */
|
||||
}
|
||||
set_dimm_size(ctrl, sz, i);
|
||||
set_dimm_map(ctrl, sz, i);
|
||||
set_dimm_map (ctrl, sz, i);
|
||||
}
|
||||
return dimm_mask;
|
||||
}
|
||||
|
@ -878,7 +868,7 @@ static void route_dram_accesses(const struct mem_controller *ctrl,
|
|||
}
|
||||
}
|
||||
|
||||
static void set_top_mem(unsigned tom_k)
|
||||
static void set_top_mem(unsigned tom_k, unsigned hole_startk)
|
||||
{
|
||||
/* Error if I don't have memory */
|
||||
if (!tom_k) {
|
||||
|
@ -900,7 +890,12 @@ static void set_top_mem(unsigned tom_k)
|
|||
* so I can see my rom chip and other I/O devices.
|
||||
*/
|
||||
if (tom_k >= 0x003f0000) {
|
||||
tom_k = 0x3f0000;
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
if(hole_startk != 0) {
|
||||
tom_k = hole_startk;
|
||||
} else
|
||||
#endif
|
||||
tom_k = 0x3f0000;
|
||||
}
|
||||
msr.lo = (tom_k & 0x003fffff) << 10;
|
||||
msr.hi = (tom_k & 0xffc00000) >> 22;
|
||||
|
@ -910,28 +905,28 @@ static void set_top_mem(unsigned tom_k)
|
|||
static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
|
||||
{
|
||||
/* 35 - 25 */
|
||||
static const uint32_t csbase_low[] = {
|
||||
/* 32MB */ (1 << (13 - 4)),
|
||||
/* 64MB */ (1 << (14 - 4)),
|
||||
/* 128MB */ (1 << (14 - 4)),
|
||||
/* 256MB */ (1 << (15 - 4)),
|
||||
/* 512MB */ (1 << (15 - 4)),
|
||||
/* 1GB */ (1 << (16 - 4)),
|
||||
/* 2GB */ (1 << (16 - 4)),
|
||||
static const uint8_t csbase_low_shift[] = {
|
||||
/* 32MB */ (13 - 4),
|
||||
/* 64MB */ (14 - 4),
|
||||
/* 128MB */ (14 - 4),
|
||||
/* 256MB */ (15 - 4),
|
||||
/* 512MB */ (15 - 4),
|
||||
/* 1GB */ (16 - 4),
|
||||
/* 2GB */ (16 - 4),
|
||||
};
|
||||
|
||||
static const uint32_t csbase_low_d0[] = {
|
||||
/* 32MB */ (1 << (13 - 4)),
|
||||
/* 64MB */ (1 << (14 - 4)),
|
||||
/* 128MB */ (1 << (14 - 4)),
|
||||
/* 128MB */ (1 << (15 - 4)),
|
||||
/* 256MB */ (1 << (15 - 4)),
|
||||
/* 512MB */ (1 << (15 - 4)),
|
||||
/* 256MB */ (1 << (16 - 4)),
|
||||
/* 512MB */ (1 << (16 - 4)),
|
||||
/* 1GB */ (1 << (16 - 4)),
|
||||
/* 1GB */ (1 << (17 - 4)),
|
||||
/* 2GB */ (1 << (17 - 4)),
|
||||
static const uint8_t csbase_low_d0_shift[] = {
|
||||
/* 32MB */ (13 - 4),
|
||||
/* 64MB */ (14 - 4),
|
||||
/* 128MB */ (14 - 4),
|
||||
/* 128MB */ (15 - 4),
|
||||
/* 256MB */ (15 - 4),
|
||||
/* 512MB */ (15 - 4),
|
||||
/* 256MB */ (16 - 4),
|
||||
/* 512MB */ (16 - 4),
|
||||
/* 1GB */ (16 - 4),
|
||||
/* 1GB */ (17 - 4),
|
||||
/* 2GB */ (17 - 4),
|
||||
};
|
||||
|
||||
/* cs_base_high is not changed */
|
||||
|
@ -992,21 +987,21 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
|
|||
|
||||
/* Find the bits of csbase that we need to interleave on */
|
||||
if(is_cpu_pre_d0()){
|
||||
csbase_inc = csbase_low[common_cs_mode];
|
||||
csbase_inc = 1 << csbase_low_shift[common_cs_mode];
|
||||
if(is_dual_channel(ctrl)) {
|
||||
/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
|
||||
if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
|
||||
print_spew("8 4GB chip selects cannot be interleaved\r\n");
|
||||
// print_debug("8 4GB chip selects cannot be interleaved\r\n");
|
||||
return 0;
|
||||
}
|
||||
csbase_inc <<=1;
|
||||
}
|
||||
}
|
||||
else {
|
||||
csbase_inc = csbase_low_d0[common_cs_mode];
|
||||
csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
|
||||
if(is_dual_channel(ctrl)) {
|
||||
if( (bits==3) && (common_cs_mode > 8)) {
|
||||
print_spew("8 cs_mode>8 chip selects cannot be interleaved\r\n");
|
||||
// print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
|
||||
return 0;
|
||||
}
|
||||
csbase_inc <<=1;
|
||||
|
@ -1142,7 +1137,7 @@ static void order_dimms(const struct mem_controller *ctrl)
|
|||
base_k = memory_end_k(ctrl, ctrl->node_id);
|
||||
tom_k += base_k;
|
||||
route_dram_accesses(ctrl, base_k, tom_k);
|
||||
set_top_mem(tom_k);
|
||||
set_top_mem(tom_k, 0);
|
||||
}
|
||||
|
||||
static long disable_dimm(const struct mem_controller *ctrl, unsigned index, long dimm_mask)
|
||||
|
@ -1239,7 +1234,7 @@ static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_ma
|
|||
int i;
|
||||
uint32_t nbcap;
|
||||
/* SPD addresses to verify are identical */
|
||||
static const unsigned addresses[] = {
|
||||
static const uint8_t addresses[] = {
|
||||
2, /* Type should be DDR SDRAM */
|
||||
3, /* *Row addresses */
|
||||
4, /* *Column addresses */
|
||||
|
@ -1399,7 +1394,7 @@ static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *
|
|||
int i;
|
||||
uint32_t value;
|
||||
|
||||
static const int latency_indicies[] = { 26, 23, 9 };
|
||||
static const uint8_t latency_indicies[] = { 26, 23, 9 };
|
||||
static const unsigned char min_cycle_times[] = {
|
||||
[NBCAP_MEMCLK_200MHZ] = 0x50, /* 5ns */
|
||||
[NBCAP_MEMCLK_166MHZ] = 0x60, /* 6ns */
|
||||
|
@ -2143,11 +2138,12 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
|
|||
struct spd_set_memclk_result result;
|
||||
const struct mem_param *param;
|
||||
long dimm_mask;
|
||||
|
||||
#if 1
|
||||
if (!controller_present(ctrl)) {
|
||||
print_debug("No memory controller present\r\n");
|
||||
// print_debug("No memory controller present\r\n");
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
hw_enable_ecc(ctrl);
|
||||
activate_spd_rom(ctrl);
|
||||
dimm_mask = spd_detect_dimms(ctrl);
|
||||
|
@ -2173,10 +2169,6 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
|
|||
if (dimm_mask < 0)
|
||||
goto hw_spd_err;
|
||||
order_dimms(ctrl);
|
||||
if( !is_cpu_pre_e0() ) {
|
||||
print_debug("E step CPU\r\n");
|
||||
// e_step_cpu(ctrl); // Socket 939 only.
|
||||
}
|
||||
return;
|
||||
hw_spd_err:
|
||||
/* Unrecoverable error reading SPD data */
|
||||
|
@ -2185,6 +2177,110 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
|
|||
return;
|
||||
}
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,unsigned hole_startk, int i)
|
||||
{
|
||||
int ii;
|
||||
uint32_t carry_over;
|
||||
device_t dev;
|
||||
uint32_t base, limit;
|
||||
uint32_t basek;
|
||||
uint32_t hoist;
|
||||
int j;
|
||||
|
||||
carry_over = (4*1024*1024) - hole_startk;
|
||||
|
||||
for(ii=controllers - 1;ii>i;ii--) {
|
||||
base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
|
||||
for(j = 0; j < controllers; j++) {
|
||||
pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit + (carry_over << 2));
|
||||
pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base + (carry_over << 2));
|
||||
}
|
||||
}
|
||||
limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
|
||||
for(j = 0; j < controllers; j++) {
|
||||
pci_write_config32(ctrl[j].f1, 0x44 + (i << 3), limit + (carry_over << 2));
|
||||
}
|
||||
dev = ctrl[i].f1;
|
||||
base = pci_read_config32(dev, 0x40 + (i << 3));
|
||||
basek = (base & 0xffff0000) >> 2;
|
||||
if(basek == hole_startk) {
|
||||
//No need to set the memhole register here: the hole offset would be 0 (overflow),
//so change the base register instead; the new basek will be 4*1024*1024.
|
||||
base &= 0x0000ffff;
|
||||
base |= (4*1024*1024)<<2;
|
||||
for(j = 0; j < controllers; j++) {
|
||||
pci_write_config32(ctrl[j].f1, 0x40 + (i<<3), base);
|
||||
}
|
||||
}
|
||||
else {
|
||||
hoist = /* hole start address */
|
||||
((hole_startk << 10) & 0xff000000) +
|
||||
/* hole address to memory controller address */
|
||||
(((basek + carry_over) >> 6) & 0x0000ff00) +
|
||||
/* enable */
|
||||
1;
|
||||
pci_write_config32(dev, 0xf0, hoist);
|
||||
}
|
||||
|
||||
return carry_over;
|
||||
}
|
||||
|
||||
static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
|
||||
{
|
||||
|
||||
uint32_t hole_startk;
|
||||
int i;
|
||||
|
||||
hole_startk = 4*1024*1024 - K8_HW_MEM_HOLE_SIZEK;
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZE_AUTO_INC == 1
|
||||
//Double-check that hole_startk is valid; if it is equal to a node's basek, we need to decrease it a bit.
|
||||
uint32_t basek_pri;
|
||||
for(i=0; i<controllers; i++) {
|
||||
uint32_t base;
|
||||
unsigned base_k;
|
||||
base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
base_k = (base & 0xffff0000) >> 2;
|
||||
if(base_k == hole_startk) {
|
||||
hole_startk -= (base_k - basek_pri)>>1; // decrease the mem hole startk to make sure it lands in the middle of the previous node
|
||||
break; //only one hole
|
||||
}
|
||||
basek_pri = base_k;
|
||||
}
|
||||
|
||||
#endif
|
||||
//find node index that need do set hole
|
||||
for(i=0; i<controllers; i++) {
|
||||
uint32_t base, limit;
|
||||
unsigned base_k, limit_k;
|
||||
base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
|
||||
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
|
||||
continue;
|
||||
}
|
||||
limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
|
||||
base_k = (base & 0xffff0000) >> 2;
|
||||
limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
|
||||
if ((base_k <= hole_startk) && (limit_k > hole_startk)) {
|
||||
unsigned end_k;
|
||||
hoist_memory(controllers, ctrl, hole_startk, i);
|
||||
end_k = memory_end_k(ctrl, controllers);
|
||||
set_top_mem(end_k, hole_startk);
|
||||
break; //only one hole
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#define TIMEOUT_LOOPS 300000
|
||||
static void sdram_enable(int controllers, const struct mem_controller *ctrl)
|
||||
{
|
||||
|
@ -2260,6 +2356,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
|
|||
}
|
||||
|
||||
print_debug("Initializing memory: ");
|
||||
|
||||
int loops = 0;
|
||||
do {
|
||||
dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
|
||||
|
@ -2272,6 +2369,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
|
|||
print_debug(" failed\r\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!is_cpu_pre_c0()) {
|
||||
/* Wait until it is safe to touch memory */
|
||||
dcl &= ~(DCL_MemClrStatus | DCL_DramEnable);
|
||||
|
@ -2284,6 +2382,13 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
|
|||
print_debug(" done\r\n");
|
||||
}
|
||||
|
||||
#if K8_HW_MEM_HOLE_SIZEK != 0
|
||||
// init hw mem hole here
|
||||
/* The DramHoleValid bit can only be set after MemClrStatus is set by hardware */
|
||||
if(!is_cpu_pre_e0())
|
||||
set_hw_mem_hole(controllers, ctrl);
|
||||
#endif
|
||||
|
||||
//FIXME add enable node interleaving here -- yhlu
|
||||
/*needed?
|
||||
1. check how many nodes we have , if not all has ram installed get out
|
||||
|
@ -2309,3 +2414,55 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
|
|||
print_debug(" done\r\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
static int mem_inited(int controllers, const struct mem_controller *ctrl)
|
||||
{
|
||||
int i;
|
||||
|
||||
unsigned mask = 0;
|
||||
unsigned mask_inited = 0;
|
||||
|
||||
for(i = 0; i < controllers; i++) {
|
||||
uint32_t dcl;
|
||||
if (!controller_present(ctrl + i))
|
||||
continue;
|
||||
|
||||
mask |= (1<<i);
|
||||
dcl = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_LOW);
|
||||
|
||||
if (!is_cpu_pre_c0()) { // B3
|
||||
|
||||
if( (dcl & DCL_MemClrStatus) && (dcl & DCL_DramEnable) ) {
|
||||
mask_inited |= (1<<i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(mask == mask_inited) return 1;
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
#if USE_DCACHE_RAM == 1
|
||||
static void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a, const uint16_t *spd_addr)
|
||||
{
|
||||
int i;
|
||||
int j;
|
||||
struct mem_controller *ctrl;
|
||||
for(i=0;i<controllers; i++) {
|
||||
ctrl = &ctrl_a[i];
|
||||
ctrl->node_id = i;
|
||||
ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
|
||||
ctrl->f1 = PCI_DEV(0, 0x18+i, 1);
|
||||
ctrl->f2 = PCI_DEV(0, 0x18+i, 2);
|
||||
ctrl->f3 = PCI_DEV(0, 0x18+i, 3);
|
||||
|
||||
if(spd_addr == (void *)0) continue;
|
||||
|
||||
for(j=0;j<DIMM_SOCKETS;j++) {
|
||||
ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
|
||||
ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
|