- Merge from linuxbios-lnxi (Linux Networx repository) up to public tree.

- Special version for HDAMA rev G with 33Mhz test and reboot out.
        - Support for CPU rev E, dual core, memory hoisting,
        - corrected an SST flashing problem. Kernel bug work around (NUMA)
        - added a Kernel bug work around for assigning CPU's to memory.

 r2@gog:  svnadmin | 2005-08-03 08:47:54 -0600
 Create local LNXI branch
 r1110@gog:  jschildt | 2005-08-09 10:35:51 -0600
 - Merge from Tom Zimmerman's additions to the hdama code for dual core
   and 33Mhz fix.
 
 
 r1111@gog:  jschildt | 2005-08-09 11:07:11 -0600
 Stable Release tag for HDAMA-1.1.8.10 and HDAMA-1.1.8.10LANL
 r1112@gog:  jschildt | 2005-08-09 15:09:32 -0600
 - temporarily removing hdama tag to update to public repository.  Will
   reset tag after update.
 
 


git-svn-id: svn://svn.coreboot.org/coreboot/trunk@2004 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
This commit is contained in:
Jason Schildt 2005-08-09 21:53:07 +00:00
parent dc2454eb94
commit 6e44b422b3
31 changed files with 903 additions and 1263 deletions

View File

@ -23,7 +23,7 @@ struct lb_memory *write_tables(void)
post_code(0x9a);
/* This table must be betweeen 0xf0000 & 0x100000 */
rom_table_end = write_pirq_routing_table(rom_table_end);
rom_table_end = copy_pirq_routing_table(rom_table_end);
rom_table_end = (rom_table_end + 1023) & ~1023;
/* copy the smp block to address 0 */

View File

@ -1,4 +1,5 @@
/* 2004.12 yhlu add dual core support */
/* 24 June 2005 Cleaned up dual core support Eric Biederman */
#include <console/console.h>
#include <cpu/cpu.h>
@ -14,59 +15,87 @@
static int first_time = 1;
static int disable_siblings = !CONFIG_LOGICAL_CPUS;
int is_e0_later_in_bsp(int nodeid)
void amd_sibling_init(device_t cpu, struct node_core_id id)
{
uint32_t val;
uint32_t val_old;
int e0_later;
if(nodeid==0) { // we don't need to do that for node 0 in core0/node0
return !is_cpu_pre_e0();
}
// d0 will be treated as e0 with this methods, but the d0 nb_cfg_54 always 0
device_t dev;
dev = dev_find_slot(0, PCI_DEVFN(0x18+nodeid,2));
if(!dev) return 0;
val_old = pci_read_config32(dev, 0x80);
val = val_old;
val |= (1<<3);
pci_write_config32(dev, 0x80, val);
val = pci_read_config32(dev, 0x80);
e0_later = !!(val & (1<<3));
if(e0_later) { // pre_e0 bit 3 always be 0 and can not be changed
pci_write_config32(dev, 0x80, val_old); // restore it
unsigned long i;
unsigned siblings, max_siblings;
/* On the bootstrap processor see if I want sibling cpus enabled */
if (first_time) {
first_time = 0;
get_option(&disable_siblings, "dual_core");
}
return e0_later;
siblings = cpuid_ecx(0x80000008) & 0xff;
printk_debug("%d Sibling Cores found\n", siblings);
/* For now assume all cpus have the same number of siblings */
max_siblings = siblings + 1;
/* Wishlist? make dual cores look like hyperthreading */
/* See if I am a sibling cpu */
if (disable_siblings && (id.coreid != 0)) {
cpu->enabled = 0;
}
if (id.coreid == 0) {
/* On the primary cpu find the siblings */
for (i = 1; i <= siblings; i++) {
struct device_path cpu_path;
device_t new;
/* Build the cpu device path */
cpu_path.type = DEVICE_PATH_APIC;
cpu_path.u.apic.apic_id =
(id.nodeid*max_siblings) + i;
new = alloc_dev(cpu->bus, &cpu_path);
if (!new) {
continue;
}
/* Report what I have done */
printk_debug("CPU: %s %s\n",
dev_path(new), new->enabled?"enabled":"disabled");
}
}
}
unsigned int read_nb_cfg_54(void)
struct node_core_id get_node_core_id(void)
{
msr_t msr;
msr = rdmsr(NB_CFG_MSR);
return ( ( msr.hi >> (54-32)) & 1);
}
struct node_core_id get_node_core_id(unsigned int nb_cfg_54) {
struct node_core_id id;
// get the apicid via cpuid(1) ebx[27:24]
if(nb_cfg_54) {
// when NB_CFG[54] is set, nodid = ebx[27:25], coreid = ebx[24]
id.coreid = (cpuid_ebx(1) >> 24) & 0xf;
id.nodeid = (id.coreid>>1);
id.coreid &= 1;
} else { // single core should be here too
unsigned siblings;
/* Get the apicid at reset */
id.nodeid = (cpuid_ebx(1) >> 24) & 0xff;
id.coreid = 0;
/* Find out how many siblings we have */
siblings = cpuid_ecx(0x80000008) & 0xff;
if (siblings) {
unsigned bits;
msr_t msr;
bits = 0;
while ((1 << bits) <= siblings)
bits++;
msr = rdmsr(NB_CFG_MSR);
if ((msr.hi >> (54-32)) & 1) {
// when NB_CFG[54] is set, nodeid = ebx[27:25], coreid = ebx[24]
id.coreid = id.nodeid & ((1 << bits) - 1);
id.nodeid >>= bits;
} else {
// when NB_CFG[54] is clear, nodeid = ebx[26:24], coreid = ebx[27]
id.nodeid = (cpuid_ebx(1) >> 24) & 0xf;
id.coreid = (id.nodeid>>3);
id.coreid = id.nodeid >> 3;
id.nodeid &= 7;
}
} else {
if (!is_cpu_pre_e0()) {
id.nodeid >>= 1;
}
}
return id;
}
#if 0
static int get_max_siblings(int nodes)
{
device_t dev;
@ -169,76 +198,5 @@ unsigned get_apicid_base(unsigned ioapic_num)
return apicid_base;
}
#if 0
void amd_sibling_init(device_t cpu)
{
unsigned i, siblings;
struct cpuid_result result;
unsigned nb_cfg_54;
struct node_core_id id;
/* On the bootstrap processor see if I want sibling cpus enabled */
if (first_time) {
first_time = 0;
get_option(&disable_siblings, "dual_core");
}
result = cpuid(0x80000008);
/* See how many sibling cpus we have */
/* Is dualcore supported */
siblings = (result.ecx & 0xff);
if ( siblings < 1) {
return;
}
#if 1
printk_debug("CPU: %u %d siblings\n",
cpu->path.u.apic.apic_id,
siblings);
#endif
nb_cfg_54 = read_nb_cfg_54();
#if 1
id = get_node_core_id(nb_cfg_54); // pre e0 nb_cfg_54 can not be set
/* See if I am a sibling cpu */
//if ((cpu->path.u.apic.apic_id>>(nb_cfg_54?0:3)) & siblings ) { // siblings = 1, 3, 7, 15,....
//if ( ( (cpu->path.u.apic.apic_id>>(nb_cfg_54?0:3)) % (siblings+1) ) != 0 ) {
if(id.coreid != 0) {
if (disable_siblings) {
cpu->enabled = 0;
}
return;
}
#endif
/* I am the primary cpu start up my siblings */
for(i = 1; i <= siblings; i++) {
struct device_path cpu_path;
device_t new;
/* Build the cpu device path */
cpu_path.type = DEVICE_PATH_APIC;
cpu_path.u.apic.apic_id = cpu->path.u.apic.apic_id + i * (nb_cfg_54?1:8);
/* See if I can find the cpu */
new = find_dev_path(cpu->bus, &cpu_path);
/* Allocate the new cpu device structure */
if(!new) {
new = alloc_dev(cpu->bus, &cpu_path);
new->enabled = 1;
new->initialized = 0;
}
#if 1
printk_debug("CPU: %u has sibling %u\n",
cpu->path.u.apic.apic_id,
new->path.u.apic.apic_id);
#endif
/* Start the new cpu */
if(new->enabled && !new->initialized)
start_cpu(new);
}
}
#endif

View File

@ -1,12 +1,15 @@
/* 2004.12 yhlu add dual core support */
#ifndef SET_NB_CFG_54
#define SET_NB_CFG_54 1
#endif
#include "cpu/amd/dualcore/dualcore_id.c"
#if 0
static inline unsigned get_core_num_in_bsp(unsigned nodeid)
{
return ((pci_read_config32(PCI_DEV(0, 0x18+nodeid, 3), 0xe8)>>12) & 3);
@ -97,3 +100,56 @@ static inline void start_other_cores(void) {
}
}
#endif
static void k8_init_and_stop_secondaries(void)
{
struct node_core_id id;
device_t dev;
unsigned apicid;
unsigned max_siblings;
int init_detected;
msr_t msr;
/* Skip this if there was a built in self test failure */
init_detected = early_mtrr_init_detected();
amd_early_mtrr_init();
enable_lapic();
init_timer();
if (init_detected) {
asm volatile ("jmp __cpu_reset");
}
if (is_cpu_pre_e0()) {
id.nodeid = lapicid() & 0x7;
id.coreid = 0;
} else {
/* Which cpu are we on? */
id = get_node_core_id_x();
/* Set NB_CFG_MSR
* Linux expect the core to be in the least signficant bits.
*/
msr = rdmsr(NB_CFG_MSR);
msr.hi |= (1<<(54-32)); // InitApicIdCpuIdLo
wrmsr(NB_CFG_MSR, msr);
}
/* For now assume all cpus have the same number of siblings */
max_siblings = (cpuid_ecx(0x80000008) & 0xff) + 1;
/* Set the lapicid */
lapic_write(LAPIC_ID,((id.nodeid*max_siblings) + id.coreid) << 24);
/* Remember the cpuid */
if (id.coreid == 0) {
dev = PCI_DEV(0, 0x18 + id.nodeid, 2);
pci_write_config32(dev, 0x9c, cpuid_eax(1));
}
/* Maybe call distinguish_cpu_resets only on the last core? */
distinguish_cpu_resets(id.nodeid);
if (!boot_cpu()) {
stop_this_cpu();
}
}

View File

@ -11,8 +11,8 @@ static inline unsigned int read_nb_cfg_54(void)
}
struct node_core_id {
unsigned nodeid;
unsigned coreid;
unsigned nodeid:8;
unsigned coreid:8;
};
static inline struct node_core_id get_node_core_id(unsigned nb_cfg_54) {

View File

@ -21,10 +21,7 @@
#include <cpu/x86/cache.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/mem.h>
#if CONFIG_LOGICAL_CPUS==1
#include <cpu/amd/dualcore.h>
#endif
#include "model_fxx_msr.h"
@ -152,9 +149,6 @@ static void set_init_ecc_mtrrs(void)
static void init_ecc_memory(unsigned node_id)
{
unsigned long startk, begink, endk;
#if K8_E0_MEM_HOLE_SIZEK != 0
unsigned long hole_startk = 0, hole_endk = 0;
#endif
unsigned long basek;
struct mtrr_state mtrr_state;
device_t f1_dev, f2_dev, f3_dev;
@ -199,25 +193,13 @@ static void init_ecc_memory(unsigned node_id)
startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;
#if K8_E0_MEM_HOLE_SIZEK != 0
if (!is_cpu_pre_e0()) {
uint32_t val;
val = pci_read_config32(f1_dev, 0xf0);
if((val & 1)==1) {
hole_startk = ((val & (0xff<<24)) >> 10);
hole_endk = ((val & (0xff<<8))<<(16-10)) - startk;
hole_endk += hole_startk;
}
}
#endif
/* Don't start too early */
begink = startk;
if (begink < CONFIG_LB_MEM_TOPK) {
begink = CONFIG_LB_MEM_TOPK;
}
printk_debug("Clearing memory %uK - %uK: ", startk, endk);
printk_debug("Clearing memory %uK - %uK: ", begink, endk);
/* Save the normal state */
save_mtrr_state(&mtrr_state);
@ -234,9 +216,6 @@ static void init_ecc_memory(unsigned node_id)
unsigned long size;
void *addr;
#if K8_E0_MEM_HOLE_SIZEK != 0
if ((basek >= hole_startk) && (basek < hole_endk)) continue;
#endif
/* Report every 64M */
if ((basek % (64*1024)) == 0) {
/* Restore the normal state */
@ -340,6 +319,7 @@ static inline void k8_errata(void)
/* Erratum 91 prefetch miss is handled in the kernel */
/* Erratum 106 ... */
msr = rdmsr_amd(LS_CFG_MSR);
msr.lo |= 1 << 25;
@ -350,7 +330,7 @@ static inline void k8_errata(void)
msr.hi |= 1 << (43 - 32);
wrmsr_amd(BU_CFG_MSR, msr);
if(is_cpu_d0()) {
if (is_cpu_pre_e0() && !is_cpu_pre_d0()) {
/* Erratum 110 ...*/
msr = rdmsr_amd(CPU_ID_HYPER_EXT_FEATURES);
msr.hi |=1;
@ -362,26 +342,34 @@ static inline void k8_errata(void)
msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
msr.hi |=1;
wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
/* Erratum 113 ... */
msr = rdmsr_amd(BU_CFG_MSR);
msr.hi |= (1 << 16);
wrmsr_amd(BU_CFG_MSR, msr);
}
/* Erratum 122 */
if (!is_cpu_pre_c0()) {
msr = rdmsr(HWCR_MSR);
msr.lo |= 1 << 6;
wrmsr(HWCR_MSR, msr);
}
/* Erratum 123? dual core deadlock? */
/* Erratum 131 */
msr = rdmsr(NB_CFG_MSR);
msr.lo |= 1 << 20;
wrmsr(NB_CFG_MSR, msr);
}
void model_fxx_init(device_t dev)
void model_fxx_init(device_t cpu)
{
unsigned long i;
msr_t msr;
#if CONFIG_LOGICAL_CPUS
struct node_core_id id;
unsigned siblings;
id.coreid=0;
#else
unsigned nodeid;
#endif
/* Turn on caching if we haven't already */
x86_enable_cache();
@ -404,43 +392,18 @@ void model_fxx_init(device_t dev)
/* Enable the local cpu apics */
setup_lapic();
#if CONFIG_LOGICAL_CPUS == 1
siblings = cpuid_ecx(0x80000008) & 0xff;
/* Find our node and core */
id = get_node_core_id();
id = get_node_core_id(read_nb_cfg_54()); // pre e0 nb_cfg_54 can not be set
if(siblings>0) {
msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
msr.lo |= 1 << 28;
wrmsr_amd(CPU_ID_FEATURES_MSR, msr);
msr = rdmsr_amd(LOGICAL_CPUS_NUM_MSR);
msr.lo = (siblings+1)<<16;
wrmsr_amd(LOGICAL_CPUS_NUM_MSR, msr);
msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
msr.hi |= 1<<(33-32);
wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
/* Is this a bad location? In particular can another node prefetch
* data from this node before we have initialized it?
*/
if (id.coreid == 0) {
init_ecc_memory(id.nodeid); // only do it for core 0
}
/* Is this a bad location? In particular can another node prefecth
* data from this node before we have initialized it?
*/
if (id.coreid == 0) init_ecc_memory(id.nodeid); // only do it for core 0
#else
/* Is this a bad location? In particular can another node prefecth
* data from this node before we have initialized it?
*/
nodeid = lapicid() & 0xf;
init_ecc_memory(nodeid);
#endif
#if CONFIG_LOGICAL_CPUS==1
/* Start up my cpu siblings */
// if(id.coreid==0) amd_sibling_init(dev); // Don't need core1 is already be put in the CPU BUS in bus_cpu_scan
#endif
/* Deal with sibling cpus */
amd_sibling_init(cpu, id);
}
static struct device_operations cpu_dev_ops = {
@ -451,7 +414,7 @@ static struct cpu_device_id cpu_table[] = {
{ X86_VENDOR_AMD, 0xf51 }, /* SH7-B3 */
{ X86_VENDOR_AMD, 0xf58 }, /* SH7-C0 */
{ X86_VENDOR_AMD, 0xf48 },
#if 1
{ X86_VENDOR_AMD, 0xf5A }, /* SH7-CG */
{ X86_VENDOR_AMD, 0xf4A },
{ X86_VENDOR_AMD, 0xf7A },
@ -483,7 +446,6 @@ static struct cpu_device_id cpu_table[] = {
{ X86_VENDOR_AMD, 0x20fc2 },
{ X86_VENDOR_AMD, 0x20f12 }, /* JH-E6 */
{ X86_VENDOR_AMD, 0x20f32 },
#endif
{ 0, 0 },
};

View File

@ -117,4 +117,17 @@ static void early_mtrr_init(void)
enable_cache();
}
static int early_mtrr_init_detected(void)
{
msr_t msr;
/* See if MTRR's are enabled.
* a #RESET disables them while an #INIT
* preserves their state. This works
* on both Intel and AMD cpus, at least
* according to the documentation.
*/
msr = rdmsr(MTRRdefType_MSR);
return msr.lo & 0x00000800;
}
#endif /* EARLYMTRR_C */

View File

@ -2,18 +2,13 @@
#define CPU_AMD_DUALCORE_H
struct device;
void amd_sibling_init(struct device *cpu);
int is_e0_later_in_bsp(int nodeid);
unsigned int read_nb_cfg_54(void);
struct node_core_id {
unsigned nodeid;
unsigned coreid;
};
// it can be used to get unitid and coreid it running only
struct node_core_id get_node_core_id(unsigned int nb_cfg_54);
unsigned get_apicid_base(unsigned ioapic_num);
void amd_sibling_init(struct device *cpu, struct node_core_id id);
struct node_core_id get_node_core_id(void);
#endif /* CPU_AMD_DUALCORE_H */

View File

@ -129,6 +129,11 @@ config chip.h
# config for arima/hdama
chip northbridge/amd/amdk8/root_complex
device apic_cluster 0 on
chip cpu/amd/socket_940
device apic 0 on end
end
end
device pci_domain 0 on
chip northbridge/amd/amdk8
device pci 18.0 on # northbridge
@ -317,13 +322,5 @@ chip northbridge/amd/amdk8/root_complex
device pci 19.3 on end
end
end
device apic_cluster 0 on
chip cpu/amd/socket_940
device apic 0 on end
end
chip cpu/amd/socket_940
device apic 1 on end
end
end
end

View File

@ -49,6 +49,7 @@ uses HOSTCC
uses OBJCOPY
uses CONFIG_CONSOLE_VGA
uses CONFIG_PCI_ROM_RUN
uses CONFIG_LOGICAL_CPUS
uses CONFIG_USE_INIT
@ -56,6 +57,11 @@ uses CONFIG_USE_INIT
### Build options
###
##
## CONFIG_LOGICAL_CPUS enables dual core support
##
default CONFIG_LOGICAL_CPUS=1
##
## ROM_SIZE is the size of boot ROM that this board will use.
##
@ -105,7 +111,7 @@ default LB_CKS_LOC=123
## Only worry about 2 micro processors
##
default CONFIG_SMP=1
default CONFIG_MAX_CPUS=2
default CONFIG_MAX_CPUS=4
default CONFIG_MAX_PHYSICAL_CPUS=2
##

View File

@ -22,6 +22,7 @@
#include "superio/NSC/pc87360/pc87360_early_serial.c"
#include "cpu/amd/mtrr/amd_earlymtrr.c"
#include "cpu/x86/bist.h"
#include "cpu/amd/dualcore/dualcore.c"
#define SERIAL_DEV PNP_DEV(0x2e, PC87360_SP1)
@ -135,7 +136,7 @@ static unsigned int generate_row(uint8_t node, uint8_t row, uint8_t maxnodes)
};
if (maxnodes > 2) {
print_debug("this mainboard is only designed for 2 cpus\r\n");
print_spew("this mainboard is only designed for 2 cpus\r\n");
maxnodes = 2;
}
@ -165,6 +166,8 @@ static inline int spd_read_byte(unsigned device, unsigned address)
#define FIRST_CPU 1
#define SECOND_CPU 1
#define TOTAL_CPUS (FIRST_CPU + SECOND_CPU)
static void main(unsigned long bist)
{
static const struct mem_controller cpu[] = {
@ -194,21 +197,7 @@ static void main(unsigned long bist)
int needs_reset;
if (bist == 0) {
unsigned nodeid;
/* Skip this if there was a built in self test failure */
amd_early_mtrr_init();
enable_lapic();
init_timer();
nodeid = lapicid() & 0xf;
/* Has this cpu already booted? */
if (cpu_init_detected(nodeid)) {
asm volatile ("jmp __cpu_reset");
}
distinguish_cpu_resets(nodeid);
if (!boot_cpu()) {
stop_this_cpu();
}
k8_init_and_stop_secondaries();
}
/* Setup the console */
pc87360_enable_serial(SERIAL_DEV, TTYS0_BASE);
@ -241,6 +230,7 @@ static void main(unsigned long bist)
#endif
#if 0
dump_pci_device(PCI_DEV(0, 0x18, 2));
dump_pci_device(PCI_DEV(0, 0x18, 3));
#endif
#if 0

View File

@ -32,6 +32,7 @@ entries
395 1 e 1 hw_scrubber
396 1 e 1 interleave_chip_selects
397 2 e 8 max_mem_clock
399 1 e 2 dual_core
400 1 e 1 power_on_after_fail
412 4 e 6 debug_level
416 4 e 7 boot_first

View File

@ -9,23 +9,20 @@
#include "southbridge/amd/amd8111/amd8111_enable_rom.c"
#include "northbridge/amd/amdk8/early_ht.c"
#include "cpu/x86/lapic/boot_cpu.c"
#include "cpu/x86/mtrr/earlymtrr.c"
#include "northbridge/amd/amdk8/reset_test.c"
static unsigned long main(unsigned long bist)
{
unsigned nodeid;
/* Make cerain my local apic is useable */
enable_lapic();
nodeid = lapicid() & 0xf;
/* Is this a cpu only reset? */
if (cpu_init_detected(nodeid)) {
if (early_mtrr_init_detected()) {
if (last_boot_normal()) {
goto normal_image;
} else {
goto cpu_reset;
goto fallback_image;
}
}
/* Is this a secondary cpu? */
@ -62,12 +59,6 @@ static unsigned long main(unsigned long bist)
: "a" (bist) /* inputs */
: /* clobbers */
);
cpu_reset:
asm volatile ("jmp __cpu_reset"
: /* outputs */
: "a"(bist) /* inputs */
: /* clobbers */
);
fallback_image:
return bist;
}

View File

@ -42,7 +42,3 @@ const struct irq_routing_table intel_irq_routing_table = {
IRQ_SLOT(0, 1,4,3, 0,0,0,0 ),
}
};
unsigned long write_pirq_routing_table(unsigned long addr)
{
return copy_pirq_routing_table(addr);
}

View File

@ -3,6 +3,60 @@
#include <device/pci.h>
#include <string.h>
#include <stdint.h>
#include <cpu/x86/lapic.h>
#include <arch/cpu.h>
#include <arch/io.h>
#define HT_INIT_CONTROL 0x6c
#define HTIC_BIOSR_Detect (1<<5)
/* If we assume a symmetric processor configuration we can
* get all of the information we need to write the processor
* entry from the bootstrap processor.
* Plus I don't think linux really even cares.
* Having the proper apicid's in the table so the non-bootstrap
* processors can be woken up should be enough.
*/
void smp_write_processors_inorder(struct mp_config_table *mc)
{
int boot_apic_id;
int order_id;
unsigned apic_version;
unsigned cpu_features;
unsigned cpu_feature_flags;
struct cpuid_result result;
device_t cpu;
boot_apic_id = lapicid();
apic_version = lapic_read(LAPIC_LVR) & 0xff;
result = cpuid(1);
cpu_features = result.eax;
cpu_feature_flags = result.edx;
/* order the output of the cpus to fix a bug in kernel 6 11 */
for(order_id = 0;order_id <256; order_id++) {
for(cpu = all_devices; cpu; cpu = cpu->next) {
unsigned long cpu_flag;
if ((cpu->path.type != DEVICE_PATH_APIC) ||
(cpu->bus->dev->path.type != DEVICE_PATH_APIC_CLUSTER))
{
continue;
}
if (!cpu->enabled) {
continue;
}
cpu_flag = MPC_CPU_ENABLED;
if (boot_apic_id == cpu->path.u.apic.apic_id) {
cpu_flag = MPC_CPU_ENABLED | MPC_CPU_BOOTPROCESSOR;
}
if(cpu->path.u.apic.apic_id == order_id) {
smp_write_processor(mc,
cpu->path.u.apic.apic_id, apic_version,
cpu_flag, cpu_features, cpu_feature_flags);
break;
}
}
}
}
static unsigned node_link_to_bus(unsigned node, unsigned link)
{
@ -38,6 +92,21 @@ static unsigned node_link_to_bus(unsigned node, unsigned link)
return 0;
}
unsigned max_apicid(void)
{
unsigned max_apicid;
device_t dev;
max_apicid = 0;
for(dev = all_devices; dev; dev = dev->next) {
if (dev->path.type != DEVICE_PATH_APIC)
continue;
if (dev->path.u.apic.apic_id > max_apicid) {
max_apicid = dev->path.u.apic.apic_id;
}
}
return max_apicid;
}
void *smp_write_config_table(void *v)
{
static const char sig[4] = "PCMP";
@ -50,6 +119,10 @@ void *smp_write_config_table(void *v)
unsigned char bus_8131_1;
unsigned char bus_8131_2;
unsigned char bus_8111_1;
unsigned apicid_base;
unsigned apicid_8111;
unsigned apicid_8131_1;
unsigned apicid_8131_2;
mc = (void *)(((char *)v) + SMP_FLOATING_TABLE_LEN);
memset(mc, 0, sizeof(*mc));
@ -68,8 +141,12 @@ void *smp_write_config_table(void *v)
mc->mpe_checksum = 0;
mc->reserved = 0;
smp_write_processors(mc);
smp_write_processors_inorder(mc);
apicid_base = max_apicid() + 1;
apicid_8111 = apicid_base;
apicid_8131_1 = apicid_base + 1;
apicid_8131_2 = apicid_base + 2;
{
device_t dev;
@ -124,7 +201,7 @@ void *smp_write_config_table(void *v)
smp_write_bus(mc, bus_isa, "ISA ");
/* IOAPIC handling */
smp_write_ioapic(mc, 2, 0x11, 0xfec00000);
smp_write_ioapic(mc, apicid_8111, 0x11, 0xfec00000);
{
device_t dev;
struct resource *res;
@ -133,7 +210,7 @@ void *smp_write_config_table(void *v)
if (dev) {
res = find_resource(dev, PCI_BASE_ADDRESS_0);
if (res) {
smp_write_ioapic(mc, 0x03, 0x11, res->base);
smp_write_ioapic(mc, apicid_8131_1, 0x11, res->base);
}
}
/* 8131 apic 4 */
@ -141,44 +218,44 @@ void *smp_write_config_table(void *v)
if (dev) {
res = find_resource(dev, PCI_BASE_ADDRESS_0);
if (res) {
smp_write_ioapic(mc, 0x04, 0x11, res->base);
smp_write_ioapic(mc, apicid_8131_2, 0x11, res->base);
}
}
}
/* ISA backward compatibility interrupts */
smp_write_intsrc(mc, mp_ExtINT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x00, 0x02, 0x00);
bus_isa, 0x00, apicid_8111, 0x00);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x01, 0x02, 0x01);
bus_isa, 0x01, apicid_8111, 0x01);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x00, 0x02, 0x02);
bus_isa, 0x00, apicid_8111, 0x02);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x03, 0x02, 0x03);
bus_isa, 0x03, apicid_8111, 0x03);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x04, 0x02, 0x04);
bus_isa, 0x04, apicid_8111, 0x04);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x05, 0x02, 0x05);
bus_isa, 0x05, apicid_8111, 0x05);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x06, 0x02, 0x06);
bus_isa, 0x06, apicid_8111, 0x06);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x07, 0x02, 0x07);
bus_isa, 0x07, apicid_8111, 0x07);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x08, 0x02, 0x08);
bus_isa, 0x08, apicid_8111, 0x08);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x09, 0x02, 0x09);
bus_isa, 0x09, apicid_8111, 0x09);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x0a, 0x02, 0x0a);
bus_isa, 0x0a, apicid_8111, 0x0a);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x0b, 0x02, 0x0b);
bus_isa, 0x0b, apicid_8111, 0x0b);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x0c, 0x02, 0x0c);
bus_isa, 0x0c, apicid_8111, 0x0c);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x0d, 0x02, 0x0d);
bus_isa, 0x0d, apicid_8111, 0x0d);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x0e, 0x02, 0x0e);
bus_isa, 0x0e, apicid_8111, 0x0e);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
bus_isa, 0x0f, 0x02, 0x0f);
bus_isa, 0x0f, apicid_8111, 0x0f);
/* Standard local interrupt assignments */
smp_write_lintsrc(mc, mp_ExtINT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT,
@ -188,46 +265,48 @@ void *smp_write_config_table(void *v)
/* PCI Ints: Type Trigger Polarity Bus ID PCIDEVNUM|IRQ APIC ID PIN# */
/* On board nics */
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x03<<2)|0, 0x02, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x04<<2)|0, 0x02, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x03<<2)|0, apicid_8111, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x04<<2)|0, apicid_8111, 0x13);
/* On board SATA */
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x05<<2)|0, apicid_8111, 0x11);
/* PCI Slot 1 */
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x01<<2)|0, 0x02, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x01<<2)|1, 0x02, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x01<<2)|2, 0x02, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x01<<2)|3, 0x02, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x01<<2)|0, apicid_8111, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x01<<2)|1, apicid_8111, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x01<<2)|2, apicid_8111, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x01<<2)|3, apicid_8111, 0x10);
/* PCI Slot 2 */
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x02<<2)|0, 0x02, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x02<<2)|1, 0x02, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x02<<2)|2, 0x02, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x02<<2)|3, 0x02, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x02<<2)|0, apicid_8111, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x02<<2)|1, apicid_8111, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x02<<2)|2, apicid_8111, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_2, (0x02<<2)|3, apicid_8111, 0x11);
/* PCI Slot 3 */
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x01<<2)|0, 0x02, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x01<<2)|1, 0x02, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x01<<2)|2, 0x02, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x01<<2)|3, 0x02, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x01<<2)|0, apicid_8111, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x01<<2)|1, apicid_8111, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x01<<2)|2, apicid_8111, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x01<<2)|3, apicid_8111, 0x10);
/* PCI Slot 4 */
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x02<<2)|0, 0x02, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x02<<2)|1, 0x02, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x02<<2)|2, 0x02, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x02<<2)|3, 0x02, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x02<<2)|0, apicid_8111, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x02<<2)|1, apicid_8111, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x02<<2)|2, apicid_8111, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8131_1, (0x02<<2)|3, apicid_8111, 0x11);
/* PCI Slot 5 */
#warning "FIXME get the irqs right, it's just hacked to work for now"
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x05<<2)|0, 0x02, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x05<<2)|1, 0x02, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x05<<2)|2, 0x02, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x05<<2)|3, 0x02, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x05<<2)|0, apicid_8111, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x05<<2)|1, apicid_8111, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x05<<2)|2, apicid_8111, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x05<<2)|3, apicid_8111, 0x10);
/* PCI Slot 6 */
#warning "FIXME get the irqs right, it's just hacked to work for now"
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x04<<2)|0, 0x02, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x04<<2)|1, 0x02, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x04<<2)|2, 0x02, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x04<<2)|3, 0x02, 0x13);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x04<<2)|0, apicid_8111, 0x10);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x04<<2)|1, apicid_8111, 0x11);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x04<<2)|2, apicid_8111, 0x12);
smp_write_intsrc(mc, mp_INT, MP_IRQ_TRIGGER_DEFAULT|MP_IRQ_POLARITY_DEFAULT, bus_8111_1, (0x04<<2)|3, apicid_8111, 0x13);
/* There is no extension information... */
@ -242,6 +321,91 @@ void *smp_write_config_table(void *v)
/* Build the SMP/MP tables at 'addr' and return the address just past them.
 *
 * The large block below is a board-specific workaround for a 33 MHz
 * PCI-X / hot-swap misconfiguration: if the bridge comes up wrong it
 * triggers a warm reset so the next boot samples the straps correctly.
 * NOTE: the entire workaround is currently compiled out by the outer
 * "#if 0" and has no effect at runtime.
 */
unsigned long write_smp_table(unsigned long addr)
{
void *v;
#if 0 /* The whole patch on the 33 Mhz problem */
#if 1
#define debug1 0
/* Hack patch work around for hot swap enable 33mhz problem */
{
device_t dev;
uint32_t data;
unsigned long htic;
int reset;
int i;
reset = 0;
/* Check the PCI-X bridge (bus 1, dev 2) register 0xa0: a value other
 * than 0xc3 in bits 23:16 indicates the bus came up at the wrong speed. */
printk_debug("Looking for bad PCIX MHz input\n");
dev = dev_find_slot(1, PCI_DEVFN(0x02,0));
data = pci_read_config32(dev, 0xa0);
if(!(((data>>16)&0x0ff)==0xc3)) {
reset=1;
printk_debug("Bad PCIX MHz - Reset\n");
}
/* Check hot-swap enable state on bus 1, dev 1, register 0x48 (bits 3:2).
 * Which polarity is "bad" is selected by the debug1 build switch. */
printk_debug("Looking for bad Hot Swap Enable\n");
dev = dev_find_slot(1, PCI_DEVFN(0x01,0));
data = pci_read_config32(dev, 0x48);
#if debug1
if(!(data & 0x0c)) {
reset=1;
printk_debug("Good Hot Swap start - Reset\n");
}
#else
if(data & 0x0c) {
reset=1;
printk_debug("Bad Hot Swap start - Reset\n");
}
#endif
if(reset) {
#if 0
/* dump pci registers */
printk_debug("PCI Registers for 1:1.0\n");
for(i = 0; i <= 255; i++) {
unsigned char val;
if ((i & 0x0f) == 0) {
printk_debug("%2.2X:",i);
}
val = pci_read_config8(dev, i);
printk_debug(" %2.2X",val);
if ((i & 0x0f) == 0x0f) {
printk_debug("\n");
}
}
dev = dev_find_slot(1, PCI_DEVFN(0x02,0));
printk_debug("PCI Registers for 1:2.0\n");
for(i = 0; i <= 255; i++) {
unsigned char val;
if ((i & 0x0f) == 0) {
printk_debug("%2.2X:",i);
}
val = pci_read_config8(dev, i);
printk_debug(" %2.2X",val);
if ((i & 0x0f) == 0x0f) {
printk_debug("\n");
}
}
#endif
/* enable cf9 */
dev = dev_find_slot(node_link_to_bus(0, 0), PCI_DEVFN(0x04,3));
pci_write_config8(dev, 0x41, 0xf1);
/* reset */
/* Clear the BiosReset-detect bit first so the next boot is treated
 * as a cold start, then trigger the reset through port 0xcf9. */
dev = dev_find_slot(0, PCI_DEVFN(0x18,0));
htic = pci_read_config32(dev, HT_INIT_CONTROL);
htic &= ~HTIC_BIOSR_Detect;
pci_write_config32(dev, HT_INIT_CONTROL, htic);
outb(0x0e, 0x0cf9);
}
else {
#if debug1
printk_debug("Hot Swap is on\n");
#else
printk_debug("OK 133MHz & Hot Swap is off\n");
#endif
}
}
#endif
#endif /* end of the patch on the whole 33 Mhz problem */
/* Normal path: write the MP floating pointer structure, then the
 * MP configuration table right after it. */
v = smp_write_floating_table(addr);
return (unsigned long)smp_write_config_table(v);
}

View File

@ -136,6 +136,7 @@
#define DCL_DisInRcvrs (1<<24)
#define DCL_BypMax_SHIFT 25
#define DCL_En2T (1<<28)
#define DCL_UpperCSMap (1<<29)
#define DRAM_CONFIG_HIGH 0x94
#define DCH_ASYNC_LAT_SHIFT 0
#define DCH_ASYNC_LAT_MASK 0xf

View File

@ -155,23 +155,6 @@ static void disable_probes(void)
}
#ifndef ENABLE_APIC_EXT_ID
#define ENABLE_APIC_EXT_ID 0
#endif
/* Enable extended APIC ID handling for the given node by setting the
 * extended-APIC bits in the HT transaction control register (function 0,
 * offset 0x68).  Compiles to an empty function unless ENABLE_APIC_EXT_ID
 * is defined to 1 at build time. */
static void enable_apic_ext_id(u8 node)
{
#if ENABLE_APIC_EXT_ID==1
#warning "FIXME Is the right place to enable apic ext id here?"
u32 val;
/* Read-modify-write: turn on extended spurious vector, extended APIC ID,
 * and extended broadcast bits. */
val = pci_read_config32(NODE_HT(node), 0x68);
val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
pci_write_config32(NODE_HT(node), 0x68, val);
#endif
}
static void enable_routing(u8 node)
{
u32 val;
@ -292,27 +275,27 @@ static int verify_connection(u8 dest)
return 1;
}
static uint16_t read_freq_cap(device_t dev, uint8_t pos)
static unsigned read_freq_cap(device_t dev, unsigned pos)
{
/* Handle bugs in valid hypertransport frequency reporting */
uint16_t freq_cap;
unsigned freq_cap;
uint32_t id;
freq_cap = pci_read_config16(dev, pos);
freq_cap &= ~(1 << HT_FREQ_VENDOR); /* Ignore Vendor HT frequencies */
#if K8_HT_FREQ_1G_SUPPORT == 1
if (!is_cpu_pre_e0()) {
return freq_cap;
}
#endif
id = pci_read_config32(dev, 0);
/* AMD K8 Unsupported 1Ghz? */
if (id == (PCI_VENDOR_ID_AMD | (0x1100 << 16))) {
if (is_cpu_pre_e0()) {
freq_cap &= ~(1 << HT_FREQ_1000Mhz);
}
}
return freq_cap;
}
@ -339,8 +322,10 @@ static int optimize_connection(device_t node1, uint8_t link1, device_t node2, ui
/* See if I am changing the link freqency */
old_freq = pci_read_config8(node1, link1 + PCI_HT_CAP_HOST_FREQ);
old_freq &= 0x0f;
needs_reset |= old_freq != freq;
old_freq = pci_read_config8(node2, link2 + PCI_HT_CAP_HOST_FREQ);
old_freq &= 0x0f;
needs_reset |= old_freq != freq;
/* Set the Calculated link frequency */
@ -382,7 +367,6 @@ static int optimize_connection(device_t node1, uint8_t link1, device_t node2, ui
/* Set node2's widths */
pci_write_config8(node2, link2 + PCI_HT_CAP_HOST_WIDTH + 1, width);
return needs_reset;
}
@ -1625,9 +1609,9 @@ static void clear_dead_routes(unsigned nodes)
}
#endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
#if CONFIG_LOGICAL_CPUS==1
static unsigned verify_dualcore(unsigned nodes)
static unsigned count_cpus(unsigned nodes)
{
#if CONFIG_LOGICAL_CPUS==1
unsigned node, totalcpus, tmp;
totalcpus = 0;
@ -1637,24 +1621,20 @@ static unsigned verify_dualcore(unsigned nodes)
}
return totalcpus;
#else
return nodes;
#endif
}
#endif
static void coherent_ht_finalize(unsigned nodes)
{
unsigned total_cpus;
unsigned cpu_node_count;
unsigned node;
int rev_a0;
#if CONFIG_LOGICAL_CPUS==1
unsigned total_cpus;
if(read_option(CMOS_VSTART_dual_core, CMOS_VLEN_dual_core, 0) == 0) { /* dual_core */
total_cpus = verify_dualcore(nodes);
}
else {
total_cpus = nodes;
}
#endif
total_cpus = count_cpus(nodes);
cpu_node_count = ((total_cpus -1)<<16)|((nodes - 1) << 4);
/* set up cpu count and node count and enable Limit
* Config Space Range for all available CPUs.
@ -1672,11 +1652,7 @@ static void coherent_ht_finalize(unsigned nodes)
/* Set the Total CPU and Node count in the system */
val = pci_read_config32(dev, 0x60);
val &= (~0x000F0070);
#if CONFIG_LOGICAL_CPUS==1
val |= ((total_cpus-1)<<16)|((nodes-1)<<4);
#else
val |= ((nodes-1)<<16)|((nodes-1)<<4);
#endif
val |= cpu_node_count;
pci_write_config32(dev, 0x60, val);
/* Only respond to real cpu pci configuration cycles
@ -1786,6 +1762,33 @@ static int optimize_link_read_pointers(unsigned nodes, int needs_reset)
return needs_reset;
}
/* For each node that reports additional cores, prepare and start the
 * second core: route machine-check reporting to core 0 and set the
 * enable bits in the node's HT function 0 register 0x68.
 *
 * 'nodes' is the number of coherent nodes discovered earlier. */
static void startup_other_cores(unsigned nodes)
{
unsigned node;
for(node = 0; node < nodes; node++) {
device_t dev;
unsigned siblings;
/* Sibling (extra core) count comes from bits 13:12 of the
 * miscellaneous-control function register 0xe8. */
dev = NODE_MC(node);
siblings = (pci_read_config32(dev, 0xe8) >> 12) & 0x3;
if (siblings) {
device_t dev_f0;
unsigned val;
/* Redirect all MC4 accesses and error logging to core0 */
val = pci_read_config32(dev, 0x44);
val |= (1 << 27); //NbMcaToMstCpuEn bit
pci_write_config32(dev, 0x44, val);
dev_f0 = NODE_HT(node);
/* Enable extended apic id's and second core */
val = pci_read_config32(dev_f0, 0x68);
val |= (1 << 18) | (1 << 17) | ( 1 << 5);
pci_write_config32(dev_f0, 0x68, val);
}
}
}
static int setup_coherent_ht_domain(void)
{
struct setup_smp_result result;
@ -1800,14 +1803,14 @@ static int setup_coherent_ht_domain(void)
#if CONFIG_MAX_PHYSICAL_CPUS > 1
result = setup_smp();
#endif
result.nodes = verify_mp_capabilities(result.nodes);
clear_dead_routes(result.nodes);
#endif
if (result.nodes == 1) {
setup_uniprocessor();
}
coherent_ht_finalize(result.nodes);
startup_other_cores(result.nodes);
result.needs_reset = apply_cpu_errata_fixes(result.nodes, result.needs_reset);
result.needs_reset = optimize_link_read_pointers(result.nodes, result.needs_reset);
return result.needs_reset;

View File

@ -3,44 +3,23 @@ static int is_cpu_rev_a0(void)
{
return (cpuid_eax(1) & 0xfffef) == 0x0f00;
}
//AMD_D0_SUPPORT
static int is_cpu_pre_d0(void)
{
return (cpuid_eax(1) & 0xfff0f) < 0x10f00;
}
static int is_cpu_d0(void)
{
return (cpuid_eax(1) & 0xfff0f) == 0x10f00;
}
//AMD_E0_SUPPORT
static int is_cpu_pre_e0(void)
{
return (cpuid_eax(1) & 0xfff0f) < 0x20f00;
}
static int is_cpu_e0(void)
{
return (cpuid_eax(1) & 0xfff00) == 0x20f00;
}
static int is_cpu_pre_c0(void)
{
return (cpuid_eax(1) & 0xfffef) < 0x0f48;
}
static int is_cpu_c0(void)
{
return (cpuid_eax(1) & 0xfffef) == 0x0f48;
}
static int is_cpu_pre_b3(void)
{
return (cpuid_eax(1) & 0xfffef) < 0x0f41;
}
static int is_cpu_b3(void)
static int is_cpu_pre_c0(void)
{
return (cpuid_eax(1) & 0xfffef) == 0x0f41;
return (cpuid_eax(1) & 0xfffef) < 0x0f48;
}
static int is_cpu_pre_d0(void)
{
return (cpuid_eax(1) & 0xfff0f) < 0x10000;
}
static int is_cpu_pre_e0(void)
{
return (cpuid_eax(1) & 0xfff0f) < 0x20f00;
}

View File

@ -5,16 +5,12 @@
#if 1
static void print_debug_pci_dev(unsigned dev)
{
#if CONFIG_USE_INIT
printk_debug("PCI: %02x:%02x.%02x", (dev>>16) & 0xff, (dev>>11) & 0x1f, (dev>>8) & 0x7);
#else
print_debug("PCI: ");
print_debug_hex8((dev >> 16) & 0xff);
print_debug_char(':');
print_debug_hex8((dev >> 11) & 0x1f);
print_debug_char('.');
print_debug_hex8((dev >> 8) & 7);
#endif
}
static void print_pci_devices(void)
@ -31,19 +27,7 @@ static void print_pci_devices(void)
continue;
}
print_debug_pci_dev(dev);
#if CONFIG_USE_INIT
printk_debug(" %04x:%04x\r\n", (id & 0xffff), (id>>16));
#else
print_debug_hex32(id);
print_debug("\r\n");
#endif
if(((dev>>8) & 0x07) == 0) {
uint8_t hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
if((hdr_type & 0x80) != 0x80) {
dev += PCI_DEV(0,0,7);
}
}
}
}
@ -88,14 +72,6 @@ static void dump_pci_devices(void)
continue;
}
dump_pci_device(dev);
if(((dev>>8) & 0x07) == 0) {
uint8_t hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
if((hdr_type & 0x80) != 0x80) {
dev += PCI_DEV(0,0,7);
}
}
}
}
@ -113,14 +89,6 @@ static void dump_pci_devices_on_bus(unsigned busn)
continue;
}
dump_pci_device(dev);
if(((dev>>8) & 0x07) == 0) {
uint8_t hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
if((hdr_type & 0x80) != 0x80) {
dev += PCI_DEV(0,0,7);
}
}
}
}

View File

@ -23,14 +23,6 @@ static int enumerate_ht_chain(void)
break;
}
#if CK804_DEVN_BASE==0
//CK804 workaround:
// CK804 UnitID changes not use
if(id == 0x005e10de) {
break;
}
#endif
hdr_type = pci_read_config8(PCI_DEV(0,0,0), PCI_HEADER_TYPE);
pos = 0;
hdr_type &= 0x7f;

View File

@ -1,55 +1,38 @@
/*
This should be done by Eric
2004.12 yhlu add multi ht chain dynamically support
*/
#include <device/pci_def.h>
#include <device/pci_ids.h>
#include <device/hypertransport_def.h>
#ifndef K8_HT_FREQ_1G_SUPPORT
#define K8_HT_FREQ_1G_SUPPORT 0
#endif
#ifndef K8_SCAN_PCI_BUS
#define K8_SCAN_PCI_BUS 0
#endif
static inline void print_linkn_in (const char *strval, uint8_t byteval)
static inline void print_linkn_in (const char *strval, unsigned byteval)
{
#if 1
#if CONFIG_USE_INIT
printk_debug("%s%02x\r\n", strval, byteval);
#else
print_debug(strval); print_debug_hex8(byteval); print_debug("\r\n");
#endif
#endif
}
static uint8_t ht_lookup_capability(device_t dev, uint16_t val)
static unsigned ht_lookup_slave_capability(device_t dev)
{
uint8_t pos;
uint8_t hdr_type;
unsigned pos;
unsigned hdr_type;
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
pos = 0;
hdr_type &= 0x7f;
if ((hdr_type == PCI_HEADER_TYPE_NORMAL) ||
(hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
(hdr_type == PCI_HEADER_TYPE_BRIDGE))
{
pos = PCI_CAPABILITY_LIST;
}
if (pos > PCI_CAP_LIST_NEXT) {
pos = pci_read_config8(dev, pos);
}
while(pos != 0) { /* loop through the linked list */
uint8_t cap;
unsigned cap;
cap = pci_read_config8(dev, pos + PCI_CAP_LIST_ID);
if (cap == PCI_CAP_ID_HT) {
uint16_t flags;
unsigned flags;
flags = pci_read_config16(dev, pos + PCI_CAP_FLAGS);
if ((flags >> 13) == val) {
/* Entry is a slave or host , success... */
if ((flags >> 13) == 0) {
/* Entry is a Slave secondary, success... */
break;
}
}
@ -58,51 +41,23 @@ static uint8_t ht_lookup_capability(device_t dev, uint16_t val)
return pos;
}
static uint8_t ht_lookup_slave_capability(device_t dev)
{
return ht_lookup_capability(dev, 0); // Slave/Primary Interface Block Format
}
static uint8_t ht_lookup_host_capability(device_t dev)
{
return ht_lookup_capability(dev, 1); // Host/Secondary Interface Block Format
}
static void ht_collapse_previous_enumeration(uint8_t bus)
static void ht_collapse_previous_enumeration(unsigned bus)
{
device_t dev;
uint32_t id;
/* Check if is already collapsed */
dev = PCI_DEV(bus, 0, 0);
id = pci_read_config32(dev, PCI_VENDOR_ID);
if ( ! ( (id == 0xffffffff) || (id == 0x00000000) ||
(id == 0x0000ffff) || (id == 0xffff0000) ) ) {
return;
}
/* Spin through the devices and collapse any previous
* hypertransport enumeration.
*/
for(dev = PCI_DEV(bus, 1, 0); dev <= PCI_DEV(bus, 0x1f, 0x7); dev += PCI_DEV(0, 1, 0)) {
uint32_t id;
uint8_t pos;
uint16_t flags;
unsigned id;
unsigned pos, flags;
id = pci_read_config32(dev, PCI_VENDOR_ID);
if ((id == 0xffffffff) || (id == 0x00000000) ||
(id == 0x0000ffff) || (id == 0xffff0000)) {
if ( (id == 0xffffffff) || (id == 0x00000000) ||
(id == 0x0000ffff) || (id == 0xffff0000))
{
continue;
}
#if 0
#if CK804_DEVN_BASE==0
//CK804 workaround:
// CK804 UnitID changes not use
if(id == 0x005e10de) {
break;
}
#endif
#endif
pos = ht_lookup_slave_capability(dev);
if (!pos) {
@ -116,11 +71,11 @@ static void ht_collapse_previous_enumeration(uint8_t bus)
}
}
static uint16_t ht_read_freq_cap(device_t dev, uint8_t pos)
static unsigned ht_read_freq_cap(device_t dev, unsigned pos)
{
/* Handle bugs in valid hypertransport frequency reporting */
uint16_t freq_cap;
uint32_t id;
unsigned freq_cap;
unsigned id;
freq_cap = pci_read_config16(dev, pos);
freq_cap &= ~(1 << HT_FREQ_VENDOR); /* Ignore Vendor HT frequencies */
@ -130,25 +85,25 @@ static uint16_t ht_read_freq_cap(device_t dev, uint8_t pos)
/* AMD 8131 Errata 48 */
if (id == (PCI_VENDOR_ID_AMD | (PCI_DEVICE_ID_AMD_8131_PCIX << 16))) {
freq_cap &= ~(1 << HT_FREQ_800Mhz);
return freq_cap;
}
/* AMD 8151 Errata 23 */
if (id == (PCI_VENDOR_ID_AMD | (PCI_DEVICE_ID_AMD_8151_SYSCTRL << 16))) {
freq_cap &= ~(1 << HT_FREQ_800Mhz);
return freq_cap;
}
/* AMD K8 Unsupported 1Ghz? */
if (id == (PCI_VENDOR_ID_AMD | (0x1100 << 16))) {
#if K8_HT_FREQ_1G_SUPPORT == 1
if (is_cpu_pre_e0()) // CK804 support 1G?
#endif
/* Supported starting with E0 */
device_t dev_2 = PCI_DEV(0,0x18,2);
if(pci_read_config32(dev_2,0x9c) < 0x20f00) {
freq_cap &= ~(1 << HT_FREQ_1000Mhz);
}
}
return freq_cap;
}
#define LINK_OFFS(CTRL, WIDTH,FREQ,FREQ_CAP) \
(((CTRL & 0xff) << 24) | ((WIDTH & 0xff) << 16) | ((FREQ & 0xff) << 8) | (FREQ_CAP & 0xFF))
@ -176,14 +131,14 @@ static uint16_t ht_read_freq_cap(device_t dev, uint8_t pos)
PCI_HT_CAP_SLAVE_FREQ_CAP1)
static int ht_optimize_link(
device_t dev1, uint8_t pos1, unsigned offs1,
device_t dev2, uint8_t pos2, unsigned offs2)
device_t dev1, unsigned pos1, unsigned offs1,
device_t dev2, unsigned pos2, unsigned offs2)
{
static const uint8_t link_width_to_pow2[]= { 3, 4, 0, 5, 1, 2, 0, 0 };
static const uint8_t pow2_to_link_width[] = { 0x7, 4, 5, 0, 1, 3 };
uint16_t freq_cap1, freq_cap2;
uint8_t width_cap1, width_cap2, width, old_width, ln_width1, ln_width2;
uint8_t freq, old_freq;
unsigned freq_cap1, freq_cap2;
unsigned width_cap1, width_cap2, width, old_width, ln_width1, ln_width2;
unsigned freq, old_freq;
int needs_reset;
/* Set link width and frequency */
@ -199,8 +154,10 @@ static int ht_optimize_link(
/* See if I am changing the link freqency */
old_freq = pci_read_config8(dev1, pos1 + LINK_FREQ(offs1));
old_freq &= 0x0f;
needs_reset |= old_freq != freq;
old_freq = pci_read_config8(dev2, pos2 + LINK_FREQ(offs2));
old_freq &= 0x0f;
needs_reset |= old_freq != freq;
/* Set the Calculated link frequency */
@ -245,138 +202,23 @@ static int ht_optimize_link(
return needs_reset;
}
#if (USE_DCACHE_RAM == 1) && (K8_SCAN_PCI_BUS == 1)
static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus);
static int scan_pci_bus( unsigned bus)
static int ht_setup_chainx(device_t udev, unsigned upos, unsigned bus)
{
/*
here we already can access PCI_DEV(bus, 0, 0) to PCI_DEV(bus, 0x1f, 0x7)
So We can scan these devices to find out if they are bridge
If it is pci bridge, We need to set busn in bridge, and go on
For ht bridge, We need to set the busn in bridge and ht_setup_chainx, and the scan_pci_bus
*/
unsigned int devfn;
unsigned new_bus;
unsigned max_bus;
new_bus = (bus & 0xff); // mask out the reset_needed
if(new_bus<0x40) {
max_bus = 0x3f;
} else if (new_bus<0x80) {
max_bus = 0x7f;
} else if (new_bus<0xc0) {
max_bus = 0xbf;
} else {
max_bus = 0xff;
}
new_bus = bus;
#if 0
#if CONFIG_USE_INIT == 1
printk_debug("bus_num=%02x\r\n", bus);
#endif
#endif
for (devfn = 0; devfn <= 0xff; devfn++) {
uint8_t hdr_type;
uint16_t class;
uint32_t buses;
device_t dev;
uint16_t cr;
dev = PCI_DEV((bus & 0xff), ((devfn>>3) & 0x1f), (devfn & 0x7));
hdr_type = pci_read_config8(dev, PCI_HEADER_TYPE);
class = pci_read_config16(dev, PCI_CLASS_DEVICE);
#if 0
#if CONFIG_USE_INIT == 1
if(hdr_type !=0xff ) {
printk_debug("dev=%02x fn=%02x hdr_type=%02x class=%04x\r\n",
(devfn>>3)& 0x1f, (devfn & 0x7), hdr_type, class);
}
#endif
#endif
switch(hdr_type & 0x7f) { /* header type */
case PCI_HEADER_TYPE_BRIDGE:
if (class != PCI_CLASS_BRIDGE_PCI) goto bad;
/* set the bus range dev */
/* Clear all status bits and turn off memory, I/O and master enables. */
cr = pci_read_config16(dev, PCI_COMMAND);
pci_write_config16(dev, PCI_COMMAND, 0x0000);
pci_write_config16(dev, PCI_STATUS, 0xffff);
buses = pci_read_config32(dev, PCI_PRIMARY_BUS);
buses &= 0xff000000;
new_bus++;
buses |= (((unsigned int) (bus & 0xff) << 0) |
((unsigned int) (new_bus & 0xff) << 8) |
((unsigned int) max_bus << 16));
pci_write_config32(dev, PCI_PRIMARY_BUS, buses);
{
/* here we need to figure out if dev is a ht bridge
if it is ht bridge, we need to call ht_setup_chainx at first
Not verified --- yhlu
*/
uint8_t upos;
upos = ht_lookup_host_capability(dev); // one func one ht sub
if (upos) { // sub ht chain
uint8_t busn;
busn = (new_bus & 0xff);
/* Make certain the HT bus is not enumerated */
ht_collapse_previous_enumeration(busn);
/* scan the ht chain */
new_bus |= (ht_setup_chainx(dev,upos,busn)<<16); // store reset_needed to upword
}
}
new_bus = scan_pci_bus(new_bus);
/* set real max bus num in that */
buses = (buses & 0xff00ffff) |
((unsigned int) (new_bus & 0xff) << 16);
pci_write_config32(dev, PCI_PRIMARY_BUS, buses);
pci_write_config16(dev, PCI_COMMAND, cr);
break;
default:
bad:
;
}
/* if this is not a multi function device,
* or the device is not present don't waste
* time probing another function.
* Skip to next device.
*/
if ( ((devfn & 0x07) == 0x00) && ((hdr_type & 0x80) != 0x80))
{
devfn += 0x07;
}
}
return new_bus;
}
#endif
static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus)
{
uint8_t next_unitid, last_unitid;
unsigned next_unitid, last_unitid;
int reset_needed;
unsigned uoffs;
int reset_needed=0;
reset_needed = 0;
uoffs = PCI_HT_HOST_OFFS;
next_unitid = 1;
do {
uint32_t id;
uint8_t pos;
uint16_t flags, ctrl;
uint8_t count;
unsigned offs;
unsigned id;
unsigned pos;
unsigned flags, count, offs, ctrl;
last_unitid = next_unitid;
/* Wait until the link initialization is complete */
do {
@ -392,10 +234,7 @@ static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus)
} while((ctrl & (1 << 5)) == 0);
device_t dev = PCI_DEV(bus, 0, 0);
last_unitid = next_unitid;
id = pci_read_config32(dev, PCI_VENDOR_ID);
/* If the chain is enumerated quit */
if ( (id == 0xffffffff) || (id == 0x00000000) ||
(id == 0x0000ffff) || (id == 0xffff0000))
@ -409,13 +248,6 @@ static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus)
break;
}
#if CK804_DEVN_BASE==0
//CK804 workaround:
// CK804 UnitID changes not use
id = pci_read_config32(dev, PCI_VENDOR_ID);
if(id != 0x005e10de) {
#endif
/* Update the Unitid of the current device */
flags = pci_read_config16(dev, pos + PCI_CAP_FLAGS);
flags &= ~0x1f; /* mask out the bse Unit ID */
@ -424,12 +256,6 @@ static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus)
/* Note the change in device number */
dev = PCI_DEV(bus, next_unitid, 0);
#if CK804_DEVN_BASE==0
}
else {
dev = PCI_DEV(bus, 0, 0);
}
#endif
/* Compute the number of unitids consumed */
count = (flags >> 5) & 0x1f;
@ -440,24 +266,17 @@ static int ht_setup_chainx(device_t udev, uint8_t upos, uint8_t bus)
* came from.
*/
flags = pci_read_config16(dev, pos + PCI_CAP_FLAGS);
offs = ((flags>>10) & 1) ? PCI_HT_SLAVE1_OFFS : PCI_HT_SLAVE0_OFFS;
offs = ((flags >> 10) & 1) ? PCI_HT_SLAVE1_OFFS : PCI_HT_SLAVE0_OFFS;
/* Setup the Hypertransport link */
reset_needed |= ht_optimize_link(udev, upos, uoffs, dev, pos, offs);
#if CK804_DEVN_BASE==0
if(id == 0x005e10de) {
break;
}
#endif
/* Remeber the location of the last device */
/* Remember the location of the last device */
udev = dev;
upos = pos;
uoffs = ( offs != PCI_HT_SLAVE0_OFFS ) ? PCI_HT_SLAVE0_OFFS : PCI_HT_SLAVE1_OFFS;
} while((last_unitid != next_unitid) && (next_unitid <= 0x1f));
return reset_needed;
}
@ -474,10 +293,11 @@ static int ht_setup_chain(device_t udev, unsigned upos)
return ht_setup_chainx(udev, upos, 0);
}
static int optimize_link_read_pointer(uint8_t node, uint8_t linkn, uint8_t linkt, uint8_t val)
static int optimize_link_read_pointer(unsigned node, unsigned linkn, unsigned linkt, unsigned val)
{
uint32_t dword, dword_old;
uint8_t link_type;
unsigned dword, dword_old;
unsigned link_type;
/* This works on an Athlon64 because unimplemented links return 0 */
dword = pci_read_config32(PCI_DEV(0,0x18+node,0), 0x98 + (linkn * 0x20));
@ -498,18 +318,18 @@ static int optimize_link_read_pointer(uint8_t node, uint8_t linkn, uint8_t linkt
return 0;
}
static int optimize_link_in_coherent(uint8_t ht_c_num)
static int optimize_link_in_coherent(unsigned ht_c_num)
{
int reset_needed;
uint8_t i;
unsigned i;
reset_needed = 0;
for (i = 0; i < ht_c_num; i++) {
uint32_t reg;
uint8_t nodeid, linkn;
uint8_t busn;
uint8_t val;
unsigned reg;
unsigned nodeid, linkn;
unsigned busn;
unsigned val;
reg = pci_read_config32(PCI_DEV(0,0x18,1), 0xe0 + i * 4);
@ -520,9 +340,11 @@ static int optimize_link_in_coherent(uint8_t ht_c_num)
reg = pci_read_config32( PCI_DEV(busn, 1, 0), PCI_VENDOR_ID);
if ( (reg & 0xffff) == PCI_VENDOR_ID_AMD) {
val = 0x25;
} else if ( (reg & 0xffff) == PCI_VENDOR_ID_NVIDIA ) {
}
else if ( (reg & 0xffff) == PCI_VENDOR_ID_NVIDIA) {
val = 0x25;//???
} else {
}
else {
continue;
}
@ -533,7 +355,7 @@ static int optimize_link_in_coherent(uint8_t ht_c_num)
return reset_needed;
}
static int ht_setup_chains(uint8_t ht_c_num)
static int ht_setup_chains(unsigned ht_c_num)
{
/* Assumption the HT chain that is bus 0 has the HT I/O Hub on it.
* On most boards this just happens. If a cpu has multiple
@ -541,21 +363,18 @@ static int ht_setup_chains(uint8_t ht_c_num)
* links needs to be programed to point at bus 0.
*/
int reset_needed;
uint8_t upos;
unsigned upos;
device_t udev;
uint8_t i;
int i;
reset_needed = 0;
for (i = 0; i < ht_c_num; i++) {
uint32_t reg;
uint8_t devpos;
unsigned reg;
unsigned devpos;
unsigned regpos;
uint32_t dword;
uint8_t busn;
#if (USE_DCACHE_RAM == 1) && (K8_SCAN_PCI_BUS == 1)
unsigned bus;
#endif
unsigned dword;
unsigned busn;
reg = pci_read_config32(PCI_DEV(0,0x18,1), 0xe0 + i * 4);
@ -575,13 +394,7 @@ static int ht_setup_chains(uint8_t ht_c_num)
upos = ((reg & 0xf00)>>8) * 0x20 + 0x80;
udev = PCI_DEV(0, devpos, 0);
reset_needed |= ht_setup_chainx(udev,upos,busn);
#if (USE_DCACHE_RAM == 1) && (K8_SCAN_PCI_BUS == 1)
/* You can use use this in romcc, because there is function call in romcc, recursive will kill you */
bus = busn; // we need 32 bit
reset_needed |= (scan_pci_bus(bus)>>16); // take out reset_needed that stored in upword
#endif
reset_needed |= ht_setup_chainx(udev, upos, busn);
}
reset_needed |= optimize_link_in_coherent(ht_c_num);
@ -589,53 +402,33 @@ static int ht_setup_chains(uint8_t ht_c_num)
return reset_needed;
}
#ifndef K8_ALLOCATE_IO_RANGE
#define K8_ALLOCATE_IO_RANGE 0
#endif
static int ht_setup_chains_x(void)
{
uint8_t nodeid;
uint32_t reg;
uint32_t tempreg;
uint8_t next_busn;
uint8_t ht_c_num;
uint8_t nodes;
#if K8_ALLOCATE_IO_RANGE == 1
unsigned next_io_base;
#endif
unsigned nodeid;
unsigned reg;
unsigned tempreg;
unsigned next_busn;
unsigned ht_c_num;
unsigned nodes;
/* read PCI_DEV(0,0x18,0) 0x64 bit [8:9] to find out SbLink m */
reg = pci_read_config32(PCI_DEV(0, 0x18, 0), 0x64);
/* update PCI_DEV(0, 0x18, 1) 0xe0 to 0x05000m03, and next_busn=0x3f+1 */
/* update PCI_DEV(0, 0x18, 1) 0xe0 to 0x05000m03, and next_busn=5+1 */
print_linkn_in("SBLink=", ((reg>>8) & 3) );
tempreg = 3 | ( 0<<4) | (((reg>>8) & 3)<<8) | (0<<16)| (0x3f<<24);
tempreg = 3 | ( 0<<4) | (((reg>>8) & 3)<<8) | (0<<16)| (5<<24);
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0, tempreg);
next_busn=0x3f+1; /* 0 will be used ht chain with SB we need to keep SB in bus0 in auto stage*/
#if K8_ALLOCATE_IO_RANGE == 1
/* io range allocation */
tempreg = 0 | (((reg>>8) & 0x3) << 4 )| (0x3<<12); //limit
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC4, tempreg);
tempreg = 3 | ( 3<<4) | (0<<12); //base
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC0, tempreg);
next_io_base = 0x3+0x1;
#endif
next_busn=5+1; /* 0 will be used ht chain with SB we need to keep SB in bus0 in auto stage*/
/* clean others */
for(ht_c_num=1;ht_c_num<4; ht_c_num++) {
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, 0);
/* io range allocation */
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xc4 + ht_c_num * 8, 0);
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xc0 + ht_c_num * 8, 0);
}
nodes = ((pci_read_config32(PCI_DEV(0, 0x18, 0), 0x60)>>4) & 7) + 1;
for(nodeid=0; nodeid<nodes; nodeid++) {
device_t dev;
uint8_t linkn;
unsigned linkn;
dev = PCI_DEV(0, 0x18+nodeid,0);
for(linkn = 0; linkn<3; linkn++) {
unsigned regpos;
@ -651,23 +444,13 @@ static int ht_setup_chains_x(void)
break;
}
}
if(ht_c_num == 4) break; /*used up only 4 non conherent allowed*/
if(ht_c_num == 4) break; /* used up, only 4 non conherent allowed */
/*update to 0xe0...*/
if((reg & 0xf) == 3) continue; /*SbLink so don't touch it */
print_linkn_in("\tbusn=", next_busn);
tempreg |= (next_busn<<16)|((next_busn+0x3f)<<24);
tempreg |= (next_busn<<16)|((next_busn+5)<<24);
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, tempreg);
next_busn+=0x3f+1;
#if K8_ALLOCATE_IO_RANGE == 1
/* io range allocation */
tempreg = nodeid | (linkn<<4) | ((next_io_base+0x3)<<12); //limit
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC4 + ht_c_num * 8, tempreg);
tempreg = 3 | ( 3<<4) | (next_io_base<<12); //base
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC0 + ht_c_num * 8, tempreg);
next_io_base += 0x3+0x1;
#endif
next_busn+=5+1;
}
}
/*update 0xe0, 0xe4, 0xe8, 0xec from PCI_DEV(0, 0x18,1) to PCI_DEV(0, 0x19,1) to PCI_DEV(0, 0x1f,1);*/
@ -681,27 +464,12 @@ static int ht_setup_chains_x(void)
regpos = 0xe0 + i * 4;
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
pci_write_config32(dev, regpos, reg);
}
#if K8_ALLOCATE_IO_RANGE == 1
/* io range allocation */
for(i = 0; i< 4; i++) {
unsigned regpos;
regpos = 0xc4 + i * 8;
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
pci_write_config32(dev, regpos, reg);
}
for(i = 0; i< 4; i++) {
unsigned regpos;
regpos = 0xc0 + i * 8;
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
pci_write_config32(dev, regpos, reg);
}
#endif
}
/* recount ht_c_num*/
uint8_t i=0;
unsigned i=0;
for(ht_c_num=0;ht_c_num<4; ht_c_num++) {
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4);
if(((reg & 0xf) != 0x0)) {

View File

@ -17,9 +17,9 @@
#include <cpu/cpu.h>
#include <cpu/x86/lapic.h>
#include <cpu/amd/dualcore.h>
#if CONFIG_LOGICAL_CPUS==1
#include <cpu/amd/dualcore.h>
#include <pc80/mc146818rtc.h>
#endif
@ -27,10 +27,7 @@
#include "root_complex/chip.h"
#include "northbridge.h"
#include "amdk8.h"
#if K8_E0_MEM_HOLE_SIZEK != 0
#include "./cpu_rev.c"
#endif
#include "cpu_rev.c"
#define FX_DEVS 8
static device_t __f0_dev[FX_DEVS];
@ -640,6 +637,41 @@ static uint32_t find_pci_tolm(struct bus *bus)
return tolm;
}
/* Memory hoisting (rev E and later): reclaim the DRAM shadowed by the
 * MMIO hole below 4GB by sliding it above 4GB.
 *
 * mmio_basek: start of the MMIO hole, in KB.
 * i:          index (0-7) of the DRAM base/limit register pair whose
 *             range contains the hole.
 *
 * Returns the amount of memory reclaimed (carry_over), in KB, so the
 * caller can add it back to the node's usable size.
 */
static uint32_t hoist_memory(unsigned long mmio_basek, int i)
{
int ii;
uint32_t carry_over;
device_t dev;
uint32_t base, limit;
uint32_t basek;
uint32_t hoist;
/* KB between the hole start and the 4GB boundary (4*1024*1024 KB). */
carry_over = (4*1024*1024) - mmio_basek;
/* Shift every enabled DRAM range above range i up by carry_over.
 * Base/limit registers hold address bits in [31:16] at 64KB-ish
 * granularity, hence the (carry_over << 2) adjustment. */
for(ii=7;ii>i;ii--) {
base = f1_read_config32(0x40 + (ii << 3));
limit = f1_read_config32(0x44 + (ii << 3));
/* Skip ranges without both read and write enable bits set. */
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
continue;
}
f1_write_config32(0x44 + (ii << 3),limit + (carry_over << 2));
f1_write_config32(0x40 + (ii << 3),base + (carry_over << 2));
}
/* Extend the limit of range i itself to cover the hoisted memory. */
limit = f1_read_config32(0x44 + (i << 3));
f1_write_config32(0x44 + (i << 3),limit + (carry_over << 2));
dev = __f1_dev[i];
/* NOTE(review): 'base' is read here but never used afterwards — looks
 * like a leftover; confirm the extra config read is not needed. */
base = pci_read_config32(dev, 0x40 + (i << 3));
basek = (pci_read_config32(dev, 0x40 + (i << 3)) & 0xffff0000) >> 2;
/* Program the DRAM hole address register (offset 0xf0). */
hoist = /* hole start address */
((mmio_basek << 10) & 0xff000000) +
/* hole address to memory controller address */
(((basek + carry_over) >> 6) & 0x0000ff00) +
/* enable */
1;
pci_write_config32(dev, 0xf0, hoist);
return carry_over;
}
static void pci_domain_set_resources(device_t dev)
{
unsigned long mmio_basek;
@ -647,42 +679,22 @@ static void pci_domain_set_resources(device_t dev)
int i, idx;
pci_tolm = find_pci_tolm(&dev->link[0]);
#if 1
/* work around for kernel dual core NUMA bug */
printk_debug("find_pci_tolm = %x\n",pci_tolm);
if(pci_tolm > 0xf8000000) pci_tolm = 0xf8000000;
#endif
#warning "FIXME handle interleaved nodes"
mmio_basek = pci_tolm >> 10;
/* Round mmio_basek to something the processor can support */
mmio_basek &= ~((1 << 6) -1);
#if 1
#warning "FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M MMIO hole"
/* Round the mmio hold to 64M */
mmio_basek &= ~((64*1024) - 1);
#endif
#if K8_E0_MEM_HOLE_SIZEK != 0
if (!is_cpu_pre_e0())
for (i = 0; i < 8; i++) {
uint32_t base;
base = f1_read_config32(0x40 + (i << 3));
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
continue;
}
base = pci_read_config32(__f1_dev[i], 0xf0);
if((base & 1)==0) continue;
base &= 0xff<<24;
base >>= 10;
if (mmio_basek > base) {
mmio_basek = base;
}
break; // only one hole
}
#endif
idx = 10;
for(i = 0; i < 8; i++) {
uint32_t base, limit;
unsigned basek, limitk, sizek;
base = f1_read_config32(0x40 + (i << 3));
limit = f1_read_config32(0x44 + (i << 3));
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
@ -708,6 +720,9 @@ static void pci_domain_set_resources(device_t dev)
pre_sizek = mmio_basek - basek;
ram_resource(dev, idx++, basek, pre_sizek);
sizek -= pre_sizek;
if(! is_cpu_pre_e0() ) {
sizek += hoist_memory(mmio_basek,i);
}
basek = mmio_basek;
}
if ((basek + sizek) <= 4*1024*1024) {
@ -767,54 +782,20 @@ static struct device_operations pci_domain_ops = {
.ops_pci_bus = &pci_cf8_conf1,
};
#define APIC_ID_OFFSET 0x10
static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
{
struct bus *cpu_bus;
device_t dev_mc;
int bsp_apic_id;
int apic_id_offset;
unsigned max_siblings;
int i,j;
unsigned nb_cfg_54;
int enable_apic_ext_id;
unsigned siblings;
#if CONFIG_LOGICAL_CPUS == 1
int e0_later_single_core;
int disable_siblings;
#endif
nb_cfg_54 = 0;
enable_apic_ext_id = 0;
siblings = 0;
/* Find the bootstrap processors apicid */
bsp_apic_id = lapicid();
/* See if I will enable extended ids' */
apic_id_offset = bsp_apic_id;
#if CONFIG_LOGICAL_CPUS == 1
disable_siblings = !CONFIG_LOGICAL_CPUS;
get_option(&disable_siblings, "dual_core");
// for pre_e0, nb_cfg_54 can not be set, ( even set, when you read it still be 0)
// How can I get the nb_cfg_54 of every node' nb_cfg_54 in bsp??? and differ d0 and e0 single core
nb_cfg_54 = read_nb_cfg_54();
#endif
dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
if (!dev_mc) {
die("0:18.0 not found?");
}
if (pci_read_config32(dev_mc, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
{
enable_apic_ext_id = 1;
if (apic_id_offset == 0) {
/* bsp apic id is not changed */
apic_id_offset = APIC_ID_OFFSET;
}
}
/* For now assume all cpus have the same number of siblings */
max_siblings = (cpuid_ecx(0x80000008) & 0xff) + 1;
/* Find which cpus are present */
cpu_bus = &dev->link[0];
@ -835,49 +816,9 @@ static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
}
}
#if CONFIG_LOGICAL_CPUS == 1
e0_later_single_core = 0;
if ((!disable_siblings) && dev && dev->enabled) {
j = (pci_read_config32(dev, 0xe8) >> 12) & 3; // dev is func 3
printk_debug(" %s siblings=%d\r\n", dev_path(dev), j);
if(nb_cfg_54) {
// For e0 single core if nb_cfg_54 is set, apicid will be 0, 2, 4....
// ----> you can mixed single core e0 and dual core e0 at any sequence
// That is the typical case
if(j == 0 ){
e0_later_single_core = is_e0_later_in_bsp(i); // single core
} else {
e0_later_single_core = 0;
}
if(e0_later_single_core) {
printk_debug("\tFound e0 single core\r\n");
j=1;
}
if(siblings > j ) {
//actually we can't be here, because d0 nb_cfg_54 can not be set
//even worse is_e0_later_in_bsp() can not find out if it is d0 or e0
die("When NB_CFG_54 is set, if you want to mix e0 (single core and dual core) and single core(pre e0) CPUs, you need to put all the single core (pre e0) CPUs before all the (e0 single or dual core) CPUs\r\n");
}
else {
siblings = j;
}
} else {
siblings = j;
}
}
#endif
#if CONFIG_LOGICAL_CPUS==1
for (j = 0; j <= (e0_later_single_core?0:siblings); j++ ) {
#else
for (j = 0; j <= siblings; j++ ) {
#endif
/* Build the cpu device path */
cpu_path.type = DEVICE_PATH_APIC;
cpu_path.u.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
cpu_path.u.apic.apic_id = i*max_siblings;
/* See if I can find the cpu */
cpu = find_dev_path(cpu_bus, &cpu_path);
@ -899,17 +840,11 @@ static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
/* Report what I have done */
if (cpu) {
if(enable_apic_ext_id) {
if(cpu->path.u.apic.apic_id<apic_id_offset) { //all add offset except bsp core0
if( (cpu->path.u.apic.apic_id > siblings) || (bsp_apic_id!=0) )
cpu->path.u.apic.apic_id += apic_id_offset;
}
}
printk_debug("CPU: %s %s\n",
dev_path(cpu), cpu->enabled?"enabled":"disabled");
}
} //j
}
return max;
}

View File

@ -585,6 +585,16 @@ static void hw_enable_ecc(const struct mem_controller *ctrl)
}
/* Rev E (E-stepping) memory controller setup: set the UpperCSMap bit
 * (bit 29) in the DRAM Configuration Low register (function 2,
 * offset 0x90) for this node's memory controller.
 *
 * ctrl: the memory controller whose f2 config space is programmed.
 */
static void e_step_cpu(const struct mem_controller *ctrl)
{
uint32_t dcl;   /* removed unused local 'data32' */
/* set bit 29 (upper cs map) of function 2 offset 0x90 */
dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
dcl |= DCL_UpperCSMap;
pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
}
static int is_dual_channel(const struct mem_controller *ctrl)
{
uint32_t dcl;
@ -714,28 +724,14 @@ hw_err:
return sz;
}
static const unsigned cs_map_aa[15] = {
/* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
0, 1, 3, 6, 0,
0, 2, 4, 7, 9,
0, 0, 5, 8,10,
};
static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
{
uint32_t base0, base1, map;
uint32_t base0, base1;
uint32_t dch;
if (sz.side1 != sz.side2) {
sz.side2 = 0;
}
map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
map &= ~(0xf << (index * 4));
#if K8_4RANK_DIMM_SUPPORT == 1
if(sz.rank == 4) {
map &= ~(0xf << ( (index + 2) * 4));
}
#endif
/* For each base register.
* Place the dimm size in 32 MB quantities in the bits 31 - 21.
@ -747,22 +743,6 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
/* Make certain side1 of the dimm is at least 32MB */
if (sz.side1 >= (25 +3)) {
if(is_cpu_pre_d0()) {
map |= (sz.side1 - (25 + 3)) << (index *4);
#if K8_4RANK_DIMM_SUPPORT == 1
if(sz.rank == 4) {
map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
}
#endif
}
else {
map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
#if K8_4RANK_DIMM_SUPPORT == 1
if(sz.rank == 4) {
map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
}
#endif
}
base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
}
@ -791,8 +771,6 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
}
#endif
pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
/* Enable the memory clocks for this DIMM */
if (base0) {
dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
@ -806,6 +784,52 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
}
}
/* Program the DRAM Bank Address Map nibble(s) for one DIMM.
 *
 * ctrl:  memory controller (function-2 config space is read/written)
 * sz:    decoded SPD geometry for the DIMM (side1 size, rows, col, rank)
 * index: chip-select pair index; each DIMM owns a 4-bit field at
 *        (index * 4) in DRAM_BANK_ADDR_MAP, and 4-rank DIMMs also own
 *        the field at ((index + 2) * 4).
 *
 * Pre-D0 parts encode the map directly from the side-1 size; D0 and
 * later use the cs_map_aa row/column lookup table.
 *
 * Fix: removed the unused locals 'int row, col;'.
 */
static void set_dimm_map(const struct mem_controller *ctrl,
	struct dimm_size sz, unsigned index)
{
	static const unsigned cs_map_aa[15] = {
	/* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
	 0, 1, 3, 6, 0,
	 0, 2, 4, 7, 9,
	 0, 0, 5, 8,10,
	};
	uint32_t map;

	/* Clear this DIMM's nibble(s) before inserting the new encoding. */
	map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
	map &= ~(0xf << (index * 4));
#if K8_4RANK_DIMM_SUPPORT == 1
	if(sz.rank == 4) {
		map &= ~(0xf << ( (index + 2) * 4));
	}
#endif
	if (is_cpu_pre_d0()) {
		/* Pre-D0: encoding is (log2(side1 bytes) - 28). */
		map |= (sz.side1 - (25 + 3)) << (index *4);
#if K8_4RANK_DIMM_SUPPORT == 1
		if(sz.rank == 4) {
			map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
		}
#endif
	} else {
		/* D0 and later: look up encoding by row/column counts. */
		unsigned val;
		val = cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ];
		if(val == 0) {
			print_err("Invalid Column or Row count\r\n");
			val = 7;
		}
		map |= val << (index*4);
#if K8_4RANK_DIMM_SUPPORT == 1
		if(sz.rank == 4) {
			map |= val << ( (index + 2) * 4);
		}
#endif
	}
	pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
}
static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
{
int i;
@ -820,6 +844,7 @@ static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
return -1; /* Report SPD error */
}
set_dimm_size(ctrl, sz, i);
set_dimm_map(ctrl, sz, i);
}
return dimm_mask;
}
@ -865,6 +890,13 @@ static void set_top_mem(unsigned tom_k)
print_spew_hex32(tom_k);
print_spew(" KB\r\n");
#if 0
/* Report the amount of memory. */
print_debug("RAM: 0x");
print_debug_hex32(tom_k);
print_debug(" KB\r\n");
#endif
/* Now set top of memory */
msr_t msr;
msr.lo = (tom_k & 0x003fffff) << 10;
@ -971,7 +1003,7 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
if(is_dual_channel(ctrl)) {
/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
// print_debug("8 4GB chip selects cannot be interleaved\r\n");
print_spew("8 4GB chip selects cannot be interleaved\r\n");
return 0;
}
csbase_inc <<=1;
@ -981,7 +1013,7 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
csbase_inc = csbase_low_d0[common_cs_mode];
if(is_dual_channel(ctrl)) {
if( (bits==3) && (common_cs_mode > 8)) {
// print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
print_spew("8 cs_mode>8 chip selects cannot be interleaved\r\n");
return 0;
}
csbase_inc <<=1;
@ -1100,25 +1132,6 @@ unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
return end_k;
}
#if K8_E0_MEM_HOLE_SIZEK != 0
#define K8_E0_MEM_HOLE_LIMITK 4*1024*1024
#define K8_E0_MEM_HOLE_BASEK (K8_E0_MEM_HOLE_LIMITK - K8_E0_MEM_HOLE_SIZEK )
/* Program the rev-E DRAM Hole Address Register (function 1, offset 0xf0)
 * so addresses around the hole are routed to this controller node.
 *
 * base_k: memory base of this node in KB, folded into the hole-offset
 *         field together with K8_E0_MEM_HOLE_SIZEK.
 *
 * NOTE(review): the read-modify sequence below is a dead store — 'val'
 * is read and masked with 0x00ff00fe, then unconditionally overwritten
 * by the next assignment.  The mask suggests the author intended
 * 'val |= ...' to preserve reserved bits; confirm intent against the
 * BKDG before changing.
 */
static void set_e0_mem_hole(const struct mem_controller *ctrl, unsigned base_k)
{
	/* Route the addresses to the controller node */
	unsigned val;

	val = pci_read_config32(ctrl->f1,0xf0);

	val &= 0x00ff00fe;
	/* bit 0 = hole enable; base in bits 31:24, offset in bits 15:8 */
	val = (K8_E0_MEM_HOLE_BASEK << 10) | ((K8_E0_MEM_HOLE_SIZEK+base_k)>>(16-10)) | 1;

	pci_write_config32(ctrl->f1, 0xf0, val);
}
#endif
static void order_dimms(const struct mem_controller *ctrl)
{
unsigned long tom_k, base_k;
@ -1135,14 +1148,6 @@ static void order_dimms(const struct mem_controller *ctrl)
/* Compute the memory base address */
base_k = memory_end_k(ctrl, ctrl->node_id);
tom_k += base_k;
#if K8_E0_MEM_HOLE_SIZEK != 0
if(!is_cpu_pre_e0()) {
/* See if I need to check the range cover hole */
if ((base_k <= K8_E0_MEM_HOLE_BASEK) && (tom_k > K8_E0_MEM_HOLE_BASEK)) {
tom_k += K8_E0_MEM_HOLE_SIZEK;
}
}
#endif
route_dram_accesses(ctrl, base_k, tom_k);
set_top_mem(tom_k);
}
@ -2145,12 +2150,11 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
struct spd_set_memclk_result result;
const struct mem_param *param;
long dimm_mask;
#if 1
if (!controller_present(ctrl)) {
// print_debug("No memory controller present\r\n");
print_debug("No memory controller present\r\n");
return;
}
#endif
hw_enable_ecc(ctrl);
activate_spd_rom(ctrl);
dimm_mask = spd_detect_dimms(ctrl);
@ -2176,6 +2180,10 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
if (dimm_mask < 0)
goto hw_spd_err;
order_dimms(ctrl);
if( !is_cpu_pre_e0() ) {
print_debug("E step CPU\r\n");
e_step_cpu(ctrl);
}
return;
hw_spd_err:
/* Unrecoverable error reading SPD data */
@ -2280,22 +2288,6 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
} while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
}
// init e0 mem hole here
#if K8_E0_MEM_HOLE_SIZEK != 0
if (!is_cpu_pre_e0()) {
uint32_t base, limit;
unsigned base_k, limit_k;
base = pci_read_config32(ctrl->f1, 0x40 + (i << 3));
limit = pci_read_config32(ctrl->f1, 0x44 + (i << 3));
base_k = (base & 0xffff0000) >> 2;
limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
if ((base_k <= K8_E0_MEM_HOLE_BASEK) && (limit_k > K8_E0_MEM_HOLE_BASEK)) {
set_e0_mem_hole(ctrl+i, base_k);
}
}
#endif
print_debug(" done\r\n");
}
@ -2308,7 +2300,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
*/
#if USE_DCACHE_RAM == 0
#if CONFIG_DCACHE_RAM == 0
/* Make certain the first 1M of memory is intialized */
print_debug("Clearing initial memory region: ");

View File

@ -1,148 +1,16 @@
#define RES_DEBUG 0
/* Program a table of PCI config registers, relocated by per-node offsets.
 *
 * register_values holds 'max' unsigned ints, read as triples:
 *   [i+0] device/function in the upper bits plus the config register
 *         offset in the low 8 bits; offset_pci_dev is added so the same
 *         table can target different nodes
 *   [i+1] AND mask applied to the current register value
 *   [i+2] OR value; offset_io_base is added so I/O base addresses in
 *         the table can be relocated per node
 *
 * Fix: removed commented-out print_debug calls and three '#if 0' dead
 * blocks (one of which contained a 'prink_debug' typo); logic unchanged.
 */
static void setup_resource_map_offset(const unsigned int *register_values, int max, unsigned offset_pci_dev, unsigned offset_io_base)
{
	int i;

	for (i = 0; i < max; i += 3) {
		device_t dev;
		unsigned where;
		unsigned long reg;

		/* Low 8 bits select the register; the rest names the device. */
		dev = (register_values[i] & ~0xff) + offset_pci_dev;
		where = register_values[i] & 0xff;
		reg = pci_read_config32(dev, where);
		reg &= register_values[i+1];
		reg |= register_values[i+2] + offset_io_base;
		pci_write_config32(dev, where, reg);
	}
}
#define RES_PCI_IO 0x10
#define RES_PORT_IO_8 0x22
#define RES_PORT_IO_32 0x20
#define RES_MEM_IO 0x40
#define RES_MEM_IO 0x30
/* Program a typed table of registers, relocated by per-node offsets.
 *
 * register_values holds 'max' unsigned ints, read as quadruples:
 *   [i+0] resource type: RES_PCI_IO, RES_PORT_IO_8, or RES_PORT_IO_32
 *   [i+1] target: PCI dev/reg (RES_PCI_IO, offset_pci_dev added) or
 *         I/O port (RES_PORT_IO_*, offset_io_base added)
 *   [i+2] AND mask applied to the current value
 *   [i+3] OR value merged in afterwards
 * Entries with an unknown type fall through the switch and are ignored
 * (the RES_MEM_IO case is compiled out under '#if 0').
 */
static void setup_resource_map_x_offset(const unsigned int *register_values, int max, unsigned offset_pci_dev, unsigned offset_io_base)
{
	int i;

#if RES_DEBUG
	print_debug("setting up resource map ex offset....");
#endif

#if RES_DEBUG
	print_debug("\r\n");
#endif
	for(i = 0; i < max; i += 4) {
#if RES_DEBUG
#if CONFIG_USE_INIT
		/* Debug dump of each entry after offset relocation. */
		printk_debug("%04x: %02x %08x <- & %08x | %08x\r\n",
			i/4, register_values[i],
			register_values[i+1] + ( (register_values[i]==RES_PCI_IO) ? offset_pci_dev : 0),
			register_values[i+2],
			register_values[i+3] + ( ( (register_values[i] & RES_PORT_IO_32) == RES_PORT_IO_32) ? offset_io_base : 0)
			);
#else
		print_debug_hex16(i/4);
		print_debug(": ");
		print_debug_hex8(register_values[i]);
		print_debug(" ");
		print_debug_hex32(register_values[i+1] + ( (register_values[i]==RES_PCI_IO) ? offset_pci_dev : 0) );
		print_debug(" <- & ");
		print_debug_hex32(register_values[i+2]);
		print_debug(" | ");
		print_debug_hex32(register_values[i+3] +
			(((register_values[i] & RES_PORT_IO_32) == RES_PORT_IO_32) ? offset_io_base : 0)
			);
		print_debug("\r\n");
#endif
#endif
		switch (register_values[i]) {
		case RES_PCI_IO: //PCI
			{
			device_t dev;
			unsigned where;
			unsigned long reg;
			/* Low 8 bits of [i+1] select the config register. */
			dev = (register_values[i+1] & ~0xff) + offset_pci_dev;
			where = register_values[i+1] & 0xff;
			reg = pci_read_config32(dev, where);
			reg &= register_values[i+2];
			reg |= register_values[i+3];
			pci_write_config32(dev, where, reg);
			}
			break;
		case RES_PORT_IO_8: // io 8
			{
			unsigned where;
			unsigned reg;
			where = register_values[i+1] + offset_io_base;
			reg = inb(where);
			reg &= register_values[i+2];
			reg |= register_values[i+3];
			outb(reg, where);
			}
			break;
		case RES_PORT_IO_32: //io32
			{
			unsigned where;
			unsigned long reg;
			where = register_values[i+1] + offset_io_base;
			reg = inl(where);
			reg &= register_values[i+2];
			reg |= register_values[i+3];
			outl(reg, where);
			}
			break;
#if 0
		case RES_MEM_IO: //mem
			{
			unsigned where;
			unsigned long reg;
			where = register_values[i+1];
			reg = read32(where);
			reg &= register_values[i+2];
			reg |= register_values[i+3];
			write32( where, reg);
			}
			break;
#endif

		} // switch

	}

#if RES_DEBUG
	print_debug("done.\r\n");
#endif
}
static void setup_resource_map_x(const unsigned int *register_values, int max)
{
int i;
#if RES_DEBUG
print_debug("setting up resource map ex offset....");
print_debug("setting up resource map ex....");
#endif
@ -227,6 +95,7 @@ static void setup_resource_map_x(const unsigned int *register_values, int max)
#endif
}
static void setup_iob_resource_map(const unsigned int *register_values, int max)
{
int i;

View File

@ -1,12 +1,11 @@
config chip.h
driver amd8111.o
#driver amd8111_usb.o
driver amd8111_usb.o
driver amd8111_lpc.o
driver amd8111_ide.o
driver amd8111_acpi.o
#driver amd8111_usb2.o
#driver amd8111_ac97.o
#driver amd8111_nic.o
driver amd8111_usb2.o
driver amd8111_ac97.o
driver amd8111_nic.o
driver amd8111_pci.o
driver amd8111_smbus.o
object amd8111_reset.o

View File

@ -97,6 +97,7 @@ static void acpi_init(struct device *dev)
#endif
/* power after power fail */
on = MAINBOARD_POWER_ON_AFTER_POWER_FAIL;
get_option(&on, "power_on_after_fail");
byte = pci_read_config8(dev, PREVIOUS_POWER_STATE);
@ -177,7 +178,7 @@ static struct device_operations acpi_ops = {
.enable_resources = acpi_enable_resources,
.init = acpi_init,
.scan_bus = scan_static_bus,
// .enable = amd8111_enable,
.enable = amd8111_enable,
.ops_pci = &lops_pci,
.ops_smbus_bus = &lops_smbus_bus,
};

View File

@ -113,13 +113,9 @@ static void lpc_init(struct device *dev)
byte = pci_read_config8(dev, 0x46);
pci_write_config8(dev, 0x46, byte | (1<<0));
/* power after power fail */
/* Enable 5Mib Rom window */
byte = pci_read_config8(dev, 0x43);
if (pwr_on) {
byte &= ~(1<<6);
} else {
byte |= (1<<6);
}
byte |= 0xC0;
pci_write_config8(dev, 0x43, byte);
/* Enable Port 92 fast reset */

View File

@ -55,6 +55,7 @@ static struct device_operations pci_ops = {
.enable_resources = pci_bus_enable_resources,
.init = pci_init,
.scan_bus = pci_scan_bridge,
/* PCI Subordinate bus reset is not implemented */
.ops_pci = &lops_pci,
};

View File

@ -26,7 +26,7 @@ static struct device_operations usb_ops = {
.enable_resources = pci_dev_enable_resources,
.init = 0,
.scan_bus = scan_static_bus,
// .enable = amd8111_enable,
.enable = amd8111_enable,
.ops_pci = &lops_pci,
};

View File

@ -66,7 +66,7 @@ static struct pnp_info pnp_dev_info[] = {
static void enable_dev(struct device *dev)
{
pnp_enable_devices(dev, &pnp_ops,
pnp_enable_devices(dev, &ops,
sizeof(pnp_dev_info)/sizeof(pnp_dev_info[0]), pnp_dev_info);
}

Binary file not shown.

View File

@ -1301,10 +1301,7 @@ static struct triple *transform_to_arch_instruction(
struct compile_state *state, struct triple *ins);
static struct triple *flatten(
struct compile_state *state, struct triple *first, struct triple *ptr);
static void print_dominators(struct compile_state *state,
FILE *fp, struct basic_blocks *bb);
static void print_dominance_frontiers(struct compile_state *state,
FILE *fp, struct basic_blocks *bb);
@ -9873,7 +9870,15 @@ static void simplify_load(struct compile_state *state, struct triple *ins)
src += addr->u.cval;
if (src > end) {
error(state, ins, "Load address out of bounds");
/*
* The constant puts the load address out of bounds for
* the array. However the load may be only conditionally
* called and it may never be called with this argument.
* So we can't error here because we don't know
* if the load will actually be executed. So instead
 * simply avoid performing the optimization.
*/
return;
}
memset(buffer, 0, sizeof(buffer));
@ -15296,6 +15301,8 @@ static void romcc_print_blocks(struct compile_state *state, FILE *fp)
}
static void print_blocks(struct compile_state *state, const char *func, FILE *fp)
{
static void print_dominators(struct compile_state *state, FILE *fp, struct basic_blocks *bb);
static void print_dominance_frontiers(struct compile_state *state, FILE *fp, struct basic_blocks *bb);
if (state->compiler->debug & DEBUG_BASIC_BLOCKS) {
fprintf(fp, "After %s\n", func);
romcc_print_blocks(state, fp);