cpu: Fix spelling
Change-Id: I69c46648de0689e9bed84c7726906024ad65e769
Signed-off-by: Martin Roth <martin.roth@se-eng.com>
Reviewed-on: http://review.coreboot.org/3729
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
This commit is contained in:
parent
0cb07e3476
commit
4c3ab7376e
|
@ -88,7 +88,7 @@ static void post_cache_as_ram(void)
|
|||
#endif
|
||||
#if 1
|
||||
{
|
||||
/* Check value of esp to verify if we have enough rom for stack in Cache as RAM */
|
||||
/* Check value of esp to verify if we have enough room for stack in Cache as RAM */
|
||||
unsigned v_esp;
|
||||
__asm__ volatile (
|
||||
"movl %%esp, %0\n\t"
|
||||
|
@ -123,7 +123,7 @@ static void post_cache_as_ram(void)
|
|||
/* set new esp */ /* before CONFIG_RAMBASE */
|
||||
"subl %0, %%esp\n\t"
|
||||
::"a"( (CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE)- (CONFIG_RAMTOP) )
|
||||
/* discard all registers (eax is used for %0), so gcc redo everything
|
||||
/* discard all registers (eax is used for %0), so gcc redoes everything
|
||||
after the stack is moved */
|
||||
: "cc", "memory", "%ebx", "%ecx", "%edx", "%esi", "%edi", "%ebp"
|
||||
);
|
||||
|
|
|
@ -91,10 +91,10 @@ unsigned get_apicid_base(unsigned ioapic_num)
|
|||
}
|
||||
#endif
|
||||
|
||||
//contruct apicid_base
|
||||
//Construct apicid_base
|
||||
|
||||
if((!disable_siblings) && (siblings>0) ) {
|
||||
/* for 8 way dual core, we will used up apicid 16:16, actualy 16 is not allowed by current kernel
|
||||
/* for 8 way dual core, we will used up apicid 16:16, actually 16 is not allowed by current kernel
|
||||
and the kernel will try to get one that is small than 16 to make io apic work.
|
||||
I don't know when the kernel can support 256 apic id. (APIC_EXT_ID is enabled) */
|
||||
|
||||
|
|
|
@ -163,7 +163,7 @@ static void eng2900(void)
|
|||
* clocks when CPU is snooped. Because setting XSTATE to 0
|
||||
* overrides any other XSTATE action, the code will always
|
||||
* stall for 4 GeodeLink clocks after a snoop request goes
|
||||
* away even if it occured a clock or two later than a
|
||||
* away even if it occurred a clock or two later than a
|
||||
* different snoop; the stall signal will never 'glitch high'
|
||||
* for only one or two CPU clocks with this code.
|
||||
*/
|
||||
|
@ -201,7 +201,7 @@ static void eng2900(void)
|
|||
msr.lo = 0x30000;
|
||||
wrmsr(MSR_GLCP + 0x0073, msr);
|
||||
|
||||
/* Writing action number 5: STALL_CPU_PIPE when exitting idle
|
||||
/* Writing action number 5: STALL_CPU_PIPE when exiting idle
|
||||
state or not in idle state */
|
||||
msr.hi = 0;
|
||||
msr.lo = 0x00430000;
|
||||
|
@ -293,7 +293,7 @@ static void bug118339(void)
|
|||
*
|
||||
* PBZ 3659:
|
||||
* The MC reordered transactions incorrectly and breaks coherency.
|
||||
* Disable reording and take a potential performance hit.
|
||||
* Disable reordering and take a potential performance hit.
|
||||
* This is safe to do here and not in MC init since there is nothing
|
||||
* to maintain coherency with and the cache is not enabled yet.
|
||||
*/
|
||||
|
|
|
@ -15,7 +15,7 @@ void cpuRegInit (void)
|
|||
/* Set up GLCP to grab BTM data. */
|
||||
msrnum = GLCP_DBGOUT; /* GLCP_DBGOUT MSR */
|
||||
msr.hi = 0x0;
|
||||
msr.lo = 0x08; /* reset value (SCOPE_SEL = 0) causes FIFO toshift out, */
|
||||
msr.lo = 0x08; /* reset value (SCOPE_SEL = 0) causes FIFO to shift out, */
|
||||
wrmsr(msrnum, msr); /* exchange it to anything else to prevent this */
|
||||
|
||||
/* Turn off debug clock */
|
||||
|
@ -119,7 +119,7 @@ void cpuRegInit (void)
|
|||
wrmsr(msrnum, msr);
|
||||
}
|
||||
|
||||
/* FPU impercise exceptions bit */
|
||||
/* FPU imprecise exceptions bit */
|
||||
{
|
||||
msrnum = CPU_FPU_MSR_MODE;
|
||||
msr = rdmsr(msrnum);
|
||||
|
|
|
@ -67,7 +67,7 @@ static void pcideadlock(void)
|
|||
/***/
|
||||
/** PBZ 3659:*/
|
||||
/** The MC reordered transactions incorrectly and breaks coherency.*/
|
||||
/** Disable reording and take a potential performance hit.*/
|
||||
/** Disable reordering and take a potential performance hit.*/
|
||||
/** This is safe to do here and not in MC init since there is nothing*/
|
||||
/** to maintain coherency with and the cache is not enabled yet.*/
|
||||
/***/
|
||||
|
|
|
@ -54,7 +54,7 @@ static const struct {
|
|||
|
||||
{ DC_CFG, AMD_DR_Bx, AMD_PTYPE_SVR,
|
||||
0x00000000, 0x00000000,
|
||||
0x00000000, 0x00000C00 }, /* Errata 326 */
|
||||
0x00000000, 0x00000C00 }, /* Erratum 326 */
|
||||
|
||||
{ NB_CFG, AMD_FAM10_ALL, AMD_PTYPE_DC | AMD_PTYPE_MC,
|
||||
0x00000000, 1 << 22,
|
||||
|
|
|
@ -68,7 +68,7 @@ Fam10 Bios and Kernel Development Guide #31116, rev 3.48, April 22, 2010
|
|||
|
||||
9.- TODO Requires information on current delivery capability
|
||||
(depends on mainboard and maybe power supply ?). One might use a config
|
||||
option with the maximum number of Ampers that the board can deliver to CPU.
|
||||
option with the maximum number of Amperes that the board can deliver to CPU.
|
||||
|
||||
10.- [Multiprocessor] TODO 2.4.2.12
|
||||
[Uniprocessor] FIXME ? We call setPStateMaxVal() in init_fidvid_stage2,
|
||||
|
@ -79,7 +79,7 @@ Fam10 Bios and Kernel Development Guide #31116, rev 3.48, April 22, 2010
|
|||
11.- finalPstateChange() from init_fidvid_Stage2 (BKDG says just "may", anyway)
|
||||
|
||||
12.- generate ACPI for p-states. FIXME
|
||||
Needs more assesment. There's some kind of fixed support that
|
||||
Needs more assessment. There's some kind of fixed support that
|
||||
does not seem to depend on CPU revision or actual MSRC001_00[68:64]
|
||||
as BKDG apparently requires.
|
||||
http://www.coreboot.org/ACPI#CPU_Power_Management
|
||||
|
@ -935,7 +935,7 @@ static void fixPsNbVidAfterWR(u32 newNbVid, u8 NbVidUpdatedAll,u8 pviMode)
|
|||
|
||||
static void finalPstateChange(void)
|
||||
{
|
||||
/* Enble P0 on all cores for best performance.
|
||||
/* Enable P0 on all cores for best performance.
|
||||
* Linux can slow them down later if need be.
|
||||
* It is safe since they will be in C1 halt
|
||||
* most of the time anyway.
|
||||
|
|
|
@ -424,7 +424,7 @@ static void start_node(u8 node)
|
|||
/**
|
||||
* static void setup_remote_node(u32 node)
|
||||
*
|
||||
* Copy the BSP Adress Map to each AP.
|
||||
* Copy the BSP Address Map to each AP.
|
||||
*/
|
||||
static void setup_remote_node(u8 node)
|
||||
{
|
||||
|
|
|
@ -496,7 +496,7 @@ static void init_fidvid_bsp_stage2(unsigned ap_apicid, void *gp)
|
|||
continue;
|
||||
if ((readback & 0xff) == 2) {
|
||||
timeout = 0;
|
||||
break; /* target ap is stage 2, it's FID has beed set */
|
||||
break; /* target ap is stage 2, its FID has been set */
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -603,7 +603,7 @@ static void init_fidvid_bsp(unsigned bsp_apicid)
|
|||
|
||||
/* For all APs ( We know the APIC ID of all AP even the APIC ID is lifted)
|
||||
* send signal to the AP it could change it's fid/vid */
|
||||
/* remote read singnal from AP that AP is done */
|
||||
/* remote read signal from AP that AP is done */
|
||||
|
||||
fv.common_fidvid &= 0xffff00;
|
||||
|
||||
|
|
|
@ -509,7 +509,7 @@ static void model_fxx_init(device_t dev)
|
|||
|
||||
id = get_node_core_id(read_nb_cfg_54()); // pre e0 nb_cfg_54 can not be set
|
||||
|
||||
/* Is this a bad location? In particular can another node prefecth
|
||||
/* Is this a bad location? In particular can another node prefetch
|
||||
* data from this node before we have initialized it?
|
||||
*/
|
||||
if (id.coreid == 0)
|
||||
|
|
|
@ -75,7 +75,7 @@ static int write_pstates_for_core(u8 pstate_num, u16 *pstate_feq, u8 *pstate_vid
|
|||
|
||||
#if CONFIG_K8_REV_F_SUPPORT
|
||||
/*
|
||||
* Details about this algorithm , refert to BDKG 10.5.1
|
||||
* Details about this algorithm , refer to BDKG 10.5.1
|
||||
* Two parts are included, the another is the DSDT reconstruction process
|
||||
*/
|
||||
|
||||
|
@ -202,7 +202,7 @@ static int pstates_algorithm(u32 pcontrol_blk, u8 plen, u8 onlyBSP)
|
|||
goto write_pstates;
|
||||
}
|
||||
|
||||
/* Get the multipier of the fid frequency */
|
||||
/* Get the multiplier of the fid frequency */
|
||||
/*
|
||||
* Fid multiplier is always 100 revF and revG.
|
||||
*/
|
||||
|
@ -316,7 +316,7 @@ static int pstates_algorithm(u32 pcontrol_blk, u8 plen, u8 onlyBSP)
|
|||
Pstate_num++;
|
||||
}
|
||||
|
||||
/* Constuct P[Min] State */
|
||||
/* Construct P[Min] State */
|
||||
if (Max_fid == 0x2A && Max_vid != 0x0) {
|
||||
Pstate_fid[Pstate_num] = 0x2;
|
||||
Pstate_feq[Pstate_num] =
|
||||
|
|
|
@ -128,10 +128,10 @@ void amd_setup_mtrrs(void)
|
|||
}
|
||||
|
||||
/* Now that I have mapped what is memory and what is not
|
||||
* Setup the mtrrs so we can cache the memory.
|
||||
* Set up the mtrrs so we can cache the memory.
|
||||
*/
|
||||
|
||||
// Rev. F K8 supports has SYSCFG_MSR_TOM2WB and dont need
|
||||
// Rev. F K8 supports has SYSCFG_MSR_TOM2WB and doesn't need
|
||||
// variable MTRR to span memory above 4GB
|
||||
// Lower revisions K8 need variable MTRR over 4GB
|
||||
x86_setup_var_mtrrs(address_bits, has_tom2wb ? 0 : 1);
|
||||
|
|
|
@ -95,10 +95,10 @@ u32 get_apicid_base(u32 ioapic_num)
|
|||
nb_cfg_54 = read_nb_cfg_54();
|
||||
|
||||
|
||||
//contruct apicid_base
|
||||
//Construct apicid_base
|
||||
|
||||
if((!disable_siblings) && (siblings>0) ) {
|
||||
/* for 8 way dual core, we will used up apicid 16:16, actualy
|
||||
/* for 8 way dual core, we will used up apicid 16:16, actually
|
||||
16 is not allowed by current kernel and the kernel will try
|
||||
to get one that is small than 16 to make io apic work. I don't
|
||||
know when the kernel can support 256 apic id.
|
||||
|
|
|
@ -169,7 +169,7 @@ void smm_initiate_relocation(void);
|
|||
void smm_initiate_relocation_parallel(void);
|
||||
struct bus;
|
||||
void bsp_init_and_start_aps(struct bus *cpu_bus);
|
||||
/* Returns 0 on succes. < 0 on failure. */
|
||||
/* Returns 0 on success. < 0 on failure. */
|
||||
int setup_ap_init(struct bus *cpu_bus, int *max_cpus,
|
||||
const void *microcode_patch);
|
||||
/* Returns 0 on success, < 0 on failure. */
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
#include "chip.h"
|
||||
|
||||
/*
|
||||
* List of suported C-states in this processor
|
||||
* List of supported C-states in this processor
|
||||
*
|
||||
* Latencies are typical worst-case package exit time in uS
|
||||
* taken from the SandyBridge BIOS specification.
|
||||
|
@ -324,7 +324,7 @@ static void configure_thermal_target(void)
|
|||
return;
|
||||
conf = lapic->chip_info;
|
||||
|
||||
/* Set TCC activaiton offset if supported */
|
||||
/* Set TCC activation offset if supported */
|
||||
msr = rdmsr(MSR_PLATFORM_INFO);
|
||||
if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
|
||||
msr = rdmsr(MSR_TEMPERATURE_TARGET);
|
||||
|
@ -508,8 +508,8 @@ void bsp_init_and_start_aps(struct bus *cpu_bus)
|
|||
int num_aps;
|
||||
const void *microcode_patch;
|
||||
|
||||
/* Perform any necesarry BSP initialization before APs are brought up.
|
||||
* This call alos allows the BSP to prepare for any secondary effects
|
||||
/* Perform any necessary BSP initialization before APs are brought up.
|
||||
* This call also allows the BSP to prepare for any secondary effects
|
||||
* from calling cpu_initialize() such as smm_init(). */
|
||||
bsp_init_before_ap_bringup(cpu_bus);
|
||||
|
||||
|
@ -529,7 +529,7 @@ void bsp_init_and_start_aps(struct bus *cpu_bus)
|
|||
}
|
||||
|
||||
if (smm_initialize()) {
|
||||
printk(BIOS_CRIT, "SMM Initialiazation failed...\n");
|
||||
printk(BIOS_CRIT, "SMM Initialization failed...\n");
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -78,7 +78,7 @@ static device_t cpu_devs[CONFIG_MAX_CPUS];
|
|||
static atomic_t num_aps;
|
||||
/* Number of APs that have relocated their SMM handler. */
|
||||
static atomic_t num_aps_relocated_smm;
|
||||
/* Barrier to stop APs from performing SMM relcoation. */
|
||||
/* Barrier to stop APs from performing SMM relocation. */
|
||||
static int smm_relocation_barrier_begin __attribute__ ((aligned (64)));
|
||||
/* Determine if hyperthreading is disabled. */
|
||||
int ht_disabled;
|
||||
|
@ -145,7 +145,7 @@ void release_aps_for_smm_relocation(int do_parallel)
|
|||
|
||||
/* The mtrr code sets up ROM caching on the BSP, but not the others. However,
|
||||
* the boot loader payload disables this. In order for Linux not to complain
|
||||
* ensure the caching is disabled for tha APs before going to sleep. */
|
||||
* ensure the caching is disabled for the APs before going to sleep. */
|
||||
static void cleanup_rom_caching(void)
|
||||
{
|
||||
x86_mtrr_disable_rom_caching();
|
||||
|
@ -178,7 +178,7 @@ static void asmlinkage ap_init(unsigned int cpu, void *microcode_ptr)
|
|||
|
||||
ap_initiate_smm_relocation();
|
||||
|
||||
/* Indicate that SMM relocation has occured on this thread. */
|
||||
/* Indicate that SMM relocation has occurred on this thread. */
|
||||
atomic_inc(&num_aps_relocated_smm);
|
||||
|
||||
/* After SMM relocation a 2nd microcode load is required. */
|
||||
|
@ -401,7 +401,7 @@ static int allocate_cpu_devices(struct bus *cpu_bus, int *total_hw_threads)
|
|||
/* Allocate the new cpu device structure */
|
||||
new = alloc_find_dev(cpu_bus, &cpu_path);
|
||||
if (new == NULL) {
|
||||
printk(BIOS_CRIT, "Could not allocte cpu device\n");
|
||||
printk(BIOS_CRIT, "Could not allocate cpu device\n");
|
||||
max_cpus--;
|
||||
}
|
||||
cpu_devs[i] = new;
|
||||
|
|
|
@ -348,7 +348,7 @@ static void setup_ied_area(struct smm_relocation_params *params)
|
|||
memset(ied_base + (1 << 20), 0, (32 << 10));
|
||||
|
||||
/* According to the BWG MP init section 2MiB of memory at IEDBASE +
|
||||
* 2MiB should be zeroed as well. However, I suspect what is inteneded
|
||||
* 2MiB should be zeroed as well. However, I suspect what is intended
|
||||
 * is to clear the memory covered by EMRR. TODO(adurbin): figure out if
 * this is really required. */
|
||||
//memset(ied_base + (2 << 20), 0, (2 << 20));
|
||||
}
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
#include "chip.h"
|
||||
|
||||
/*
|
||||
* List of suported C-states in this processor
|
||||
* List of supported C-states in this processor
|
||||
*
|
||||
* Latencies are typical worst-case package exit time in uS
|
||||
* taken from the SandyBridge BIOS specification.
|
||||
|
@ -249,7 +249,7 @@ static void configure_thermal_target(void)
|
|||
return;
|
||||
conf = lapic->chip_info;
|
||||
|
||||
/* Set TCC activaiton offset if supported */
|
||||
/* Set TCC activation offset if supported */
|
||||
msr = rdmsr(MSR_PLATFORM_INFO);
|
||||
if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
|
||||
msr = rdmsr(MSR_TEMPERATURE_TARGET);
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
#include "chip.h"
|
||||
|
||||
/*
|
||||
* List of suported C-states in this processor
|
||||
* List of supported C-states in this processor
|
||||
*
|
||||
* Latencies are typical worst-case package exit time in uS
|
||||
* taken from the SandyBridge BIOS specification.
|
||||
|
@ -374,7 +374,7 @@ static void configure_thermal_target(void)
|
|||
return;
|
||||
conf = lapic->chip_info;
|
||||
|
||||
/* Set TCC activaiton offset if supported */
|
||||
/* Set TCC activation offset if supported */
|
||||
msr = rdmsr(MSR_PLATFORM_INFO);
|
||||
if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
|
||||
msr = rdmsr(MSR_TEMPERATURE_TARGET);
|
||||
|
|
|
@ -34,7 +34,7 @@
|
|||
*/
|
||||
|
||||
/* This code is ported from coreboot v1.
|
||||
* The L2 cache initalization sequence here only apply to SECC/SECC2 P6 family
|
||||
* The L2 cache initialization sequence here only apply to SECC/SECC2 P6 family
|
||||
* CPUs with Klamath (63x), Deschutes (65x) and Katmai (67x) cores.
|
||||
* It is not required for Coppermine (68x) and Tualatin (6bx) cores.
|
||||
* It is currently not known if Celerons with Mendocino (66x) core require the
|
||||
|
@ -295,7 +295,7 @@ int write_l2(u32 address, u32 data)
|
|||
// data1 = ffffffff
|
||||
// data2 = 000000dc
|
||||
// address = 00aaaaaa
|
||||
// Final address signalled:
|
||||
// Final address signaled:
|
||||
// 000fffff fff000c0 000dcaaa aaa00000
|
||||
data1 = data & 0xff;
|
||||
data1 = data1 << 21;
|
||||
|
@ -343,7 +343,7 @@ int test_l2_address_alias(u32 address1, u32 address2,
|
|||
|
||||
/* Calculates the L2 cache size.
|
||||
*
|
||||
* Reference: Intel(R) 64 and IA-32 Architectures Software Developer’s Manual
|
||||
 * Reference: Intel(R) 64 and IA-32 Architectures Software Developer's Manual
|
||||
* Volume 3B: System Programming Guide, Part 2, Intel pub. 253669, pg. B-172.
|
||||
*
|
||||
*/
|
||||
|
|
|
@ -72,7 +72,7 @@ unsigned long clock_get_periph_rate(enum periph_id peripheral);
|
|||
#define MCT_HZ 24000000
|
||||
|
||||
/*
|
||||
* Set mshci controller instances clock drivder
|
||||
* Set mshci controller instances clock divider
|
||||
*
|
||||
* @param enum periph_id instance of the mshci controller
|
||||
*
|
||||
|
@ -521,7 +521,7 @@ struct exynos5_mct_regs {
|
|||
};
|
||||
|
||||
#define EXYNOS5_EPLLCON0_LOCKED_SHIFT 29 /* EPLL Locked bit position*/
|
||||
#define EPLL_SRC_CLOCK 24000000 /*24 MHz Cristal Input */
|
||||
#define EPLL_SRC_CLOCK 24000000 /*24 MHz Crystal Input */
|
||||
#define TIMEOUT_EPLL_LOCK 1000
|
||||
|
||||
#define AUDIO_0_RATIO_MASK 0x0f
|
||||
|
|
|
@ -160,7 +160,7 @@ static struct clk_bit_info clk_bit_info[PERIPH_ID_COUNT] = {
|
|||
{24, 1, 20, -1}, /* PERIPH_ID_SATA */
|
||||
};
|
||||
|
||||
/* Epll Clock division values to achive different frequency output */
|
||||
/* Epll Clock division values to achieve different frequency output */
|
||||
static struct st_epll_con_val epll_div[] = {
|
||||
{ 192000000, 0, 48, 3, 1, 0 },
|
||||
{ 180000000, 0, 45, 3, 1, 0 },
|
||||
|
@ -405,7 +405,7 @@ void clock_ll_set_pre_ratio(enum periph_id periph_id, unsigned divisor)
|
|||
u32 *reg;
|
||||
|
||||
/*
|
||||
* For now we only handle a very small subset of peipherals here.
|
||||
* For now we only handle a very small subset of peripherals here.
|
||||
* Others will need to (and do) mangle the clock registers
|
||||
* themselves, At some point it is hoped that this function can work
|
||||
* from a table or calculated register offset / mask. For now this
|
||||
|
@ -636,7 +636,7 @@ int clock_epll_set_rate(unsigned long rate)
|
|||
epll_con |= epll_div[i].s_div << EPLL_CON0_SDIV_SHIFT;
|
||||
|
||||
/*
|
||||
* Required period ( in cycles) to genarate a stable clock output.
|
||||
* Required period ( in cycles) to generate a stable clock output.
|
||||
* The maximum clock time can be up to 3000 * PDIV cycles of PLLs
|
||||
* frequency input (as per spec)
|
||||
*/
|
||||
|
@ -675,7 +675,7 @@ int clock_set_i2s_clk_prescaler(unsigned int src_frq, unsigned int dst_frq)
|
|||
unsigned int div ;
|
||||
|
||||
if ((dst_frq == 0) || (src_frq == 0)) {
|
||||
printk(BIOS_DEBUG, "%s: Invalid requency input for prescaler\n", __func__);
|
||||
printk(BIOS_DEBUG, "%s: Invalid frequency input for prescaler\n", __func__);
|
||||
printk(BIOS_DEBUG, "src frq = %d des frq = %d ", src_frq, dst_frq);
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -107,7 +107,7 @@ static void exynos_displayport_init(device_t dev)
|
|||
*
|
||||
* Note: We may want to do something clever to ensure the framebuffer
|
||||
* region is aligned such that we don't change dcache policy for other
|
||||
* stuff inadvertantly.
|
||||
* stuff inadvertently.
|
||||
*/
|
||||
uint32_t lower = ALIGN_DOWN(lcdbase, MiB);
|
||||
uint32_t upper = ALIGN_UP(lcdbase + fb_size, MiB);
|
||||
|
|
|
@ -328,7 +328,7 @@ struct mem_timings {
|
|||
uint8_t chips_per_channel; /* number of chips per channel */
|
||||
uint8_t chips_to_configure; /* number of chips to configure */
|
||||
uint8_t send_zq_init; /* 1 to send this command */
|
||||
unsigned int impedance; /* drive strength impedeance */
|
||||
unsigned int impedance; /* drive strength impedance */
|
||||
uint8_t gate_leveling_enable; /* check gate leveling is enabled */
|
||||
};
|
||||
|
||||
|
|
|
@ -64,7 +64,7 @@ int dmc_config_zq(struct mem_timings *mem,
|
|||
val &= ~ZQ_MANUAL_STR;
|
||||
|
||||
/*
|
||||
* Since we are manaully calibrating the ZQ values,
|
||||
* Since we are manually calibrating the ZQ values,
|
||||
* we are looping for the ZQ_init to complete.
|
||||
*/
|
||||
i = ZQ_INIT_TIMEOUT;
|
||||
|
@ -97,12 +97,12 @@ void update_reset_dll(struct exynos5_dmc *dmc, enum ddr_mode mode)
|
|||
writel(val, &dmc->phycontrol0);
|
||||
}
|
||||
|
||||
/* Update DLL Information: Force DLL Resyncronization */
|
||||
/* Update DLL Information: Force DLL Resynchronization */
|
||||
val = readl(&dmc->phycontrol0);
|
||||
val |= FP_RSYNC;
|
||||
writel(val, &dmc->phycontrol0);
|
||||
|
||||
/* Reset Force DLL Resyncronization */
|
||||
/* Reset Force DLL Resynchronization */
|
||||
val = readl(&dmc->phycontrol0);
|
||||
val &= ~FP_RSYNC;
|
||||
writel(val, &dmc->phycontrol0);
|
||||
|
|
|
@ -139,9 +139,9 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
|
|||
writel(mem->concontrol | (mem->rd_fetch << CONCONTROL_RD_FETCH_SHIFT),
|
||||
&dmc->concontrol);
|
||||
|
||||
/* Memory Channel Inteleaving Size */
|
||||
/* Memory Channel Interleaving Size */
|
||||
printk(BIOS_SPEW, "ddr3_mem_ctrl_init: "
|
||||
"Memory Channel Inteleaving Size\n");
|
||||
"Memory Channel Interleaving Size\n");
|
||||
writel(mem->iv_size, &dmc->ivcontrol);
|
||||
|
||||
/* Set DMC MEMCONTROL register */
|
||||
|
@ -161,7 +161,7 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
|
|||
|
||||
/* Power Down mode Configuration */
|
||||
printk(BIOS_SPEW, "ddr3_mem_ctrl_init: "
|
||||
"Power Down mode Configuraation\n");
|
||||
"Power Down mode Configuration\n");
|
||||
writel(mem->dpwrdn_cyc << PWRDNCONFIG_DPWRDN_CYC_SHIFT |
|
||||
mem->dsref_cyc << PWRDNCONFIG_DSREF_CYC_SHIFT,
|
||||
&dmc->pwrdnconfig);
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
#define MAX_CR_LOOP 5
|
||||
#define MAX_EQ_LOOP 4
|
||||
|
||||
/* Link tare type */
|
||||
/* Link rate type */
|
||||
enum link_rate {
|
||||
LINK_RATE_1_62GBPS = 0x06,
|
||||
LINK_RATE_2_70GBPS = 0x0a
|
||||
|
@ -126,7 +126,7 @@ struct s5p_dp_device {
|
|||
*/
|
||||
void s5p_dp_reset(struct s5p_dp_device *dp);
|
||||
/*
|
||||
* Initialize DP to recieve video stream
|
||||
* Initialize DP to receive video stream
|
||||
*
|
||||
* param dp pointer to main s5p-dp structure
|
||||
*/
|
||||
|
@ -214,8 +214,8 @@ int s5p_dp_is_slave_video_stream_clock_on(struct s5p_dp_device *dp);
|
|||
*
|
||||
* param dp pointer to main s5p-dp structure
|
||||
* param type clock_recovery_m_value_type
|
||||
* param m_value to caluculate m_vid value
|
||||
* param n_value to caluculate n_vid value
|
||||
* param m_value to calculate m_vid value
|
||||
* param n_value to calculate n_vid value
|
||||
*/
|
||||
void s5p_dp_set_video_cr_mn(struct s5p_dp_device *dp,
|
||||
enum clock_recovery_m_value_type type,
|
||||
|
|
|
@ -91,7 +91,7 @@ void s5p_dp_reset(struct s5p_dp_device *dp)
|
|||
/* Set interrupt pin assertion polarity as high */
|
||||
writel(INT_POL0 | INT_POL1, &base->int_ctl);
|
||||
|
||||
/* Clear pending regisers */
|
||||
/* Clear pending registers */
|
||||
writel(0xff, &base->common_int_sta_1);
|
||||
writel(0x4f, &base->common_int_sta_2);
|
||||
writel(0xe0, &base->common_int_sta_3);
|
||||
|
@ -156,7 +156,7 @@ void s5p_dp_init_aux(struct s5p_dp_device *dp)
|
|||
u32 reg;
|
||||
struct exynos5_dp *base = dp->base;
|
||||
|
||||
/* Clear inerrupts related to AUX channel */
|
||||
/* Clear interrupts related to AUX channel */
|
||||
reg = RPLY_RECEIV | AUX_ERR;
|
||||
writel(reg, &base->dp_int_sta);
|
||||
|
||||
|
|
|
@ -86,11 +86,11 @@ struct exynos5_fimd_panel {
|
|||
unsigned int upper_margin; /* Vertical Backporch */
|
||||
unsigned int lower_margin; /* Vertical frontporch */
|
||||
unsigned int vsync; /* Vertical Sync Pulse Width */
|
||||
unsigned int left_margin; /* Horizantal Backporch */
|
||||
unsigned int left_margin; /* Horizontal Backporch */
|
||||
unsigned int right_margin; /* Horizontal Frontporch */
|
||||
unsigned int hsync; /* Horizontal Sync Pulse Width */
|
||||
unsigned int xres; /* X Resolution */
|
||||
unsigned int yres; /* Y Resopultion */
|
||||
unsigned int yres; /* Y Resolution */
|
||||
};
|
||||
|
||||
/* LCDIF Register Map */
|
||||
|
|
|
@ -94,7 +94,7 @@ void exynos_pinmux_sdmmc2(void)
|
|||
void exynos_pinmux_sdmmc3(void)
|
||||
{
|
||||
/*
|
||||
* TODO: Need to add defintions for GPC4 before
|
||||
* TODO: Need to add definitions for GPC4 before
|
||||
* enabling this.
|
||||
*/
|
||||
printk(BIOS_DEBUG, "SDMMC3 not supported yet");
|
||||
|
|
|
@ -661,7 +661,7 @@ struct exynos5_phy_control;
|
|||
#define MEM_TERM_EN (1 << 31) /* Termination enable for memory */
|
||||
#define PHY_TERM_EN (1 << 30) /* Termination enable for PHY */
|
||||
#define DMC_CTRL_SHGATE (1 << 29) /* Duration of DQS gating signal */
|
||||
#define FP_RSYNC (1 << 3) /* Force DLL resyncronization */
|
||||
#define FP_RSYNC (1 << 3) /* Force DLL resynchronization */
|
||||
|
||||
/* Driver strength for CK, CKE, CS & CA */
|
||||
#define IMP_OUTPUT_DRV_40_OHM 0x5
|
||||
|
@ -676,7 +676,7 @@ struct exynos5_phy_control;
|
|||
|
||||
struct mem_timings;
|
||||
|
||||
/* Errors that we can encourter in low-level setup */
|
||||
/* Errors that we can encounter in low-level setup */
|
||||
enum {
|
||||
SETUP_ERR_OK,
|
||||
SETUP_ERR_RDLV_COMPLETE_TIMEOUT = -1,
|
||||
|
|
|
@ -42,7 +42,7 @@ static void exynos_spi_rx_tx(struct exynos_spi *regs, int todo,
|
|||
unsigned int *rxp = (unsigned int *)(dinp + (i * (32 * 1024)));
|
||||
unsigned int out_bytes, in_bytes;
|
||||
|
||||
// TODO In currrent implementation, every read/write must be aligned to
|
||||
// TODO In current implementation, every read/write must be aligned to
|
||||
// 4 bytes, otherwise you may get timeout or other unexpected results.
|
||||
ASSERT(todo % 4 == 0);
|
||||
|
||||
|
@ -90,7 +90,7 @@ int exynos_spi_open(struct exynos_spi *regs)
|
|||
SPI_MODE_CH_WIDTH_WORD | SPI_MODE_BUS_WIDTH_WORD);
|
||||
clrbits_le32(®s->ch_cfg, SPI_CH_CPOL_L); /* CPOL: active high */
|
||||
|
||||
/* clear rx and tx channel if set priveously */
|
||||
/* clear rx and tx channel if set previously */
|
||||
clrbits_le32(®s->ch_cfg, SPI_RX_CH_ON | SPI_TX_CH_ON);
|
||||
|
||||
setbits_le32(®s->swap_cfg,
|
||||
|
|
|
@ -69,8 +69,8 @@ struct tmu_info exynos5250_tmu_info = {
|
|||
|
||||
/*
|
||||
* After reading temperature code from register, compensating
|
||||
* its value and calculating celsius temperatue,
|
||||
* get current temperatue.
|
||||
* its value and calculating celsius temperature,
|
||||
* get current temperature.
|
||||
*
|
||||
* @return current temperature of the chip as sensed by TMU
|
||||
*/
|
||||
|
|
|
@ -65,7 +65,7 @@ enum tmu_status_t {
|
|||
TMU_STATUS_TRIPPED,
|
||||
};
|
||||
|
||||
/* Tmeperature threshold values for various thermal events */
|
||||
/* Temperature threshold values for various thermal events */
|
||||
struct temperature_params {
|
||||
/* minimum value in temperature code range */
|
||||
unsigned int min_val;
|
||||
|
|
|
@ -37,7 +37,7 @@ static uint32_t base_port = CONFIG_CONSOLE_SERIAL_UART_ADDRESS;
|
|||
* The coefficient, used to calculate the baudrate on S5P UARTs is
|
||||
* calculated as
|
||||
* C = UBRDIV * 16 + number_of_set_bits_in_UDIVSLOT
|
||||
* however, section 31.6.11 of the datasheet doesn't recomment using 1 for 1,
|
||||
* however, section 31.6.11 of the datasheet doesn't recommend using 1 for 1,
|
||||
* 3 for 2, ... (2^n - 1) for n, instead, they suggest using these constants:
|
||||
*/
|
||||
static const int udivslot[] = {
|
||||
|
@ -129,7 +129,7 @@ static int exynos5_uart_err_check(int op)
|
|||
|
||||
/*
|
||||
* Read a single byte from the serial port. Returns 1 on success, 0
|
||||
* otherwise. When the function is succesfull, the character read is
|
||||
* otherwise. When the function is successful, the character read is
|
||||
* written into its argument c.
|
||||
*/
|
||||
static unsigned char exynos5_uart_rx_byte(void)
|
||||
|
|
|
@ -147,7 +147,7 @@ static void am335x_uart_init_dev(void)
|
|||
|
||||
/*
|
||||
* Read a single byte from the serial port. Returns 1 on success, 0
|
||||
* otherwise. When the function is succesfull, the character read is
|
||||
* otherwise. When the function is successful, the character read is
|
||||
* written into its argument c.
|
||||
*/
|
||||
static unsigned char am335x_uart_rx_byte(void)
|
||||
|
|
|
@ -130,7 +130,7 @@ struct am335x_uart {
|
|||
uint8_t rsvd_0x36[2];
|
||||
uint16_t blr; /* BOF control */
|
||||
uint8_t rsvd_0x3a[2];
|
||||
uint16_t acreg; /* auxilliary control */
|
||||
uint16_t acreg; /* auxiliary control */
|
||||
uint8_t rsvd_0x3e[2];
|
||||
|
||||
/* 0x40 */
|
||||
|
@ -139,7 +139,7 @@ struct am335x_uart {
|
|||
uint16_t ssr; /* supplementary status */
|
||||
uint8_t rsvd_0x46[2];
|
||||
|
||||
uint16_t eblr; /* BOF length (operatoinal mode only) */
|
||||
uint16_t eblr; /* BOF length (operational mode only) */
|
||||
uint8_t rsvd_0x4a[6];
|
||||
|
||||
/* 0x50 */
|
||||
|
|
|
@ -37,7 +37,7 @@ static ucode_update_status nano_apply_ucode(const nano_ucode_header *ucode)
|
|||
msr.hi = 0;
|
||||
wrmsr(MSR_IA32_BIOS_UPDT_TRIG, msr);
|
||||
|
||||
/* Let's see if we updated succesfully */
|
||||
/* Let's see if we updated successfully */
|
||||
msr = rdmsr(MSR_UCODE_UPDATE_STATUS);
|
||||
|
||||
return msr.lo & 0x07;
|
||||
|
@ -80,7 +80,7 @@ static void nano_print_ucode_status(ucode_update_status stat)
|
|||
switch(stat)
|
||||
{
|
||||
case UCODE_UPDATE_SUCCESS:
|
||||
printk(BIOS_INFO, "Microcode update succesful.\n");
|
||||
printk(BIOS_INFO, "Microcode update successful.\n");
|
||||
break;
|
||||
case UCODE_UPDATE_FAIL:
|
||||
printk(BIOS_ALERT, "Microcode update failed, bad environment."
|
||||
|
|
|
@ -56,11 +56,11 @@ typedef struct {
|
|||
u32 applicable_fms; /* Fam/model/stepping to which ucode applies */
|
||||
u32 checksum; /* Two's complement checksum of ucode+header */
|
||||
u32 loader_revision; /* Revision of hardware ucode update loader*/
|
||||
u32 rfu_1; /* Reservod for future use */
|
||||
u32 rfu_1; /* Reserved for future use */
|
||||
u32 payload_size; /* Size of the ucode payload only */
|
||||
u32 total_size; /* Size of the ucode, including header */
|
||||
char name[8]; /* ASCII string of ucode filename */
|
||||
u32 rfu_2; /* Reservod for future use */
|
||||
u32 rfu_2; /* Reserved for future use */
|
||||
/* First double-word of the ucode payload
|
||||
* Its address represents the beginning of the ucode update we need to
|
||||
* send to the CPU */
|
||||
|
|
|
@ -33,7 +33,7 @@ extern char _car_data_end[];
|
|||
|
||||
/*
|
||||
* The car_migrated global variable determines if the cache-as-ram space has
|
||||
* been migrated to real RAM. It does this by asumming the following things:
|
||||
* been migrated to real RAM. It does this by assuming the following things:
|
||||
* 1. cache-as-ram space is zero'd out once it is set up.
|
||||
* 2. Either the cache-as-ram space is memory-backed after getting torn down
|
||||
* or the space returns 0xff's for each byte read.
|
||||
|
|
|
@ -40,7 +40,7 @@
|
|||
* will return 0, meaning no CPU.
|
||||
*
|
||||
* We actually handling that case by noting which cpus startup
|
||||
* and not telling anyone about the ones that dont.
|
||||
* and not telling anyone about the ones that don't.
|
||||
*/
|
||||
|
||||
/* Start-UP IPI vector must be 4kB aligned and below 1MB. */
|
||||
|
|
|
@ -32,7 +32,7 @@ static void cache_ramstage(void)
|
|||
const int addr_det = 0;
|
||||
|
||||
/* the fixed and variable MTTRs are power-up with random values,
|
||||
* clear them to MTRR_TYPE_UNCACHEABLE for safty.
|
||||
* clear them to MTRR_TYPE_UNCACHEABLE for safety.
|
||||
*/
|
||||
static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
|
||||
{
|
||||
|
@ -43,7 +43,7 @@ static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
|
|||
msr_t msr;
|
||||
const unsigned long *msr_addr;
|
||||
|
||||
/* Inialize all of the relevant msrs to 0 */
|
||||
/* Initialize all of the relevant msrs to 0 */
|
||||
msr.lo = 0;
|
||||
msr.hi = 0;
|
||||
unsigned long msr_nr;
|
||||
|
|
|
@ -94,7 +94,7 @@ static inline unsigned int fms(unsigned int x)
|
|||
return r;
|
||||
}
|
||||
|
||||
/* fls: find least sigificant bit set */
|
||||
/* fls: find least significant bit set */
|
||||
static inline unsigned int fls(unsigned int x)
|
||||
{
|
||||
int r;
|
||||
|
@ -160,8 +160,8 @@ static struct memranges *get_physical_address_space(void)
|
|||
static struct memranges addr_space_storage;
|
||||
|
||||
/* In order to handle some chipsets not being able to pre-determine
|
||||
* uncacheable ranges, such as graphics memory, at resource inseration
|
||||
* time remove unacheable regions from the cacheable ones. */
|
||||
* uncacheable ranges, such as graphics memory, at resource insertion
|
||||
* time remove uncacheable regions from the cacheable ones. */
|
||||
if (addr_space == NULL) {
|
||||
struct range_entry *r;
|
||||
unsigned long mask;
|
||||
|
@ -216,7 +216,7 @@ static struct memranges *get_physical_address_space(void)
|
|||
}
|
||||
|
||||
/* Fixed MTRR descriptor. This structure defines the step size and begin
|
||||
* and end (exclusive) address covered by a set of fixe MTRR MSRs.
|
||||
* and end (exclusive) address covered by a set of fixed MTRR MSRs.
|
||||
* It also describes the offset in byte intervals to store the calculated MTRR
|
||||
* type in an array. */
|
||||
struct fixed_mtrr_desc {
|
||||
|
@ -533,7 +533,7 @@ static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
|
|||
struct range_entry *next;
|
||||
|
||||
/*
|
||||
* Determine MTRRs based on the following algoirthm for the given entry:
|
||||
* Determine MTRRs based on the following algorithm for the given entry:
|
||||
* +------------------+ b2 = ALIGN_UP(end)
|
||||
* | 0 or more bytes | <-- hole is carved out between b1 and b2
|
||||
* +------------------+ a2 = b1 = end
|
||||
|
@ -571,7 +571,7 @@ static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
|
|||
|
||||
b1 = a2;
|
||||
|
||||
/* First check if a1 is >= 4GiB and the current etnry is the last
|
||||
/* First check if a1 is >= 4GiB and the current entry is the last
|
||||
* entry. If so perform an optimization of covering a larger range
|
||||
* defined by the base address' alignment. */
|
||||
if (a1 >= RANGE_4GB && next == NULL) {
|
||||
|
@ -686,10 +686,10 @@ static int calc_var_mtrrs(struct memranges *addr_space,
|
|||
* 1. UC as default type with no holes at top of range.
|
||||
* 2. UC as default using holes at top of range.
|
||||
* 3. WB as default.
|
||||
* The lowest count is then used as default after totalling all
|
||||
* MTRRs. Note that the optimal algoirthm for UC default is marked in
|
||||
* The lowest count is then used as default after totaling all
|
||||
* MTRRs. Note that the optimal algorithm for UC default is marked in
|
||||
* the tag of each range regardless of final decision. UC takes
|
||||
* precedence in the MTRR archiecture. Therefore, only holes can be
|
||||
* precedence in the MTRR architecture. Therefore, only holes can be
|
||||
* used when the type of the region is MTRR_TYPE_WRBACK with
|
||||
* MTRR_TYPE_UNCACHEABLE as the default type.
|
||||
*/
|
||||
|
|
|
@ -24,16 +24,16 @@
|
|||
#include <console/console.h>
|
||||
|
||||
/*
|
||||
* Compoments that make up the SMRAM:
|
||||
* Components that make up the SMRAM:
|
||||
* 1. Save state - the total save state memory used
|
||||
* 2. Stack - stacks for the CPUs in the SMM handler
|
||||
* 3. Stub - SMM stub code for calling into handler
|
||||
* 4. Handler - C-based SMM handler.
|
||||
*
|
||||
* The compoents are assumed to consist of one consecutive region.
|
||||
* The components are assumed to consist of one consecutive region.
|
||||
*/
|
||||
|
||||
/* These paramters are used by the SMM stub code. A pointer to the params
|
||||
/* These parameters are used by the SMM stub code. A pointer to the params
|
||||
* is also passed to the C-base handler. */
|
||||
struct smm_stub_params {
|
||||
u32 stack_size;
|
||||
|
@ -80,7 +80,7 @@ static void smm_place_jmp_instructions(void *entry_start, int stride, int num,
|
|||
|
||||
/* Each entry point has an IP value of 0x8000. The SMBASE for each
|
||||
* cpu is different so the effective address of the entry instruction
|
||||
* is different. Therefore, the relative displacment for each entry
|
||||
* is different. Therefore, the relative displacement for each entry
|
||||
* instruction needs to be updated to reflect the current effective
|
||||
* IP. Additionally, the IP result from the jmp instruction is
|
||||
* calculated using the next instruction's address so the size of
|
||||
|
@ -140,7 +140,7 @@ static void smm_stub_place_staggered_entry_points(char *base,
|
|||
stub_entry_offset = rmodule_entry_offset(smm_stub);
|
||||
|
||||
/* If there are staggered entry points or the stub is not located
|
||||
* at the SMM entry point then jmp instructionss need to be placed. */
|
||||
* at the SMM entry point then jmp instructions need to be placed. */
|
||||
if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
|
||||
int num_entries;
|
||||
|
||||
|
@ -297,7 +297,7 @@ int smm_setup_relocation_handler(struct smm_loader_params *params)
|
|||
return -1;
|
||||
|
||||
/* Since the relocation handler always uses stack, adjust the number
|
||||
* of conccurent stack users to be CONFIG_MAX_CPUS. */
|
||||
* of concurrent stack users to be CONFIG_MAX_CPUS. */
|
||||
if (params->num_concurrent_stacks == 0)
|
||||
params->num_concurrent_stacks = CONFIG_MAX_CPUS;
|
||||
|
||||
|
@ -318,7 +318,7 @@ int smm_setup_relocation_handler(struct smm_loader_params *params)
|
|||
*
|
||||
* It should be noted that this algorithm will not work for
|
||||
* SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
|
||||
* expectes a region large enough to encompass the handler and stacks
|
||||
* expects a region large enough to encompass the handler and stacks
|
||||
* as well as the SMM_DEFAULT_SIZE.
|
||||
*/
|
||||
int smm_load_module(void *smram, int size, struct smm_loader_params *params)
|
||||
|
|
Loading…
Reference in New Issue