cpu/intel: Wrap lines at 80 columns
Fix the following warning detected by checkpatch.pl:

WARNING: line over 80 characters

TEST=Build and run on Galileo Gen2

Change-Id: I74f25da5c53bd518189ce86817d6e3385b29c3b4
Signed-off-by: Lee Leahy <Leroy.P.Leahy@intel.com>
Reviewed-on: https://review.coreboot.org/18850
Tested-by: build bot (Jenkins)
Reviewed-by: Martin Roth <martinroth@google.com>
This commit is contained in:
parent 26eeb0f8ad
commit cdc50480c4
@@ -40,7 +40,8 @@ void set_vmx(void)
 	msr = rdmsr(IA32_FEATURE_CONTROL);
 
 	if (msr.lo & (1 << 0)) {
-		printk(BIOS_ERR, "VMX is locked, so %s will do nothing\n", __func__);
+		printk(BIOS_ERR, "VMX is locked, so %s will do nothing\n",
+			__func__);
 		/* VMX locked. If we set it again we get an illegal
 		 * instruction
 		 */

@@ -66,6 +67,7 @@ void set_vmx(void)
 		wrmsr(IA32_FEATURE_CONTROL, msr);
 	}
 
-	printk(BIOS_DEBUG, "VMX status: %s, %s\n", enable ? "enabled" : "disabled",
+	printk(BIOS_DEBUG, "VMX status: %s, %s\n",
+		enable ? "enabled" : "disabled",
 		lock ? "locked" : "unlocked");
 }
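For context, the printk that gets wrapped in the first hunk guards against the IA32_FEATURE_CONTROL lock bit: once bit 0 is set, the MSR can no longer be rewritten until reset. Below is a minimal sketch of that pattern, assuming coreboot's msr_t/rdmsr/wrmsr helpers from <cpu/x86/msr.h> and printk from <console/console.h>; the local macro values mirror the SDM and are only fallbacks in case the headers already define them.

#include <console/console.h>
#include <cpu/x86/msr.h>

#ifndef IA32_FEATURE_CONTROL
#define IA32_FEATURE_CONTROL	0x3a
#endif
#define FEATURE_CONTROL_LOCK	(1 << 0)
#define FEATURE_ENABLE_VMX	(1 << 2)

static void enable_vmx_sketch(void)
{
	msr_t msr = rdmsr(IA32_FEATURE_CONTROL);

	if (msr.lo & FEATURE_CONTROL_LOCK) {
		/* Locked: a further write would fault, so bail out. */
		printk(BIOS_ERR, "VMX is locked, so %s will do nothing\n",
			__func__);
		return;
	}

	/* Enable VMX and write the control MSR back. */
	msr.lo |= FEATURE_ENABLE_VMX;
	wrmsr(IA32_FEATURE_CONTROL, msr);
}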
@@ -33,11 +33,13 @@ static void check_for_warm_reset(void)
 {
 
 	/*
-	 * Check if INIT# is asserted by port 0xCF9 and whether RCBA has been set.
-	 * If either is true, then this is a warm reset so execute a Hard Reset
+	 * Check if INIT# is asserted by port 0xCF9 and whether RCBA has been
+	 * set. If either is true, then this is a warm reset so execute a
+	 * Hard Reset
 	 */
 	if ((inb(0xcf9) == 0x04) ||
-		(pci_io_read_config32(SOC_LPC_DEV, RCBA) & RCBA_ENABLE)) {
+		(pci_io_read_config32(SOC_LPC_DEV, RCBA)
+			& RCBA_ENABLE)) {
 		outb(0x00, 0xcf9);
 		outb(0x06, 0xcf9);
 	}
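The warm-reset hunk reads back the reset control register at I/O port 0xCF9 and forces a full reset when a warm reset is detected. A rough sketch of that flow, assuming coreboot's inb/outb from <arch/io.h>; the RCBA check is passed in as a plain flag here rather than repeating the PCI config read from the hunk above.

#include <arch/io.h>

#define RESET_CTL_PORT	0xcf9

static void force_hard_reset_if_warm(int rcba_already_set)
{
	/*
	 * As in the hunk above: a 0xCF9 read-back of 0x04 or an RCBA that is
	 * already programmed is treated as evidence of a warm reset.  In that
	 * case clear the register, then write 0x06 to request a hard reset.
	 */
	if (inb(RESET_CTL_PORT) == 0x04 || rcba_already_set) {
		outb(0x00, RESET_CTL_PORT);
		outb(0x06, RESET_CTL_PORT);
	}
}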
@@ -419,7 +419,8 @@ void set_power_limits(u8 power_limit_1_time)
 	u8 power_limit_1_val;
 
 	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
-		power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr) - 1;
+		power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr)
+			- 1;
 
 	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
 		return;
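The wrapped assignment above is an index clamp: a requested PL1 time is used to index a seconds-to-MSR-encoding table, and out-of-range requests are clamped to the last entry. A small self-contained illustration of that clamp; the table contents here are made up for the example and do not come from the real SoC code.

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative encoding table; the real values live in the SoC code. */
static const uint8_t power_limit_time_sec_to_msr[] = {
	0x00, 0x0a, 0x0b, 0x11, 0x17,
};

static uint8_t pl1_time_to_msr(uint8_t power_limit_1_time)
{
	/* Clamp out-of-range requests to the largest supported value. */
	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr)
			- 1;
	return power_limit_time_sec_to_msr[power_limit_1_time];
}

int main(void)
{
	printf("0x%02x\n", pl1_time_to_msr(200)); /* clamped to last entry */
	return 0;
}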
@@ -255,7 +255,8 @@ static void fill_in_relocation_params(struct device *dev,
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask)
+		| MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 
 	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */

@@ -266,7 +267,8 @@ static void fill_in_relocation_params(struct device *dev,
 	 * on the number of physical address bits supported. */
 	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
 	params->emrr_base.hi = 0;
-	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask)
+		| MTRR_PHYS_MASK_VALID;
 	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
 
 	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */
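Both relocation-parameter hunks wrap the same computation: an MTRR-style base/mask pair, where the mask is the alignment mask of the region size with the valid bit ORed in. A host-side sketch of that arithmetic; the base and size values are example numbers, and the MTRR constants mirror the definitions used above.

#include <stdint.h>
#include <stdio.h>

#define MTRR_TYPE_WRBACK	6
#define MTRR_PHYS_MASK_VALID	(1 << 11)

int main(void)
{
	uint32_t rmask = ~((1u << 12) - 1);	/* valid address bits, 4KiB aligned */
	uint32_t smram_base = 0x7f800000;	/* example TSEG base */
	uint32_t tseg_size = 8u << 20;		/* example 8 MiB TSEG */

	/* Same form as the wrapped lines: base | type, ~(size - 1) | valid. */
	uint32_t smrr_base = (smram_base & rmask) | MTRR_TYPE_WRBACK;
	uint32_t smrr_mask = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;

	printf("SMRR base 0x%08x mask 0x%08x\n", smrr_base, smrr_mask);
	return 0;
}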
@@ -209,7 +209,8 @@ void intel_update_microcode_from_cbfs(void)
 }
 
 #if ENV_RAMSTAGE
-__attribute__((weak)) int soc_skip_ucode_update(u32 currrent_patch_id, u32 new_patch_id)
+__attribute__((weak)) int soc_skip_ucode_update(u32 currrent_patch_id,
+	u32 new_patch_id)
 {
 	return 0;
 }
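soc_skip_ucode_update() is declared __attribute__((weak)) so the generic microcode driver gets a default "never skip" answer that an SoC can override by providing a strong definition of the same symbol in its own object file. A minimal, host-compilable sketch of that pattern; the parameter names and the main() caller are illustrative only.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/*
 * Weak default used by the generic driver: never skip the update.  An SoC
 * that wants different behaviour defines a non-weak soc_skip_ucode_update()
 * in a separate object file and the linker picks that one instead.
 */
__attribute__((weak)) int soc_skip_ucode_update(u32 current_patch_id,
	u32 new_patch_id)
{
	return 0;
}

int main(void)
{
	printf("skip update? %d\n", soc_skip_ucode_update(0x1c, 0x1e));
	return 0;
}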
@@ -83,7 +83,8 @@ static void configure_c_states(const int quad)
 
 	/* Set Processor MWAIT IO BASE */
 	msr.hi = 0;
-	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
+	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
+		<< 16);
 	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);
 
 	/* Set IO Capture Address */
@@ -34,7 +34,8 @@ static void configure_c_states(void)
 
 	msr = rdmsr(MSR_PMG_CST_CONFIG_CONTROL);
 	msr.lo |= (1 << 15); // Lock configuration
-	msr.lo |= (1 << 10); // redirect IO-based CState transition requests to MWAIT
+	msr.lo |= (1 << 10); // redirect IO-based CState transition requests to
+			     // MWAIT
 	msr.lo &= ~(1 << 9); // Issue a single stop grant cycle upon stpclk
 	msr.lo &= ~7; msr.lo |= HIGHEST_CLEVEL; // support at most C3
 	// TODO Do we want Deep C4 and Dynamic L2 shrinking?

@@ -43,13 +44,15 @@ static void configure_c_states(void)
 	/* Set Processor MWAIT IO BASE (P_BLK) */
 	msr.hi = 0;
 	// TODO Do we want PM1_BASE? Needs SMM?
-	//msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
+	//msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
+	//	<< 16);
 	msr.lo = ((PMB0_BASE + 4) & 0xffff);
 	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);
 
 	/* set C_LVL controls */
 	msr.hi = 0;
-	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16); // -2 because LVL0+1 aren't counted
+	// -2 because LVL0+1 aren't counted
+	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16);
 	wrmsr(MSR_PMG_IO_CAPTURE_ADDR, msr);
 }
 
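Several of the configure_c_states() hunks above and below wrap the same expression: the MWAIT I/O base MSR packs two 16-bit I/O addresses, the C-state P_BLK base in the low half and a second base in the high half. A host-side sketch of the packing; the PMB0_BASE/PMB1_BASE values are placeholders, as the real ones come from the chipset headers.

#include <stdint.h>
#include <stdio.h>

/* Placeholder port bases for the example. */
#define PMB0_BASE 0x510
#define PMB1_BASE 0x800

int main(void)
{
	uint32_t lo;

	/* Low 16 bits: PMB0_BASE + 4; high 16 bits: PMB1_BASE + 9. */
	lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
		<< 16);

	printf("MSR_PMG_IO_BASE_ADDR.lo = 0x%08x\n", lo);
	return 0;
}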
@@ -40,8 +40,9 @@ static struct device_operations cpu_dev_ops = {
 };
 
 static struct cpu_device_id cpu_table[] = {
-	{ X86_VENDOR_INTEL, 0x06D0 }, /* Pentium M on 90nm with 2MiB of L2 cache */
-	{ X86_VENDOR_INTEL, 0x06D6 }, /* Pentium M on 90nm with 2MiB of L2 cache */
+	/* Pentium M on 90nm with 2MiB of L2 cache */
+	{ X86_VENDOR_INTEL, 0x06D0 },
+	{ X86_VENDOR_INTEL, 0x06D6 },
 	{ 0, 0 },
 };
 
@@ -47,12 +47,14 @@ static void configure_c_states(void)
 
 	/* Set Processor MWAIT IO BASE (P_BLK) */
 	msr.hi = 0;
-	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
+	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
+		<< 16);
 	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);
 
 	/* Set C_LVL controls and IO Capture Address */
 	msr.hi = 0;
-	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16); // -2 because LVL0+1 aren't counted
+	// -2 because LVL0+1 aren't counted
+	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16);
 	wrmsr(MSR_PMG_IO_CAPTURE_ADDR, msr);
 }
 
@@ -48,12 +48,14 @@ static void configure_c_states(void)
 
 	/* Set Processor MWAIT IO BASE (P_BLK) */
 	msr.hi = 0;
-	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
+	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
+		<< 16);
 	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);
 
 	/* Set C_LVL controls and IO Capture Address */
 	msr.hi = 0;
-	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16); // -2 because LVL0+1 aren't counted
+	// -2 because LVL0+1 aren't counted
+	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16);
 	wrmsr(MSR_PMG_IO_CAPTURE_ADDR, msr);
 }
 
@@ -169,7 +169,8 @@ int calculate_l2_latency(void)
 	msr = rdmsr(EBL_CR_POWERON);
 	/* Get clock multiplier and FSB frequency.
 	 * Multiplier is in [25:22].
-	 * FSB is in [19:18] in Katmai, [19] in Deschutes ([18] is zero for them).
+	 * FSB is in [19:18] in Katmai, [19] in Deschutes ([18] is zero
+	 * for them).
 	 */
 	eax = msr.lo >> 18;
 	if (signature == 0x650) {

@@ -185,7 +186,9 @@ int calculate_l2_latency(void)
 	for (le = latency_table; le->key != eax; le++) {
 		/* Fail if we get to the end of the table */
 		if (le->key == 0xff) {
-			printk(BIOS_DEBUG, "Could not find key %02x in latency table\n", eax);
+			printk(BIOS_DEBUG,
+				"Could not find key %02x in latency table\n",
+				eax);
 			return -1;
 		}
 	}
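The wrapped printk sits inside a sentinel-terminated table walk: the loop scans latency_table until it either finds the key or hits the 0xff terminator. A self-contained sketch of that lookup pattern; the entry layout and table contents are illustrative, not copied from the driver.

#include <stdint.h>
#include <stdio.h>

struct latency_entry {
	uint8_t key;
	uint8_t value;
};

/* Illustrative table; the 0xff key on the last entry is the terminator. */
static const struct latency_entry latency_table[] = {
	{ 0x10, 0x02 },
	{ 0x11, 0x04 },
	{ 0xff, 0x00 },
};

static int lookup_latency(uint8_t key)
{
	const struct latency_entry *le;

	for (le = latency_table; le->key != key; le++) {
		/* Fail if we get to the end of the table */
		if (le->key == 0xff) {
			printf("Could not find key %02x in latency table\n",
				key);
			return -1;
		}
	}
	return le->value;
}

int main(void)
{
	printf("latency %d\n", lookup_latency(0x11));
	return 0;
}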
@@ -281,7 +284,8 @@ int write_l2(u32 address, u32 data)
 	} else
 		v2 &= 0x7;
 
-	/* This write has to be replicated to a number of places. Not sure what. */
+	/* This write has to be replicated to a number of places. Not sure what.
+	 */
 
 	for (i = 0; i < v2; i++) {
 

@@ -339,7 +343,8 @@ int test_l2_address_alias(u32 address1, u32 address2,
 /* Calculates the L2 cache size.
  *
  * Reference: Intel(R) 64 and IA-32 Architectures Software Developer's Manual
- * Volume 3B: System Programming Guide, Part 2, Intel pub. 253669, pg. B-172.
+ * Volume 3B: System Programming Guide, Part 2, Intel pub. 253669,
+ * pg. B-172.
  *
  */
 int calculate_l2_cache_size(void)
@@ -357,8 +362,9 @@ int calculate_l2_cache_size(void)
 		bblcr3 = msr.lo & ~BBLCR3_L2_SIZE;
 		/*
 		 * Successively write in all the possible cache size per bank
-		 * into BBL_CR_CTL3[17:13], starting from 256KB (00001) to 4MB (10000),
-		 * and read the last value written and accepted by the cache.
+		 * into BBL_CR_CTL3[17:13], starting from 256KB (00001) to 4MB
+		 * (10000), and read the last value written and accepted by the
+		 * cache.
 		 *
 		 * No idea why these bits are writable at all.
 		 */

@@ -386,8 +392,9 @@ int calculate_l2_cache_size(void)
 
 		printk(BIOS_DEBUG, "Maximum cache mask is %x\n", cache_setting);
 
-		/* For now, BBL_CR_CTL3 has the highest cache "size" that register
-		 * will accept. Now we'll ping the cache and see where it wraps.
+		/* For now, BBL_CR_CTL3 has the highest cache "size" that
+		 * register will accept. Now we'll ping the cache and see where
+		 * it wraps.
 		 */
 
 		/* Write aaaaaaaa:aaaaaaaa to address 0 in the l2 cache.
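The rewrapped comments describe a probe loop: candidate size encodings are written into BBL_CR_CTL3[17:13] one after another and read back, so the largest value the cache accepts reveals its per-bank size. A simplified host-side model of that write-and-read-back idea; the MSR is replaced by a variable that saturates like the described hardware, and the saturation point is made up for the example.

#include <stdint.h>
#include <stdio.h>

#define BBLCR3_L2_SIZE		(0x1f << 13)	/* size field, bits [17:13] */
#define MAX_ACCEPTED_SETTING	(0x08 << 13)	/* pretend the cache tops out here */

/* Stand-in for wrmsr/rdmsr of BBL_CR_CTL3: the simulated size field
 * saturates at the largest encoding the cache will accept. */
static uint32_t fake_bblcr3;

static void write_size_field(uint32_t setting)
{
	if (setting > MAX_ACCEPTED_SETTING)
		setting = MAX_ACCEPTED_SETTING;
	fake_bblcr3 = (fake_bblcr3 & ~BBLCR3_L2_SIZE) | setting;
}

int main(void)
{
	uint32_t setting, cache_setting = 0;

	/* Walk encodings 00001..10000 (256KB..4MB per bank) and keep the
	 * last one that reads back unchanged. */
	for (setting = 1 << 13; setting <= 0x10 << 13; setting <<= 1) {
		write_size_field(setting);
		if ((fake_bblcr3 & BBLCR3_L2_SIZE) == setting)
			cache_setting = setting;
	}

	printf("Maximum cache mask is %x\n", cache_setting);
	return 0;
}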
@@ -493,7 +500,8 @@ int calculate_l2_cache_size(void)
 	return 0;
 }
 
-// L2 physical address range can be found from L2 control register 3, bits [2:0].
+// L2 physical address range can be found from L2 control register 3,
+// bits [2:0].
 int calculate_l2_physical_address_range(void)
 {
 	int r0, r3;

@@ -512,7 +520,8 @@ int calculate_l2_physical_address_range(void)
 	else
 		r3 &= 0x7;
 
-	printk(BIOS_DEBUG, "L2 Physical Address Range is %dM\n", (1 << r3) * 512);
+	printk(BIOS_DEBUG, "L2 Physical Address Range is %dM\n",
+		(1 << r3) * 512);
 
 	/* Shift into [22:20] to be saved into BBL_CR_CTL3. */
 	r3 = r3 << 20;
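The wrapped printk here reports the L2 physical address range decoded from L2 control register 3 bits [2:0]: the encoded value r3 selects a power-of-two multiple of 512 MB. A tiny worked example of that decode over all eight encodings.

#include <stdio.h>

int main(void)
{
	int r3;

	/* Bits [2:0] encode the range as (1 << r3) * 512 MB,
	 * e.g. r3 = 3 -> 4096 MB. */
	for (r3 = 0; r3 <= 7; r3++)
		printf("r3=%d -> L2 Physical Address Range is %dM\n",
			r3, (1 << r3) * 512);
	return 0;
}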
@@ -659,8 +668,9 @@ int p6_configure_l2_cache(void)
 		bblctl3.lo = eax;
 		wrmsr(BBL_CR_CTL3, bblctl3);
 
-		/* Write BBL_CR_CTL3[27:26] (reserved??) to bits [1:0] of L2 register 4.
-		 * Apparently all other bits must be preserved, hence these code.
+		/* Write BBL_CR_CTL3[27:26] (reserved??) to bits [1:0] of L2
+		 * register 4. Apparently all other bits must be preserved,
+		 * hence these code.
 		 */
 
 		v = (calc_eax >> 26) & 0x3;

@@ -685,7 +695,8 @@ int p6_configure_l2_cache(void)
 	/* Read L2 register 0 */
 	v = read_l2(0);
 
-	/* If L2(0)[5] set (and can be read properly), enable CRTN and address parity
+	/* If L2(0)[5] set (and can be read properly), enable CRTN and address
+	 * parity
 	 */
 	if (v >= 0 && (v & 0x20)) {
 		bblctl3 = rdmsr(BBL_CR_CTL3);

@@ -700,7 +711,8 @@ int p6_configure_l2_cache(void)
 		set_l2_ecc();
 
 	if (calculate_l2_physical_address_range() != 0) {
-		printk(BIOS_ERR, "Failed to calculate L2 physical address range");
+		printk(BIOS_ERR,
+			"Failed to calculate L2 physical address range");
 		goto bad;
 	}
 
@@ -738,9 +750,10 @@ int p6_configure_l2_cache(void)
 
 		/* Update each way */
 
-		/* We're supposed to get L2 associativity from BBL_CR_CTL3[10:9].
-		 * But this code only applies to certain members of the P6 processor family
-		 * and since all P6 processors have 4-way L2 cache, we can safely assume
+		/* We're supposed to get L2 associativity from
+		 * BBL_CR_CTL3[10:9]. But this code only applies to certain
+		 * members of the P6 processor family and since all P6
+		 * processors have 4-way L2 cache, we can safely assume
 		 * 4 way for all cache operations.
 		 */
 

@@ -748,8 +761,10 @@ int p6_configure_l2_cache(void)
 		/* Send Tag Write w/Data Write (TWW) to L2 controller
 		 * MESI = Invalid
 		 */
-		if (signal_l2(cache_size, 0, 0, v, L2CMD_TWW | L2CMD_MESI_I) != 0) {
-			printk(BIOS_ERR, "Failed on signal_l2(%x, %x)\n",
+		if (signal_l2(cache_size, 0, 0, v, L2CMD_TWW
+			| L2CMD_MESI_I) != 0) {
+			printk(BIOS_ERR,
+				"Failed on signal_l2(%x, %x)\n",
 				cache_size, v);
 			goto bad;
 		}
@@ -133,7 +133,8 @@ static void fill_in_relocation_params(struct smm_relocation_params *params)
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask)
+		| MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 }
 
@@ -226,12 +227,14 @@ static int cpu_smm_setup(void)
 			num_cpus, CONFIG_MAX_CPUS);
 	}
 
-	if (install_relocation_handler(apic_id_map, num_cpus, &smm_reloc_params)) {
+	if (install_relocation_handler(apic_id_map, num_cpus,
+			&smm_reloc_params)) {
 		printk(BIOS_CRIT, "SMM Relocation handler install failed.\n");
 		return -1;
 	}
 
-	if (install_permanent_handler(apic_id_map, num_cpus, &smm_reloc_params)) {
+	if (install_permanent_handler(apic_id_map, num_cpus,
+			&smm_reloc_params)) {
 		printk(BIOS_CRIT, "SMM Permanent handler install failed.\n");
 		return -1;
 	}
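The last hunk wraps the two install calls in cpu_smm_setup(): each installer returns non-zero on failure, and setup bails out with -1 after logging which stage failed. A condensed sketch of that error-handling shape; the stub installers are hypothetical stand-ins for the real functions, which take the APIC ID map, CPU count and relocation parameters as shown above.

#include <stdio.h>

/* Hypothetical stubs standing in for the real installers. */
static int install_relocation_handler(void) { return 0; }
static int install_permanent_handler(void) { return 0; }

static int cpu_smm_setup_sketch(void)
{
	if (install_relocation_handler()) {
		printf("SMM Relocation handler install failed.\n");
		return -1;
	}

	if (install_permanent_handler()) {
		printf("SMM Permanent handler install failed.\n");
		return -1;
	}

	return 0;
}

int main(void)
{
	return cpu_smm_setup_sketch() ? 1 : 0;
}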