cpu/intel: Wrap lines at 80 columns

Fix the following warning detected by checkpatch.pl:

WARNING: line over 80 characters

TEST=Build and run on Galileo Gen2

Change-Id: I74f25da5c53bd518189ce86817d6e3385b29c3b4
Signed-off-by: Lee Leahy <Leroy.P.Leahy@intel.com>
Reviewed-on: https://review.coreboot.org/18850
Tested-by: build bot (Jenkins)
Reviewed-by: Martin Roth <martinroth@google.com>
Author: Lee Leahy
Date:   2017-03-15 18:26:18 -07:00
Parent: 26eeb0f8ad
Commit: cdc50480c4

12 changed files with 76 additions and 41 deletions

View File

@@ -40,7 +40,8 @@ void set_vmx(void)
 	msr = rdmsr(IA32_FEATURE_CONTROL);
 	if (msr.lo & (1 << 0)) {
-		printk(BIOS_ERR, "VMX is locked, so %s will do nothing\n", __func__);
+		printk(BIOS_ERR, "VMX is locked, so %s will do nothing\n",
+			__func__);
 		/* VMX locked. If we set it again we get an illegal
 		 * instruction
 		 */
@@ -66,6 +67,7 @@ void set_vmx(void)
 		wrmsr(IA32_FEATURE_CONTROL, msr);
 	}
-	printk(BIOS_DEBUG, "VMX status: %s, %s\n", enable ? "enabled" : "disabled",
+	printk(BIOS_DEBUG, "VMX status: %s, %s\n",
+		enable ? "enabled" : "disabled",
 		lock ? "locked" : "unlocked");
 }

View File

@@ -33,11 +33,13 @@ static void check_for_warm_reset(void)
 {
 	/*
-	 * Check if INIT# is asserted by port 0xCF9 and whether RCBA has been set.
-	 * If either is true, then this is a warm reset so execute a Hard Reset
+	 * Check if INIT# is asserted by port 0xCF9 and whether RCBA has been
+	 * set. If either is true, then this is a warm reset so execute a
+	 * Hard Reset
 	 */
 	if ((inb(0xcf9) == 0x04) ||
-		(pci_io_read_config32(SOC_LPC_DEV, RCBA) & RCBA_ENABLE)) {
+		(pci_io_read_config32(SOC_LPC_DEV, RCBA)
+		& RCBA_ENABLE)) {
 		outb(0x00, 0xcf9);
 		outb(0x06, 0xcf9);
 	}

View File

@@ -419,7 +419,8 @@ void set_power_limits(u8 power_limit_1_time)
 	u8 power_limit_1_val;
 	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
-		power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr) - 1;
+		power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr)
+			- 1;
 	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
 		return;

View File

@@ -255,7 +255,8 @@ static void fill_in_relocation_params(struct device *dev,
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask)
+		| MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 	/* The EMRR and UNCORE_EMRR are at IEDBASE + 2MiB */
@@ -266,7 +267,8 @@ static void fill_in_relocation_params(struct device *dev,
 	 * on the number of physical address bits supported. */
 	params->emrr_base.lo = emrr_base | MTRR_TYPE_WRBACK;
 	params->emrr_base.hi = 0;
-	params->emrr_mask.lo = (~(emrr_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+	params->emrr_mask.lo = (~(emrr_size - 1) & rmask)
+		| MTRR_PHYS_MASK_VALID;
 	params->emrr_mask.hi = (1 << (phys_bits - 32)) - 1;
 	/* UNCORE_EMRR has 39 bits of valid address aligned to 4KiB. */

View File

@@ -209,7 +209,8 @@ void intel_update_microcode_from_cbfs(void)
 }
 #if ENV_RAMSTAGE
-__attribute__((weak)) int soc_skip_ucode_update(u32 currrent_patch_id, u32 new_patch_id)
+__attribute__((weak)) int soc_skip_ucode_update(u32 currrent_patch_id,
+	u32 new_patch_id)
 {
 	return 0;
 }

View File

@@ -83,7 +83,8 @@ static void configure_c_states(const int quad)
 	/* Set Processor MWAIT IO BASE */
 	msr.hi = 0;
-	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
+	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
+		<< 16);
 	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);
 	/* Set IO Capture Address */

View File

@@ -34,7 +34,8 @@ static void configure_c_states(void)
 	msr = rdmsr(MSR_PMG_CST_CONFIG_CONTROL);
 	msr.lo |= (1 << 15); // Lock configuration
-	msr.lo |= (1 << 10); // redirect IO-based CState transition requests to MWAIT
+	msr.lo |= (1 << 10); // redirect IO-based CState transition requests to
+			     // MWAIT
 	msr.lo &= ~(1 << 9); // Issue a single stop grant cycle upon stpclk
 	msr.lo &= ~7; msr.lo |= HIGHEST_CLEVEL; // support at most C3
 	// TODO Do we want Deep C4 and Dynamic L2 shrinking?
@@ -43,13 +44,15 @@ static void configure_c_states(void)
 	/* Set Processor MWAIT IO BASE (P_BLK) */
 	msr.hi = 0;
 	// TODO Do we want PM1_BASE? Needs SMM?
-	//msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
+	//msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
+	//	<< 16);
 	msr.lo = ((PMB0_BASE + 4) & 0xffff);
 	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);
 	/* set C_LVL controls */
 	msr.hi = 0;
-	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16); // -2 because LVL0+1 aren't counted
+	// -2 because LVL0+1 aren't counted
+	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16);
 	wrmsr(MSR_PMG_IO_CAPTURE_ADDR, msr);
 }

View File

@@ -40,8 +40,9 @@ static struct device_operations cpu_dev_ops = {
 };
 static struct cpu_device_id cpu_table[] = {
-	{ X86_VENDOR_INTEL, 0x06D0 }, /* Pentium M on 90nm with 2MiB of L2 cache */
-	{ X86_VENDOR_INTEL, 0x06D6 }, /* Pentium M on 90nm with 2MiB of L2 cache */
+	/* Pentium M on 90nm with 2MiB of L2 cache */
+	{ X86_VENDOR_INTEL, 0x06D0 },
+	{ X86_VENDOR_INTEL, 0x06D6 },
 	{ 0, 0 },
 };

View File

@@ -47,12 +47,14 @@ static void configure_c_states(void)
 	/* Set Processor MWAIT IO BASE (P_BLK) */
 	msr.hi = 0;
-	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
+	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
+		<< 16);
 	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);
 	/* Set C_LVL controls and IO Capture Address */
 	msr.hi = 0;
-	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16); // -2 because LVL0+1 aren't counted
+	// -2 because LVL0+1 aren't counted
+	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16);
 	wrmsr(MSR_PMG_IO_CAPTURE_ADDR, msr);
 }

View File

@@ -48,12 +48,14 @@ static void configure_c_states(void)
 	/* Set Processor MWAIT IO BASE (P_BLK) */
 	msr.hi = 0;
-	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
+	msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff)
+		<< 16);
 	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);
 	/* Set C_LVL controls and IO Capture Address */
 	msr.hi = 0;
-	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16); // -2 because LVL0+1 aren't counted
+	// -2 because LVL0+1 aren't counted
+	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16);
 	wrmsr(MSR_PMG_IO_CAPTURE_ADDR, msr);
 }

View File

@@ -169,7 +169,8 @@ int calculate_l2_latency(void)
 	msr = rdmsr(EBL_CR_POWERON);
 	/* Get clock multiplier and FSB frequency.
 	 * Multiplier is in [25:22].
-	 * FSB is in [19:18] in Katmai, [19] in Deschutes ([18] is zero for them).
+	 * FSB is in [19:18] in Katmai, [19] in Deschutes ([18] is zero
+	 * for them).
 	 */
 	eax = msr.lo >> 18;
 	if (signature == 0x650) {
@@ -185,7 +186,9 @@ int calculate_l2_latency(void)
 	for (le = latency_table; le->key != eax; le++) {
 		/* Fail if we get to the end of the table */
 		if (le->key == 0xff) {
-			printk(BIOS_DEBUG, "Could not find key %02x in latency table\n", eax);
+			printk(BIOS_DEBUG,
+				"Could not find key %02x in latency table\n",
+				eax);
 			return -1;
 		}
 	}
@@ -281,7 +284,8 @@ int write_l2(u32 address, u32 data)
 	} else
 		v2 &= 0x7;
-	/* This write has to be replicated to a number of places. Not sure what. */
+	/* This write has to be replicated to a number of places. Not sure what.
+	 */
 	for (i = 0; i < v2; i++) {
@@ -339,7 +343,8 @@ int test_l2_address_alias(u32 address1, u32 address2,
 /* Calculates the L2 cache size.
  *
  * Reference: Intel(R) 64 and IA-32 Architectures Software Developer's Manual
- * Volume 3B: System Programming Guide, Part 2, Intel pub. 253669, pg. B-172.
+ * Volume 3B: System Programming Guide, Part 2, Intel pub. 253669,
+ * pg. B-172.
  *
  */
 int calculate_l2_cache_size(void)
@@ -357,8 +362,9 @@ int calculate_l2_cache_size(void)
 		bblcr3 = msr.lo & ~BBLCR3_L2_SIZE;
 		/*
 		 * Successively write in all the possible cache size per bank
-		 * into BBL_CR_CTL3[17:13], starting from 256KB (00001) to 4MB (10000),
-		 * and read the last value written and accepted by the cache.
+		 * into BBL_CR_CTL3[17:13], starting from 256KB (00001) to 4MB
+		 * (10000), and read the last value written and accepted by the
+		 * cache.
 		 *
 		 * No idea why these bits are writable at all.
 		 */
@@ -386,8 +392,9 @@ int calculate_l2_cache_size(void)
 		printk(BIOS_DEBUG, "Maximum cache mask is %x\n", cache_setting);
-		/* For now, BBL_CR_CTL3 has the highest cache "size" that register
-		 * will accept. Now we'll ping the cache and see where it wraps.
+		/* For now, BBL_CR_CTL3 has the highest cache "size" that
+		 * register will accept. Now we'll ping the cache and see where
+		 * it wraps.
 		 */
 		/* Write aaaaaaaa:aaaaaaaa to address 0 in the l2 cache.
@@ -493,7 +500,8 @@ int calculate_l2_cache_size(void)
 	return 0;
 }
-// L2 physical address range can be found from L2 control register 3, bits [2:0].
+// L2 physical address range can be found from L2 control register 3,
+// bits [2:0].
 int calculate_l2_physical_address_range(void)
 {
 	int r0, r3;
@@ -512,7 +520,8 @@ int calculate_l2_physical_address_range(void)
 	else
 		r3 &= 0x7;
-	printk(BIOS_DEBUG, "L2 Physical Address Range is %dM\n", (1 << r3) * 512);
+	printk(BIOS_DEBUG, "L2 Physical Address Range is %dM\n",
+		(1 << r3) * 512);
 	/* Shift into [22:20] to be saved into BBL_CR_CTL3. */
 	r3 = r3 << 20;
@@ -659,8 +668,9 @@ int p6_configure_l2_cache(void)
 		bblctl3.lo = eax;
 		wrmsr(BBL_CR_CTL3, bblctl3);
-		/* Write BBL_CR_CTL3[27:26] (reserved??) to bits [1:0] of L2 register 4.
-		 * Apparently all other bits must be preserved, hence these code.
+		/* Write BBL_CR_CTL3[27:26] (reserved??) to bits [1:0] of L2
+		 * register 4. Apparently all other bits must be preserved,
+		 * hence these code.
 		 */
 		v = (calc_eax >> 26) & 0x3;
@@ -685,7 +695,8 @@ int p6_configure_l2_cache(void)
 	/* Read L2 register 0 */
 	v = read_l2(0);
-	/* If L2(0)[5] set (and can be read properly), enable CRTN and address parity
+	/* If L2(0)[5] set (and can be read properly), enable CRTN and address
+	 * parity
 	 */
 	if (v >= 0 && (v & 0x20)) {
 		bblctl3 = rdmsr(BBL_CR_CTL3);
@@ -700,7 +711,8 @@ int p6_configure_l2_cache(void)
 	set_l2_ecc();
 	if (calculate_l2_physical_address_range() != 0) {
-		printk(BIOS_ERR, "Failed to calculate L2 physical address range");
+		printk(BIOS_ERR,
+			"Failed to calculate L2 physical address range");
 		goto bad;
 	}
@@ -738,9 +750,10 @@ int p6_configure_l2_cache(void)
 	/* Update each way */
-	/* We're supposed to get L2 associativity from BBL_CR_CTL3[10:9].
-	 * But this code only applies to certain members of the P6 processor family
-	 * and since all P6 processors have 4-way L2 cache, we can safely assume
+	/* We're supposed to get L2 associativity from
+	 * BBL_CR_CTL3[10:9]. But this code only applies to certain
+	 * members of the P6 processor family and since all P6
+	 * processors have 4-way L2 cache, we can safely assume
 	 * 4 way for all cache operations.
 	 */
@@ -748,8 +761,10 @@ int p6_configure_l2_cache(void)
 		/* Send Tag Write w/Data Write (TWW) to L2 controller
 		 * MESI = Invalid
 		 */
-		if (signal_l2(cache_size, 0, 0, v, L2CMD_TWW | L2CMD_MESI_I) != 0) {
-			printk(BIOS_ERR, "Failed on signal_l2(%x, %x)\n",
+		if (signal_l2(cache_size, 0, 0, v, L2CMD_TWW
+			| L2CMD_MESI_I) != 0) {
+			printk(BIOS_ERR,
+				"Failed on signal_l2(%x, %x)\n",
 				cache_size, v);
 			goto bad;
 		}

View File

@@ -133,7 +133,8 @@ static void fill_in_relocation_params(struct smm_relocation_params *params)
 	/* SMRR has 32-bits of valid address aligned to 4KiB. */
 	params->smrr_base.lo = (params->smram_base & rmask) | MTRR_TYPE_WRBACK;
 	params->smrr_base.hi = 0;
-	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
+	params->smrr_mask.lo = (~(tseg_size - 1) & rmask)
+		| MTRR_PHYS_MASK_VALID;
 	params->smrr_mask.hi = 0;
 }
@@ -226,12 +227,14 @@ static int cpu_smm_setup(void)
 			num_cpus, CONFIG_MAX_CPUS);
 	}
-	if (install_relocation_handler(apic_id_map, num_cpus, &smm_reloc_params)) {
+	if (install_relocation_handler(apic_id_map, num_cpus,
+		&smm_reloc_params)) {
 		printk(BIOS_CRIT, "SMM Relocation handler install failed.\n");
 		return -1;
 	}
-	if (install_permanent_handler(apic_id_map, num_cpus, &smm_reloc_params)) {
+	if (install_permanent_handler(apic_id_map, num_cpus,
+		&smm_reloc_params)) {
 		printk(BIOS_CRIT, "SMM Permanent handler install failed.\n");
 		return -1;
 	}