cpu/x86: Use tabs for indent
Fix the following error and warning detected by checkpatch.pl:

ERROR: code indent should use tabs where possible
WARNING: please, no spaces at the start of a line

TEST=Build and run on Galileo Gen2

Change-Id: Ie6e4dd4c3eb0d2c44ecd008740dfc348d496fe78
Signed-off-by: Lee Leahy <Leroy.P.Leahy@intel.com>
Reviewed-on: https://review.coreboot.org/18841
Tested-by: build bot (Jenkins)
Reviewed-by: Philippe Mathieu-Daudé <philippe.mathieu.daude@gmail.com>
Reviewed-by: Martin Roth <martinroth@google.com>
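The change is purely whitespace: continuation lines that were indented with spaces are re-indented with tabs, matching the Linux-kernel-derived coding style that checkpatch.pl enforces. A minimal before/after sketch of the pattern (hypothetical prototype, not code from this commit):

/* Before: continuation line indented with spaces; checkpatch.pl flags
 * lines like this with the error and warning quoted above. */
static int wait_for_event(int timeout_us, int step_us,
                          int flags);

/* After: the same continuation line indented with tabs. */
static int wait_for_event(int timeout_us, int step_us,
			  int flags);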
parent 8bad6d2f90
commit a07d0ddc44

4 changed files with 28 additions and 28 deletions
@@ -154,7 +154,7 @@ static inline void release_barrier(atomic_t *b)
 
 /* Returns 1 if timeout waiting for APs. 0 if target aps found. */
 static int wait_for_aps(atomic_t *val, int target, int total_delay,
-                        int delay_step)
+			int delay_step)
 {
 	int timeout = 0;
 	int delayed = 0;
@@ -453,7 +453,7 @@ static int start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
 	/* Send INIT IPI to all but self. */
 	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
 	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
-                           LAPIC_DM_INIT);
+			   LAPIC_DM_INIT);
 	printk(BIOS_DEBUG, "Waiting for 10ms after sending INIT.\n");
 	mdelay(10);
 
@@ -469,7 +469,7 @@ static int start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
 
 	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
 	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
-                           LAPIC_DM_STARTUP | sipi_vector);
+			   LAPIC_DM_STARTUP | sipi_vector);
 	printk(BIOS_DEBUG, "Waiting for 1st SIPI to complete...");
 	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
 		printk(BIOS_DEBUG, "timed out.\n");
@@ -493,7 +493,7 @@ static int start_aps(struct bus *cpu_bus, int ap_count, atomic_t *num_aps)
 
 	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
 	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
-                           LAPIC_DM_STARTUP | sipi_vector);
+			   LAPIC_DM_STARTUP | sipi_vector);
 	printk(BIOS_DEBUG, "Waiting for 2nd SIPI to complete...");
 	if (apic_wait_timeout(10000 /* 10 ms */, 50 /* us */)) {
 		printk(BIOS_DEBUG, "timed out.\n");
@@ -527,7 +527,7 @@ static int bsp_do_flight_plan(struct mp_params *mp_params)
 		if (atomic_read(&rec->barrier) == 0) {
 			/* Wait for the APs to check in. */
 			if (wait_for_aps(&rec->cpus_entered, num_aps,
-                                         timeout_us, step_us)) {
+					 timeout_us, step_us)) {
 				printk(BIOS_ERR, "MP record %d timeout.\n", i);
 				ret = -1;
 			}

@@ -192,14 +192,14 @@ static struct memranges *get_physical_address_space(void)
 		 * regions. */
 		memranges_init(addr_space, mask, mask, MTRR_TYPE_WRBACK);
 		memranges_add_resources(addr_space, mask, 0,
-                                        MTRR_TYPE_UNCACHEABLE);
+					MTRR_TYPE_UNCACHEABLE);
 
 		/* Handle any write combining resources. Only prefetchable
 		 * resources are appropriate for this MTRR type. */
 		match = IORESOURCE_PREFETCH;
 		mask |= match;
 		memranges_add_resources_filter(addr_space, mask, match, MTRR_TYPE_WRCOMB,
-                                               filter_vga_wrcomb);
+					filter_vga_wrcomb);
 
 		/* The address space below 4GiB is special. It needs to be
 		 * covered entirely by range entries so that MTRR calculations
@@ -207,8 +207,8 @@ static struct memranges *get_physical_address_space(void)
 		 * Therefore, ensure holes are filled up to 4GiB as
 		 * uncacheable */
 		memranges_fill_holes_up_to(addr_space,
-                                           RANGE_TO_PHYS_ADDR(RANGE_4GB),
-                                           MTRR_TYPE_UNCACHEABLE);
+					RANGE_TO_PHYS_ADDR(RANGE_4GB),
+					MTRR_TYPE_UNCACHEABLE);
 
 		print_physical_address_space(addr_space, NULL);
 	}
@@ -399,7 +399,7 @@ static void clear_var_mtrr(int index)
 }
 
 static void prep_var_mtrr(struct var_mtrr_state *var_state,
-                          uint32_t base, uint32_t size, int mtrr_type)
+				uint32_t base, uint32_t size, int mtrr_type)
 {
 	struct var_mtrr_regs *regs;
 	resource_t rbase;
@@ -443,7 +443,7 @@ static void prep_var_mtrr(struct var_mtrr_state *var_state,
 }
 
 static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
-                                uint32_t base, uint32_t size, int mtrr_type)
+				uint32_t base, uint32_t size, int mtrr_type)
 {
 	while (size != 0) {
 		uint32_t addr_lsb;
@@ -471,7 +471,7 @@ static void calc_var_mtrr_range(struct var_mtrr_state *var_state,
 }
 
 static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
-                                     struct range_entry *r)
+					struct range_entry *r)
 {
 	uint32_t a1, a2, b1, b2;
 	int mtrr_type;
@@ -549,7 +549,7 @@ static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
 }
 
 static void calc_var_mtrrs_without_hole(struct var_mtrr_state *var_state,
-                                        struct range_entry *r)
+					struct range_entry *r)
 {
 	uint32_t a1, a2, b1, b2, c1, c2;
 	int mtrr_type;
@@ -608,8 +608,8 @@ static void calc_var_mtrrs_without_hole(struct var_mtrr_state *var_state,
 }
 
 static void __calc_var_mtrrs(struct memranges *addr_space,
-                             int above4gb, int address_bits,
-                             int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
+				int above4gb, int address_bits,
+				int *num_def_wb_mtrrs, int *num_def_uc_mtrrs)
 {
 	int wb_deftype_count;
 	int uc_deftype_count;
@@ -690,22 +690,22 @@ static void __calc_var_mtrrs(struct memranges *addr_space,
 }
 
 static int calc_var_mtrrs(struct memranges *addr_space,
-                          int above4gb, int address_bits)
+			int above4gb, int address_bits)
 {
 	int wb_deftype_count = 0;
 	int uc_deftype_count = 0;
 
 	__calc_var_mtrrs(addr_space, above4gb, address_bits, &wb_deftype_count,
-                         &uc_deftype_count);
+			&uc_deftype_count);
 
 	if (wb_deftype_count > bios_mtrrs && uc_deftype_count > bios_mtrrs) {
 		printk(BIOS_DEBUG, "MTRR: Removing WRCOMB type. "
 		       "WB/UC MTRR counts: %d/%d > %d.\n",
 		       wb_deftype_count, uc_deftype_count, bios_mtrrs);
 		memranges_update_tag(addr_space, MTRR_TYPE_WRCOMB,
-                                     MTRR_TYPE_UNCACHEABLE);
+				MTRR_TYPE_UNCACHEABLE);
 		__calc_var_mtrrs(addr_space, above4gb, address_bits,
-                                 &wb_deftype_count, &uc_deftype_count);
+				&wb_deftype_count, &uc_deftype_count);
 	}
 
 	printk(BIOS_DEBUG, "MTRR: default type WB/UC MTRR counts: %d/%d.\n",

@@ -161,7 +161,7 @@ void smi_handler(u32 smm_revision)
 		state_save.type = EM64T101;
 		state_save.em64t101_state_save =
 			smm_save_state(smm_base,
-                                       SMM_EM64T101_ARCH_OFFSET, node);
+					SMM_EM64T101_ARCH_OFFSET, node);
 		break;
 	case 0x00030064:
 		state_save.type = AMD64;

@@ -65,7 +65,7 @@ struct smm_entry_ins {
  * other entry points are stride size below the previous.
  */
 static void smm_place_jmp_instructions(void *entry_start, int stride, int num,
-                                       void *jmp_target)
+					void *jmp_target)
 {
 	int i;
 	char *cur;
@@ -95,7 +95,7 @@ static void smm_place_jmp_instructions(void *entry_start, int stride, int num,
 /* Place stacks in base -> base + size region, but ensure the stacks don't
  * overlap the staggered entry points. */
 static void *smm_stub_place_stacks(char *base, int size,
-                                   struct smm_loader_params *params)
+					struct smm_loader_params *params)
 {
 	int total_stack_size;
 	char *stacks_top;
@@ -106,7 +106,7 @@ static void *smm_stub_place_stacks(char *base, int size,
 	/* If stack space is requested assume the space lives in the lower
 	 * half of SMRAM. */
 	total_stack_size = params->per_cpu_stack_size *
-                           params->num_concurrent_stacks;
+				params->num_concurrent_stacks;
 
 	/* There has to be at least one stack user. */
 	if (params->num_concurrent_stacks < 1)
@@ -146,9 +146,9 @@ static void smm_stub_place_staggered_entry_points(char *base,
 			num_entries--;
 		}
 		smm_place_jmp_instructions(base,
-                                           params->per_cpu_save_state_size,
-                                           num_entries,
-                                           rmodule_entry(smm_stub));
+					params->per_cpu_save_state_size,
+					num_entries,
+					rmodule_entry(smm_stub));
 	}
 }
 
@@ -190,7 +190,7 @@ static int smm_module_setup_stub(void *smbase, struct smm_loader_params *params)
 
 	/* Adjust remaining size to account for save state. */
 	total_save_state_size = params->per_cpu_save_state_size *
-                                params->num_concurrent_save_states;
+				params->num_concurrent_save_states;
 	size -= total_save_state_size;
 
 	/* The save state size encroached over the first SMM entry point. */
@@ -331,7 +331,7 @@ int smm_load_module(void *smram, int size, struct smm_loader_params *params)
 		return -1;
 
 	total_stack_size = params->per_cpu_stack_size *
-                           params->num_concurrent_stacks;
+			   params->num_concurrent_stacks;
 
 	/* Stacks start at the top of the region. */
 	base = smram;