src/arch: Fix checkpatch warning: no spaces at the start of a line

Change-Id: Id9846ceb714dceaea12ea33ce2aa2b8e5bb6f4df
Signed-off-by: Martin Roth <martinroth@google.com>
Reviewed-on: https://review.coreboot.org/20728
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Werner Zeh <werner.zeh@siemens.com>
Reviewed-by: Paul Menzel <paulepanter@users.sourceforge.net>
Author: Martin Roth <martinroth@google.com>
Date: 2017-07-23 16:01:26 -06:00
Commit: 4635787895
Parent: 7f35d3aa16
5 changed files with 46 additions and 46 deletions

@@ -35,10 +35,10 @@
 struct cpu_info *cpu_info(void)
 {
 #error "This is BROKEN! ARM stacks are currently not guaranteed to be " \
-  "STACK_SIZE-aligned in any way. If you ever plan to revive this " \
-  "feature, make sure you add the proper assertions " \
-  "(and maybe consider revising the whole thing to work closer to what " \
-  "arm64 is doing now)."
+	"STACK_SIZE-aligned in any way. If you ever plan to revive this " \
+	"feature, make sure you add the proper assertions " \
+	"(and maybe consider revising the whole thing to work closer to what " \
+	"arm64 is doing now)."
 	uintptr_t addr = ALIGN((uintptr_t)__builtin_frame_address(0),
 			CONFIG_STACK_SIZE);
 	addr -= sizeof(struct cpu_info);
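Note: the code in this hunk only works if each CPU's struct cpu_info sits just below the top of a STACK_SIZE-aligned stack, which is exactly the guarantee the #error says is missing. A minimal sketch of the intended lookup, assuming that alignment held; ALIGN_UP, STACK_SIZE and the cpu_index member are illustrative stand-ins for coreboot's ALIGN(), CONFIG_STACK_SIZE and the real struct layout:

#include <stdint.h>

/* Hypothetical stand-ins for coreboot's ALIGN() and CONFIG_STACK_SIZE. */
#define STACK_SIZE	0x1000
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct cpu_info { int cpu_index; };	/* minimal hypothetical layout */

/* Round the current frame address up to the next stack boundary, then step
 * back over the cpu_info record stored at the very top of the stack. */
static inline struct cpu_info *cpu_info_sketch(void)
{
	uintptr_t addr = ALIGN_UP((uintptr_t)__builtin_frame_address(0),
				  STACK_SIZE);
	addr -= sizeof(struct cpu_info);
	return (struct cpu_info *)addr;
}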

@@ -198,35 +198,35 @@
 #ifdef __GNUC__
 #define read_csr(reg) ({ unsigned long __tmp; \
-  asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
-  __tmp; })
+	asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
+	__tmp; })
 #define write_csr(reg, val) ({ \
-  if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
-    asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
-  else \
-    asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
+	if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
+		asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
+	else \
+		asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
 #define swap_csr(reg, val) ({ unsigned long __tmp; \
-  if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
-    asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "i"(val)); \
-  else \
-    asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "r"(val)); \
-  __tmp; })
+	if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
+		asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "i"(val)); \
+	else \
+		asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "r"(val)); \
+	__tmp; })
 #define set_csr(reg, bit) ({ unsigned long __tmp; \
-  if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
-    asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
-  else \
-    asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
-  __tmp; })
+	if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+		asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
+	else \
+		asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
+	__tmp; })
 #define clear_csr(reg, bit) ({ unsigned long __tmp; \
-  if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
-    asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
-  else \
-    asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
-  __tmp; })
+	if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+		asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "i"(bit)); \
+	else \
+		asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
+	__tmp; })
 #define rdtime() read_csr(time)
 #define rdcycle() read_csr(cycle)
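Note: these macros wrap the RISC-V csrr/csrw/csrrs/csrrc instructions in GCC statement expressions. A hedged usage sketch, assuming the macros above are in scope; MSTATUS_MIE (bit 3 of mstatus) comes from the RISC-V privileged spec, and the helper name is made up:

/* Sketch: briefly enable machine-mode interrupts around some work,
 * then restore the previous mstatus value. */
#define MSTATUS_MIE (1UL << 3)

static void with_irqs_enabled_sketch(void (*work)(void))
{
	unsigned long saved = read_csr(mstatus);	/* csrr            */
	set_csr(mstatus, MSTATUS_MIE);			/* csrrs, "i" path */
	work();
	write_csr(mstatus, saved);			/* csrw, "r" path  */
}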

@@ -45,23 +45,23 @@ typedef struct { int lock; } spinlock_t;
 # define atomic_cas(ptr, cmp, swp) __sync_val_compare_and_swap(ptr, cmp, swp)
 #else
 # define atomic_add(ptr, inc) ({ \
-  long flags = disable_irqsave(); \
-  typeof(ptr) res = *(volatile typeof(ptr))(ptr); \
-  *(volatile typeof(ptr))(ptr) = res + (inc); \
-  enable_irqrestore(flags); \
-  res; })
+	long flags = disable_irqsave(); \
+	typeof(ptr) res = *(volatile typeof(ptr))(ptr); \
+	*(volatile typeof(ptr))(ptr) = res + (inc); \
+	enable_irqrestore(flags); \
+	res; })
 # define atomic_swap(ptr, swp) ({ \
-  long flags = disable_irqsave(); \
-  typeof(*ptr) res = *(volatile typeof(ptr))(ptr); \
-  *(volatile typeof(ptr))(ptr) = (swp); \
-  enable_irqrestore(flags); \
-  res; })
+	long flags = disable_irqsave(); \
+	typeof(*ptr) res = *(volatile typeof(ptr))(ptr); \
+	*(volatile typeof(ptr))(ptr) = (swp); \
+	enable_irqrestore(flags); \
+	res; })
 # define atomic_cas(ptr, cmp, swp) ({ \
-  long flags = disable_irqsave(); \
-  typeof(ptr) res = *(volatile typeof(ptr))(ptr); \
-  if (res == (cmp)) *(volatile typeof(ptr))(ptr) = (swp); \
-  enable_irqrestore(flags); \
-  res; })
+	long flags = disable_irqsave(); \
+	typeof(ptr) res = *(volatile typeof(ptr))(ptr); \
+	if (res == (cmp)) *(volatile typeof(ptr))(ptr) = (swp); \
+	enable_irqrestore(flags); \
+	res; })
 #endif
 #endif
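Note: the #else branch emulates the __sync builtins by masking interrupts through disable_irqsave()/enable_irqrestore(). As a usage sketch, atomic_cas() and atomic_swap() are enough to build a simple test-and-set lock on the spinlock_t shown in the hunk header; the function names below are illustrative, not coreboot API:

/* Sketch: take and release a spinlock_t using the macros above. */
static inline void spin_lock_sketch(spinlock_t *lock)
{
	/* Spin until we observe 0 (unlocked) and swap in 1 (locked). */
	while (atomic_cas(&lock->lock, 0, 1) != 0)
		;
}

static inline void spin_unlock_sketch(spinlock_t *lock)
{
	atomic_swap(&lock->lock, 0);
}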

@@ -70,9 +70,9 @@ typedef struct {
 } hls_t;
 #define MACHINE_STACK_TOP() ({ \
-  /* coverity[uninit_use] : FALSE */ \
-  register uintptr_t sp asm ("sp"); \
-  (void*)((sp + RISCV_PGSIZE) & -RISCV_PGSIZE); })
+	/* coverity[uninit_use] : FALSE */ \
+	register uintptr_t sp asm ("sp"); \
+	(void*)((sp + RISCV_PGSIZE) & -RISCV_PGSIZE); })
 // hart-local storage, at top of stack
 #define HLS() ((hls_t*)(MACHINE_STACK_TOP() - HLS_SIZE))
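Note: MACHINE_STACK_TOP() rounds the current stack pointer up to the next RISCV_PGSIZE boundary, and HLS() returns the hart-local storage block carved out of the top of that page. A small sketch of touching that storage, assuming hls_t carries a hart_id field; the struct body is elided above, so that field name is hypothetical:

/* Sketch: stash the current hart's ID in hart-local storage. */
static void hls_init_sketch(uintptr_t hart_id)
{
	hls_t *hls = HLS();	/* top of this hart's stack page, minus HLS_SIZE */
	hls->hart_id = hart_id;
}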

@@ -41,9 +41,9 @@
 #define INSERT_FIELD(val, which, fieldval) (((val) & ~(which)) | ((fieldval) * ((which) & ~((which)-1))))
 #define supervisor_paddr_valid(start, length) \
-  ((uintptr_t)(start) >= current.first_user_vaddr + current.bias \
-    && (uintptr_t)(start) + (length) < mem_size \
-    && (uintptr_t)(start) + (length) >= (uintptr_t)(start))
+	((uintptr_t)(start) >= current.first_user_vaddr + current.bias \
+		&& (uintptr_t)(start) + (length) < mem_size \
+		&& (uintptr_t)(start) + (length) >= (uintptr_t)(start))
 typedef uintptr_t pte_t;
 extern pte_t* root_page_table;
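Note: supervisor_paddr_valid() accepts a (start, length) pair only if it begins inside the current supervisor mapping, ends below mem_size, and does not wrap around. A hedged example of guarding a copy with it; copy_to_supervisor() is a hypothetical helper, not part of this file:

/* Sketch: validate a supervisor-provided buffer before writing to it. */
static int write_to_supervisor_sketch(void *dest, const void *src,
				      unsigned long len)
{
	if (!supervisor_paddr_valid(dest, len))
		return -1;	/* reject out-of-range or wrapping buffers */
	copy_to_supervisor(dest, src, len);
	return 0;
}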