arch/x86/spinlock.h: Support systems with >128 cores

Each time the spinlock is acquired, a byte is decremented and then the
sign of that byte is checked. With more than 128 cores contending for
the lock, the signed byte overflows and the sign check gives the wrong
answer. An easy fix is to widen the spinlock acquire and release
operations from a byte to a 32-bit word.
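To make the failure mode concrete, here is a small C sketch (not
coreboot code, purely illustrative) of what happens when many CPUs
decrement a signed byte-wide lock value that starts at 1:

#include <stdio.h>

/*
 * Illustration only: the lock value starts at 1 (unlocked) and every
 * CPU entering spin_lock() decrements it ("lock ; decb %0").  The
 * "js" branch assumes the result stays negative while contended, but
 * a signed byte bottoms out at -128; on typical two's-complement
 * targets the next decrement wraps around to +127 and the sign check
 * is fooled.
 */
int main(void)
{
	signed char lock = 1;	/* 1 == unlocked */

	for (int cpu = 1; cpu <= 200; cpu++) {
		lock--;		/* one contending CPU */
		if (cpu > 1 && lock >= 0) {
			printf("after %d CPUs the byte reads %d: "
			       "sign check broken\n", cpu, lock);
			break;
		}
	}
	return 0;
}

Widening the operations to decl/cmpl/movl keeps the same scheme but
gives the counter a 32-bit range, so the sign check stays valid far
beyond 128 contenders.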

TEST: See that serialized SMM relocation is still serialized on
systems with >128 cores.

Change-Id: I76afaa60669335090743d99381280e74aa9fb5b1
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/60539
Reviewed-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Arthur Heymans 2021-12-30 21:12:35 +01:00
parent 395f5b3129
commit ac24a96579
1 changed file with 4 additions and 4 deletions

@@ -25,17 +25,17 @@ typedef struct {
  * We make no fairness assumptions. They have a cost.
  */
 #define barrier() __asm__ __volatile__("" : : : "memory")
-#define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
+#define spin_is_locked(x)	(*(volatile int *)(&(x)->lock) <= 0)
 #define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
 #undef barrier
 
 #define spin_lock_string \
 	"\n1:\t" \
-	"lock ; decb %0\n\t" \
+	"lock ; decl %0\n\t" \
 	"js 2f\n" \
 	".section .text.lock,\"ax\"\n" \
 	"2:\t" \
-	"cmpb $0,%0\n\t" \
+	"cmpl $0,%0\n\t" \
 	"rep;nop\n\t" \
 	"jle 2b\n\t" \
 	"jmp 1b\n" \
@@ -45,7 +45,7 @@ typedef struct {
  * This works. Despite all the confusion.
  */
 #define spin_unlock_string \
-	"movb $1,%0"
+	"movl $1,%0"
 
 static __always_inline void spin_lock(spinlock_t *lock)
 {