cpu/x86: add a barrier with timeout
In case something goes wrong on one of the CPUs, add the ability to use a
barrier with a timeout so that the other CPUs don't wait forever. Remove
"static" from barrier_wait() and release_barrier() so they can be used
outside the file that defines them.

BUG=chrome-os-partner:59875
BRANCH=reef
TEST=None

Change-Id: Iab6bd30ddf7632c7a5785b338798960c26016b24
Signed-off-by: Bora Guvendik <bora.guvendik@intel.com>
Reviewed-on: https://review.coreboot.org/18107
Tested-by: build bot (Jenkins)
Reviewed-by: Martin Roth <martinroth@google.com>
commit 9b76f0b27b (parent 4796c32ad6)
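A minimal caller-side sketch of how the new timeout variant might be used, so that an AP does not hang forever if the BSP never releases the barrier. The wrapper function, the barrier variable, and the 100 ms budget below are hypothetical illustrations, not part of this change; barrier_wait_timeout(), atomic_t, and printk() are the coreboot interfaces shown or referenced in the diff (the exact include paths are assumptions).

#include <console/console.h>
#include <cpu/x86/mp.h>		/* barrier_wait_timeout(), release_barrier() */
#include <smp/atomic.h>		/* assumed include path for atomic_t */

static atomic_t start_flag;	/* zero-initialized: the barrier starts out held */

/* Hypothetical AP-side helper: wait at most 100 ms for the BSP. */
static void ap_wait_for_bsp(void)
{
	if (barrier_wait_timeout(&start_flag, 100)) {
		/* Timed out: log and bail out instead of spinning forever. */
		printk(BIOS_ERR, "AP timed out waiting for BSP barrier\n");
		return;
	}
	/* Barrier was released in time; continue with the synchronized step. */
}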
@@ -138,14 +138,37 @@ struct cpu_map {
 /* Keep track of APIC and device structure for each CPU. */
 static struct cpu_map cpus[CONFIG_MAX_CPUS];
 
-static inline void barrier_wait(atomic_t *b)
+inline void barrier_wait(atomic_t *b)
 {
 	while (atomic_read(b) == 0)
 		asm ("pause");
 	mfence();
 }
 
-static inline void release_barrier(atomic_t *b)
+/* Returns 1 if the timeout occurs before the barrier is released.
+ * Returns 0 if the barrier is released before the timeout. */
+int barrier_wait_timeout(atomic_t *b, uint32_t timeout_ms)
+{
+	int timeout = 0;
+	struct mono_time current, end;
+
+	timer_monotonic_get(&current);
+	end = current;
+	mono_time_add_msecs(&end, timeout_ms);
+
+	while ((atomic_read(b) == 0) && (!mono_time_after(&current, &end))) {
+		timer_monotonic_get(&current);
+		asm ("pause");
+	}
+	mfence();
+
+	if (mono_time_after(&current, &end))
+		timeout = 1;
+
+	return timeout;
+}
+
+inline void release_barrier(atomic_t *b)
 {
 	mfence();
 	atomic_set(b, 1);
@@ -150,5 +150,15 @@ int mp_park_aps(void);
 void smm_initiate_relocation_parallel(void);
 /* Send SMI to self with single execution. */
 void smm_initiate_relocation(void);
+/* Make a CPU wait until the barrier is released. */
+void barrier_wait(atomic_t *b);
+/*
+ * Make a CPU wait until the barrier is released, or a timeout occurs.
+ * Returns 1 if the timeout occurs before the barrier is released.
+ * Returns 0 if the barrier is released before the timeout.
+ */
+int barrier_wait_timeout(atomic_t *b, uint32_t timeout_ms);
+/* Release a barrier so that other CPUs waiting for that barrier can continue. */
+void release_barrier(atomic_t *b);
 
 #endif /* _X86_MP_H_ */
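For completeness, a sketch of the releasing side, assuming a hypothetical BSP routine name. release_barrier() is the function declared above; a barrier counts as held while its atomic_t reads 0 and as released once it reads 1, which is exactly what the loop in barrier_wait_timeout() polls for.

/* Hypothetical BSP-side helper: let all waiting APs proceed. */
static void bsp_release_aps(atomic_t *start_flag)
{
	/* mfence() inside release_barrier() orders prior stores before the
	 * single atomic_set(b, 1) that wakes every CPU spinning on the flag. */
	release_barrier(start_flag);
}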