arm64: Remove SMP support
As ARM Trusted Firmware is the only first-class citizen for booting arm64 multi-processor systems in coreboot, remove SMP support. If SoCs want to bring up MP, then ATF needs to be ported and integrated.

Change-Id: Ife24d53eed9b7a5a5d8c69a64d7a20a55a4163db
Signed-off-by: Furquan Shaikh <furquan@google.com>
Reviewed-on: http://review.coreboot.org/11909
Tested-by: build bot (Jenkins)
Reviewed-by: Julius Werner <jwerner@chromium.org>

commit b3f6ad3522 (parent 1148786c05)
@@ -130,9 +130,7 @@ ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM64),y)
ramstage-y += c_entry.c
ramstage-y += stages.c
ramstage-y += startup.c
ramstage-y += div0.c
ramstage-y += cpu.c
ramstage-y += cpu_ramstage.c
ramstage-y += eabi_compat.c
ramstage-y += boot.c
@@ -17,7 +17,6 @@
#include <arch/cpu.h>
#include <arch/mmu.h>
#include <arch/stages.h>
#include <arch/startup.h>
#include "cpu-internal.h"

void __attribute__((weak)) arm64_soc_init(void)
@@ -32,7 +31,7 @@ static void seed_stack(void)
        int i;
        int size;

        stack_begin = cpu_get_stack(smp_processor_id());
        stack_begin = cpu_get_stack();
        stack_begin -= CONFIG_STACK_SIZE;
        slot = (void *)stack_begin;
@@ -45,21 +44,10 @@ static void seed_stack(void)

static void arm64_init(void)
{
        cpu_set_bsp();
        seed_stack();
        arm64_soc_init();
        main();
}

/*
 * This variable holds entry point for CPUs starting up. The first
 * element is the BSP path, and the second is the non-BSP path.
 */
void (*c_entry[2])(void) = { &arm64_init, &arch_secondary_cpu_init };

void *prepare_secondary_cpu_startup(void)
{
        startup_save_cpu_data();

        return secondary_entry_point(&arm64_cpu_startup_resume);
}
/* This variable holds entry point for CPU starting up. */
void (*c_entry)(void) = &arm64_init;
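The c_entry.c hunks above carry the core of this change: the two-element c_entry array (BSP path plus secondary path) collapses into a single entry pointer. A minimal, self-contained sketch of that before/after shape, with hypothetical stand-in functions in place of the real arm64_init and arch_secondary_cpu_init:

#include <stdio.h>

static void bsp_main(void)       { puts("BSP path"); }
static void secondary_main(void) { puts("secondary path"); }

/* Before: two entry points, indexed by BSP/non-BSP state (x26 in assembly). */
static void (*c_entry_old[2])(void) = { bsp_main, secondary_main };

/* After: a single entry point; only the boot CPU runs ramstage. */
static void (*c_entry_new)(void) = bsp_main;

int main(void)
{
        c_entry_old[0]();       /* what the BSP ran */
        c_entry_old[1]();       /* what a secondary CPU would have run */
        c_entry_new();          /* the single-CPU path after this change */
        return 0;
}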
@@ -16,22 +16,10 @@
#ifndef ARCH_CPU_INTERNAL_H
#define ARCH_CPU_INTERNAL_H

/*
 * Do the necessary work to prepare for secondary CPUs coming up. The
 * SoC will call this function before bringing up the other CPUs. The
 * entry point for the secondary CPUs is returned.
 */
void *prepare_secondary_cpu_startup(void);
/* Return the top of the stack for the cpu. */
void *cpu_get_stack(void);

/*
 * Code path for the non-BSP CPUs. This is an internal function used.
 */
void arch_secondary_cpu_init(void);

/* Return the top of the stack for the specified cpu. */
void *cpu_get_stack(unsigned int cpu);

/* Return the top of the exception stack for the specified cpu. */
void *cpu_get_exception_stack(unsigned int cpu);
/* Return the top of the exception stack for the cpu. */
void *cpu_get_exception_stack(void);

#endif /* ARCH_CPU_INTERNAL_H */
@@ -1,224 +0,0 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2013 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <stdint.h>
#include <stdlib.h>
#include <arch/lib_helpers.h>
#include <cpu/cpu.h>
#include <console/console.h>
#include <smp/node.h>
#include "cpu-internal.h"

struct cpu_info cpu_infos[CONFIG_MAX_CPUS];
struct cpu_info *bsp_cpu_info;

struct cpu_info *cpu_info(void)
{
        return cpu_info_for_cpu(smp_processor_id());
}

size_t cpus_online(void)
{
        int i;
        size_t num = 0;

        for (i = 0; i < ARRAY_SIZE(cpu_infos); i++) {
                if (cpu_online(cpu_info_for_cpu(i)))
                        num++;
        }

        return num;
}

static inline int action_queue_empty(struct cpu_action_queue *q)
{
        return load_acquire_exclusive(&q->todo) == NULL;
}

static inline int action_completed(struct cpu_action_queue *q,
                                   struct cpu_action *action)
{
        return load_acquire(&q->completed) == action;
}

static inline void wait_for_action_queue_slot(struct cpu_action_queue *q)
{
        while (!action_queue_empty(q))
                wfe();
}

static void wait_for_action_complete(struct cpu_action_queue *q,
                                     struct cpu_action *a)
{
        while (!action_completed(q, a))
                wfe();
}

static struct cpu_action *wait_for_action(struct cpu_action_queue *q,
                                          struct cpu_action *local)
{
        struct cpu_action *action;

        while (action_queue_empty(q))
                wfe();

        /*
         * Keep original address, but use a local copy for async processing.
         */
        do {
                action = load_acquire_exclusive(&q->todo);
                *local = *action;
        } while (!store_release_exclusive(&q->todo, NULL));

        return action;
}

static void queue_action(struct cpu_action_queue *q, struct cpu_action *action)
{
        do {
                wait_for_action_queue_slot(q);
                if (load_acquire_exclusive(&q->todo) != NULL)
                        continue;
        } while (!store_release_exclusive(&q->todo, action));
}

static void action_queue_complete(struct cpu_action_queue *q,
                                  struct cpu_action *action)
{
        /* Mark completion and send events to waiters. */
        store_release(&q->completed, action);
        sev();
}

static void action_run(struct cpu_action *action)
{
        action->run(action->arg);
}

static void action_run_on_cpu(struct cpu_info *ci, struct cpu_action *action,
                              int sync)
{
        struct cpu_action_queue *q = &ci->action_queue;

        /* Don't run actions on non-online cpus. */
        if (!cpu_online(ci))
                return;

        if (ci->id == smp_processor_id()) {
                action->run(action->arg);
                return;
        }

        queue_action(q, action);
        /* Wait for CPU to pick it up. Empty slot means it was picked up. */
        wait_for_action_queue_slot(q);
        /* Wait for completion if requested. */
        if (sync)
                wait_for_action_complete(q, action);
}

static int __arch_run_on_cpu(unsigned int cpu, struct cpu_action *action,
                             int sync)
{
        struct cpu_info *ci;

        if (cpu >= CONFIG_MAX_CPUS)
                return -1;

        ci = cpu_info_for_cpu(cpu);

        action_run_on_cpu(ci, action, sync);

        return 0;
}

int arch_run_on_cpu(unsigned int cpu, struct cpu_action *action)
{
        return __arch_run_on_cpu(cpu, action, 1);
}

int arch_run_on_cpu_async(unsigned int cpu, struct cpu_action *action)
{
        return __arch_run_on_cpu(cpu, action, 0);
}

static int __arch_run_on_all_cpus(struct cpu_action *action, int sync)
{
        int i;

        for (i = 0; i < CONFIG_MAX_CPUS; i++)
                action_run_on_cpu(cpu_info_for_cpu(i), action, sync);

        return 0;
}

static int __arch_run_on_all_cpus_but_self(struct cpu_action *action, int sync)
{
        int i;
        struct cpu_info *me = cpu_info();

        for (i = 0; i < CONFIG_MAX_CPUS; i++) {
                struct cpu_info *ci = cpu_info_for_cpu(i);
                if (ci == me)
                        continue;
                action_run_on_cpu(ci, action, sync);
        }

        return 0;
}

int arch_run_on_all_cpus(struct cpu_action *action)
{
        return __arch_run_on_all_cpus(action, 1);
}

int arch_run_on_all_cpus_async(struct cpu_action *action)
{
        return __arch_run_on_all_cpus(action, 0);
}

int arch_run_on_all_cpus_but_self(struct cpu_action *action)
{
        return __arch_run_on_all_cpus_but_self(action, 1);
}

int arch_run_on_all_cpus_but_self_async(struct cpu_action *action)
{
        return __arch_run_on_all_cpus_but_self(action, 0);
}

void arch_cpu_wait_for_action(void)
{
        struct cpu_info *ci = cpu_info();
        struct cpu_action_queue *q = &ci->action_queue;

        while (1) {
                struct cpu_action *orig;
                struct cpu_action action;

                orig = wait_for_action(q, &action);

                action_run(&action);
                action_queue_complete(q, orig);
        }
}

#if IS_ENABLED(CONFIG_SMP)
int boot_cpu(void)
{
        return cpu_is_bsp();
}
#endif
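The deleted file above implemented the cross-CPU action queue. As an illustration of the todo/completed handshake it used, here is a self-contained, single-threaded model with C11 atomics standing in for coreboot's load_acquire()/store_release() and exclusive-monitor helpers; the names and the inline "target CPU" step are for demonstration only:

#include <stdatomic.h>
#include <stdio.h>

struct cpu_action { void (*run)(void *arg); void *arg; };

struct cpu_action_queue {
        _Atomic(struct cpu_action *) todo;      /* posted by the requester */
        _Atomic(struct cpu_action *) completed; /* acknowledged by the target */
};

static void say(void *arg) { printf("action: %s\n", (const char *)arg); }

int main(void)
{
        struct cpu_action_queue q = { NULL, NULL };
        struct cpu_action a = { say, "hello" };

        /* Requester: publish the action into the empty 'todo' slot. */
        atomic_store_explicit(&q.todo, &a, memory_order_release);

        /* Target CPU: take the action, clear the slot, run it, mark it done. */
        struct cpu_action *todo =
                atomic_exchange_explicit(&q.todo, NULL, memory_order_acq_rel);
        todo->run(todo->arg);
        atomic_store_explicit(&q.completed, todo, memory_order_release);

        /* Requester: completion is seen when 'completed' equals the action. */
        printf("completed: %d\n",
               atomic_load_explicit(&q.completed, memory_order_acquire) == &a);
        return 0;
}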
@@ -24,6 +24,8 @@
#include <timer.h>
#include "cpu-internal.h"

static struct cpu_info cpu_info;

void __attribute__((weak)) arm64_arch_timer_init(void)
{
        /* Default weak implementation does nothing. */
@@ -102,9 +104,9 @@ static void el3_init(void)
        isb();
}

static void init_this_cpu(void *arg)
static void init_this_cpu(void)
{
        struct cpu_info *ci = arg;
        struct cpu_info *ci = &cpu_info;
        device_t dev = ci->cpu;

        cpu_set_device_operations(dev);
@@ -156,14 +158,14 @@ static void init_cpu_info(struct bus *bus)
                        continue;

                /* IDs are currently mapped 1:1 with logical CPU numbers. */
                if (id >= CONFIG_MAX_CPUS) {
                if (id != 0) {
                        printk(BIOS_WARNING,
                                "CPU id %x too large. Disabling.\n", id);
                        cpu_disable_dev(cur);
                        continue;
                }

                ci = cpu_info_for_cpu(id);
                ci = &cpu_info;
                if (ci->cpu != NULL) {
                        printk(BIOS_WARNING,
                                "Duplicate ID %x in device tree.\n", id);
@@ -175,12 +177,8 @@ static void init_cpu_info(struct bus *bus)
        }
}

void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
void arch_initialize_cpu(device_t cluster)
{
        size_t max_cpus;
        size_t i;
        struct cpu_info *ci;
        void (*entry)(void);
        struct bus *bus;

        if (cluster->path.type != DEVICE_PATH_CPU_CLUSTER) {
@@ -196,83 +194,11 @@ void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
        if (bus == NULL)
                return;

        /*
         * el3_init must be performed prior to prepare_secondary_cpu_startup.
         * This is important since el3_init initializes SCR values on BSP CPU
         * and then prepare_secondary_cpu_startup reads the initialized SCR
         * value and saves it for use by non-BSP CPUs.
         */
        el3_init();
        /* Mark current cpu online. */
        cpu_mark_online(cpu_info());
        entry = prepare_secondary_cpu_startup();

        /* Initialize the cpu_info structures. */
        init_cpu_info(bus);
        max_cpus = cntrl_ops->total_cpus();

        if (max_cpus > CONFIG_MAX_CPUS) {
                printk(BIOS_WARNING,
                        "max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n",
                        max_cpus, (size_t)CONFIG_MAX_CPUS);
                max_cpus = CONFIG_MAX_CPUS;
        }

        for (i = 0; i < max_cpus; i++) {
                device_t dev;
                struct cpu_action action;
                struct stopwatch sw;

                ci = cpu_info_for_cpu(i);
                dev = ci->cpu;

                /* Disregard CPUs not in device tree. */
                if (dev == NULL)
                        continue;

                /* Skip disabled CPUs. */
                if (!dev->enabled)
                        continue;

                if (!cpu_online(ci)) {
                        /* Start the CPU. */
                        printk(BIOS_DEBUG, "Starting CPU%x\n", ci->id);

                        if (cntrl_ops->start_cpu(ci->id, entry)) {
                                printk(BIOS_ERR,
                                        "Failed to start CPU%x\n", ci->id);
                                continue;
                        }
                        stopwatch_init_msecs_expire(&sw, 1000);
                        /* Wait for CPU to come online. */
                        while (!stopwatch_expired(&sw)) {
                                if (!cpu_online(ci))
                                        continue;
                                printk(BIOS_DEBUG,
                                        "CPU%x online in %ld usecs.\n",
                                        ci->id, stopwatch_duration_usecs(&sw));
                                break;
                        }
                }

                if (!cpu_online(ci)) {
                        printk(BIOS_DEBUG,
                                "CPU%x failed to come online in %ld usecs.\n",
                                ci->id, stopwatch_duration_usecs(&sw));
                        continue;
                }

                /* Send it the init action. */
                action.run = init_this_cpu;
                action.arg = ci;
                arch_run_on_cpu(ci->id, &action);
        }
}

void arch_secondary_cpu_init(void)
{
        /* Mark this CPU online. */
        cpu_mark_online(cpu_info());

        arch_cpu_wait_for_action();
        /* Send it the init action. */
        init_this_cpu();
}
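For reference, the removed bring-up path had each SoC hand arch_initialize_cpus() a cpu_control_ops with total_cpus/start_cpu callbacks (see the Tegra hunks further down), while the new arch_initialize_cpu() only takes the cluster device. A rough, self-contained sketch of that old contract with stubbed, made-up callbacks:

#include <stddef.h>
#include <stdio.h>

struct cpu_control_ops {
        size_t (*total_cpus)(void);                              /* max CPUs supported */
        int (*start_cpu)(unsigned int id, void (*entry)(void));  /* 0 on success */
};

static size_t cntrl_total_cpus(void) { return 2; }

static int cntrl_start_cpu(unsigned int id, void (*entry)(void))
{
        (void)entry;            /* a real SoC would program the reset entry */
        printf("would start CPU%u\n", id);
        return 0;
}

static struct cpu_control_ops cntrl_ops = {
        .total_cpus = cntrl_total_cpus,
        .start_cpu  = cntrl_start_cpu,
};

static void secondary_entry(void) { }

int main(void)
{
        /* Old flow: generic code asked the SoC to start each secondary CPU. */
        for (unsigned int i = 1; i < cntrl_ops.total_cpus(); i++)
                cntrl_ops.start_cpu(i, secondary_entry);
        /* New flow: only CPU0 is initialized, so no start_cpu hook is needed. */
        return 0;
}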
@@ -1,44 +0,0 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2014 Google Inc
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __ARCH_ARM64_INCLUDE_ARCH_STARTUP_H__
#define __ARCH_ARM64_INCLUDE_ARCH_STARTUP_H__

/* Every element occupies 8 bytes (64-bit entries) */
#define PER_ELEMENT_SIZE_BYTES 8
#define MAIR_INDEX 0
#define TCR_INDEX 1
#define TTBR0_INDEX 2
#define SCR_INDEX 3
#define VBAR_INDEX 4
#define CNTFRQ_INDEX 5
#define CPTR_INDEX 6
#define CPACR_INDEX 7
/* IMPORTANT!!! If any new element is added please update NUM_ELEMENTS */
#define NUM_ELEMENTS 8

#ifndef __ASSEMBLY__

/*
 * startup_save_cpu_data is used to save register values that need to be setup
 * when a CPU starts booting. This is used by secondary CPUs as well as resume
 * path to directly setup MMU and other related registers.
 */
void startup_save_cpu_data(void);

#endif

#endif /* __ARCH_ARM64_INCLUDE_ARCH_STARTUP_H__ */
@@ -37,148 +37,28 @@ struct cpu_driver {
        const struct cpu_device_id *id_table;
};

/* Action to run. */
struct cpu_action {
        void (*run)(void *arg);
        void *arg;
};

/*
 * Actions are queued to 'todo'. When picked up 'todo' is cleared. The
 * 'completed' field is set to the original 'todo' value when the action
 * is complete.
 */
struct cpu_action_queue {
        struct cpu_action *todo;
        struct cpu_action *completed;
};

struct cpu_info {
        device_t cpu;
        struct cpu_action_queue action_queue;
        unsigned int online;
        /* Current assumption is that id matches smp_processor_id(). */
        unsigned int id;
        uint64_t mpidr;
};

/* Obtain cpu_info for current executing CPU. */
struct cpu_info *cpu_info(void);

extern struct cpu_info *bsp_cpu_info;
extern struct cpu_info cpu_infos[CONFIG_MAX_CPUS];

static inline struct cpu_info *cpu_info_for_cpu(unsigned int id)
{
        return &cpu_infos[id];
}

/* Ran only by BSP at initial boot strapping. */
static inline void cpu_set_bsp(void)
{
        bsp_cpu_info = cpu_info();
}

static inline int cpu_is_bsp(void)
{
        return cpu_info() == bsp_cpu_info;
}

static inline int cpu_online(struct cpu_info *ci)
{
        return load_acquire(&ci->online) != 0;
}

static inline void cpu_mark_online(struct cpu_info *ci)
{
        ci->mpidr = read_affinity_mpidr();
        store_release(&ci->online, 1);
}

/* Provide number of CPUs online. */
size_t cpus_online(void);

/* Control routines for starting CPUs. */
struct cpu_control_ops {
        /* Return the maximum number of CPUs supported. */
        size_t (*total_cpus)(void);
        /*
         * Start the requested CPU and have it start running entry().
         * Returns 0 on success, < 0 on error.
         */
        int (*start_cpu)(unsigned int id, void (*entry)(void));
};

/*
 * Initialize all DEVICE_PATH_CPUS under the DEVICE_PATH_CPU_CLUSTER cluster.
 * type DEVICE_PATH_CPUS. Start up is controlled by cntrl_ops.
 */
void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops);

/*
 * Run cpu_action returning < 0 on error, 0 on success. There are synchronous
 * and asynchronous methods. Both cases ensure the action has been picked up
 * by the target cpu. The synchronous variants will wait for the action to
 * be completed before returning.
 *
 * Though the current implementation allows queuing actions on the main cpu,
 * the main cpu doesn't process its own queue.
 */
int arch_run_on_cpu(unsigned int cpu, struct cpu_action *action);
int arch_run_on_all_cpus(struct cpu_action *action);
int arch_run_on_all_cpus_but_self(struct cpu_action *action);
int arch_run_on_cpu_async(unsigned int cpu, struct cpu_action *action);
int arch_run_on_all_cpus_async(struct cpu_action *action);
int arch_run_on_all_cpus_but_self_async(struct cpu_action *action);

/* Wait for actions to be performed. */
void arch_cpu_wait_for_action(void);
/* Initialize CPU0 under the DEVICE_PATH_CPU_CLUSTER cluster. */
void arch_initialize_cpu(device_t cluster);

#endif /* !__PRE_RAM__ */

/*
 * Returns logical cpu in range [0:MAX_CPUS). SoC should define this.
 * Additionally, this is needed early in arm64 init so it should not
 * rely on a stack. Standard clobber list is fair game: x0-x7 and x0
 * returns the logical cpu number.
 */
unsigned int smp_processor_id(void);
static inline unsigned int smp_processor_id(void) { return 0; }

/*
 * Stages and rmodules have 2 entry points: BSP and non-BSP. Provided
 * a pointer the correct non-BSP entry point will be returned. The
 * first instruction is for BSP and the 2nd is for non-BSP. Instructions
 * are all 32-bit on arm64.
 */
static inline void *secondary_entry_point(void *e)
{
        uintptr_t nonbsp = (uintptr_t)e;

        return (void *)(nonbsp + sizeof(uint32_t));
}

/*
 * The arm64_cpu_startup() initializes a CPU's exception stack and regular
 * stack as well initializing the C environment for the processor. It
 * calls into the array of function pointers at symbol c_entry depending
 * on BSP state. Note that arm64_cpu_startup contains secondary entry
 * point which can be obtained by secondary_entry_point().
 * The arm64_cpu_startup() initializes CPU's exception stack and regular
 * stack as well initializing the C environment for the processor. Finally it
 * calls into c_entry.
 */
void arm64_cpu_startup(void);

/*
 * The arm64_cpu_startup_resume() initializes a CPU's exception stack and
 * regular stack as well initializing the C environment for the processor. It
 * calls into the array of function pointers at symbol c_entry depending
 * on BSP state. Note that arm64_cpu_startup contains secondary entry
 * point which can be obtained by secondary_entry_point().
 * Additionally, it also restores saved register data and enables MMU, caches
 * and exceptions before jumping to C environment for both BSP and non-BSP CPUs.
 */
void arm64_cpu_startup_resume(void);

/*
 * The arm64_arch_timer_init() initializes the per CPU's cntfrq register of
 * The arm64_arch_timer_init() initializes the CPU's cntfrq register of
 * ARM arch timer.
 */
void arm64_arch_timer_init(void);
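One detail worth pulling out of the removed cpu.h code: arm64 instructions are 4 bytes and the startup entry began with two branches (BSP first, secondary second), so the secondary entry point is simply the BSP entry plus sizeof(uint32_t). The helper below is copied from the removed header; the main() harness and fake_startup array around it are only an illustration:

#include <stdint.h>
#include <stdio.h>

static inline void *secondary_entry_point(void *e)
{
        uintptr_t nonbsp = (uintptr_t)e;

        return (void *)(nonbsp + sizeof(uint32_t));
}

int main(void)
{
        uint32_t fake_startup[2] = { 0, 0 }; /* stands in for arm64_cpu_startup */

        printf("BSP entry:       %p\n", (void *)fake_startup);
        printf("secondary entry: %p\n", secondary_entry_point(fake_startup));
        return 0;
}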
@@ -1,29 +0,0 @@
#ifndef ARCH_SMP_SPINLOCK_H
#define ARCH_SMP_SPINLOCK_H

#include <arch/barrier.h>
#include <stdint.h>

typedef struct {
        volatile uint32_t lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define DECLARE_SPIN_LOCK(x) static spinlock_t x = SPIN_LOCK_UNLOCKED;

static inline void spin_lock(spinlock_t *spin)
{
        while (1) {
                if (load_acquire_exclusive(&spin->lock) != 0)
                        continue;
                if (store_release_exclusive(&spin->lock, 1))
                        break;
        }
}

static inline void spin_unlock(spinlock_t *spin)
{
        store_release(&spin->lock, 0);
}

#endif
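The deleted spinlock above is built on coreboot's ARM exclusive load-acquire/store-release helpers. As a rough, illustrative analogue only (not the removed implementation itself), the same shape expressed with portable C11 atomics looks like this:

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_uint lock; } spinlock_t;

static void spin_lock(spinlock_t *spin)
{
        unsigned int expected;

        do {
                /* Busy-poll until the lock word reads as free. */
                while (atomic_load_explicit(&spin->lock,
                                            memory_order_acquire) != 0)
                        ;
                expected = 0;
        } while (!atomic_compare_exchange_weak_explicit(&spin->lock, &expected, 1,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
}

static void spin_unlock(spinlock_t *spin)
{
        atomic_store_explicit(&spin->lock, 0, memory_order_release);
}

int main(void)
{
        spinlock_t lk = { 0 };

        spin_lock(&lk);
        puts("holding lock");
        spin_unlock(&lk);
        return 0;
}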
@@ -15,51 +15,41 @@

/*
 * ======================== stage_entry.S =====================================
 * This file acts as an entry point to the different stages of arm64 as well as
 * for the secure monitor. They share the same process of setting up stacks and
 * jumping to c code. It is important to save x25 from corruption as it contains
 * the argument for secure monitor.
 * This file acts as an entry point to the different stages of arm64. They share
 * the same process of setting up stacks and jumping to c code. It is important
 * to save x25 from corruption as it contains the argument for rmodule.
 * =============================================================================
 */

#include <arch/asm.h>
#define __ASSEMBLY__
#include <arch/lib_helpers.h>
#include <arch/startup.h>

#define STACK_SZ CONFIG_STACK_SIZE
#define EXCEPTION_STACK_SZ CONFIG_STACK_SIZE

/*
 * The stacks for each of the armv8 cores grows down from _estack. It is sized
 * according to MAX_CPUS. Additionally provide exception stacks for each CPU.
 * Stack for armv8 CPU grows down from _estack. Additionally, provide exception
 * stack for the CPU.
 */
.section .bss, "aw", @nobits

.global _arm64_startup_data
.balign 8
_arm64_startup_data:
.space NUM_ELEMENTS*PER_ELEMENT_SIZE_BYTES

.global _stack
.global _estack
.balign STACK_SZ
_stack:
.space CONFIG_MAX_CPUS*STACK_SZ
.space STACK_SZ
_estack:

.global _stack_exceptions
.global _estack_exceptions
.balign EXCEPTION_STACK_SZ
_stack_exceptions:
.space CONFIG_MAX_CPUS*EXCEPTION_STACK_SZ
.space EXCEPTION_STACK_SZ
_estack_exceptions:

ENTRY(cpu_get_stack)
        mov x1, #STACK_SZ
        mul x0, x0, x1
        ldr x1, 1f
        sub x0, x1, x0
        ldr x0, 1f
        ret
.align 3
1:
@@ -67,10 +57,7 @@ ENTRY(cpu_get_stack)
ENDPROC(cpu_get_stack)

ENTRY(cpu_get_exception_stack)
        mov x1, #EXCEPTION_STACK_SZ
        mul x0, x0, x1
        ldr x1, 1f
        sub x0, x1, x0
        ldr x0, 1f
        ret
.align 3
1:
@@ -87,11 +74,7 @@ ENDPROC(cpu_get_exception_stack)
 * any rmodules.
 */
ENTRY(arm64_c_environment)
        bl smp_processor_id /* x0 = cpu */
        mov x24, x0

        /* Set the exception stack for this cpu. */
        /* Set the exception stack for the cpu. */
        bl cpu_get_exception_stack
        msr SPSel, #1
        isb
@@ -101,16 +84,12 @@ ENTRY(arm64_c_environment)
        msr SPSel, #0
        isb

        /* Set stack for this cpu. */
        mov x0, x24 /* x0 = cpu */
        /* Set the non-exception stack for the cpu. */
        bl cpu_get_stack
        mov sp, x0

        /* Get entry point by dereferencing c_entry. */
        ldr x1, 1f
        /* Retrieve entry in c_entry array using x26 as the index. */
        adds x1, x1, x26, lsl #3
        ldr x1, [x1]
        /* Move back the arguments from x25 to x0 */
        mov x0, x25
        br x1
@@ -119,21 +98,7 @@ ENTRY(arm64_c_environment)
        .quad c_entry
ENDPROC(arm64_c_environment)

/* The first 2 instructions are for BSP and secondary CPUs,
 * respectively. x26 holds the index into c_entry array. */
.macro split_bsp_path
        b 2000f
        b 2001f
2000:
        mov x26, #0
        b 2002f
2001:
        mov x26, #1
2002:
.endm

ENTRY(_start)
        split_bsp_path
        /* Save any arguments to current rmodule in x25 */
        mov x25, x0
        b arm64_c_environment
@@ -153,77 +118,12 @@ ENDPROC(_start)
        write_current sctlr, x0, x1
.endm

/*
 * This macro assumes x2 has base address and returns value read in x0
 * x1 is used as temporary register.
 */
.macro get_element_addr index
        add x1, x2, #(\index * PER_ELEMENT_SIZE_BYTES)
        ldr x0, [x1]
.endm

/*
 * Uses following registers:
 * x0 = reading stored value
 * x1 = temp reg
 * x2 = base address of saved data region
 */
.macro startup_restore
        adr x2, _arm64_startup_data

        get_element_addr MAIR_INDEX
        write_current mair, x0, x1

        get_element_addr TCR_INDEX
        write_current tcr, x0, x1

        get_element_addr TTBR0_INDEX
        write_current ttbr0, x0, x1

        get_element_addr SCR_INDEX
        write_el3 scr, x0, x1

        get_element_addr VBAR_INDEX
        write_current vbar, x0, x1

        get_element_addr CNTFRQ_INDEX
        write_el0 cntfrq, x0, x1

        get_element_addr CPTR_INDEX
        write_el3 cptr, x0, x1

        get_element_addr CPACR_INDEX
        write_el1 cpacr, x0, x1

        dsb sy
        isb

        tlbiall_current x1
        read_current x0, sctlr
        orr x0, x0, #(1 << 12) /* Enable Instruction Cache */
        orr x0, x0, #(1 << 2)  /* Enable Data/Unified Cache */
        orr x0, x0, #(1 << 0)  /* Enable MMU */
        write_current sctlr, x0, x1

        dsb sy
        isb
.endm

CPU_RESET_ENTRY(arm64_cpu_startup)
        split_bsp_path
        bl arm64_cpu_early_setup
        setup_sctlr
        b arm64_c_environment
ENDPROC(arm64_cpu_startup)

CPU_RESET_ENTRY(arm64_cpu_startup_resume)
        split_bsp_path
        bl arm64_cpu_early_setup
        setup_sctlr
        startup_restore
        b arm64_c_environment
ENDPROC(arm64_cpu_startup_resume)

/*
 * stage_entry is defined as a weak symbol to allow SoCs/CPUs to define a custom
 * entry point to perform any fixups that need to be done immediately after
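The stage_entry.S hunks above shrink the BSS stack reservations from per-CPU arrays to a single stack, and cpu_get_stack/cpu_get_exception_stack drop their index arithmetic (mul/sub) in favor of a plain load of _estack. A small C sketch of that arithmetic difference, with made-up MAX_CPUS, STACK_SZ and _estack values:

#include <stdint.h>
#include <stdio.h>

#define MAX_CPUS 2
#define STACK_SZ 0x1000
#define ESTACK   0x80004000u    /* hypothetical _estack address */

static uintptr_t cpu_get_stack_old(unsigned int cpu)
{
        return ESTACK - (uintptr_t)cpu * STACK_SZ; /* mul/sub in the old asm */
}

static uintptr_t cpu_get_stack_new(void)
{
        return ESTACK;                             /* plain load in the new asm */
}

int main(void)
{
        for (unsigned int cpu = 0; cpu < MAX_CPUS; cpu++)
                printf("old CPU%u stack top: 0x%lx\n", cpu,
                       (unsigned long)cpu_get_stack_old(cpu));
        printf("new stack top:       0x%lx\n",
               (unsigned long)cpu_get_stack_new());
        return 0;
}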
@@ -1,53 +0,0 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2014 Google Inc
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/startup.h>
#include <console/console.h>

/* This space is defined in stage_entry.S. */
extern u8 _arm64_startup_data[];

static inline void save_element(size_t index, uint64_t val)
{
        uint64_t *ptr = (uint64_t *)_arm64_startup_data;

        ptr[index] = val;
}

/*
 * startup_save_cpu_data is used to save register values that need to be setup
 * when a CPU starts booting. This is used by secondary CPUs as well as resume
 * path to directly setup MMU and other related registers.
 */
void startup_save_cpu_data(void)
{
        save_element(MAIR_INDEX, raw_read_mair_current());
        save_element(TCR_INDEX, raw_read_tcr_current());
        save_element(TTBR0_INDEX, raw_read_ttbr0_current());
        save_element(VBAR_INDEX, raw_read_vbar_current());
        save_element(CNTFRQ_INDEX, raw_read_cntfrq_el0());
        save_element(CPACR_INDEX, raw_read_cpacr_el1());

        if (get_current_el() == EL3) {
                save_element(SCR_INDEX, raw_read_scr_el3());
                save_element(CPTR_INDEX, raw_read_cptr_el3());
        }

        dcache_clean_by_mva(_arm64_startup_data,
                        NUM_ELEMENTS * PER_ELEMENT_SIZE_BYTES);
}
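The removed startup.c and arch/startup.h implemented a small register-save area: one 64-bit slot per system register, indexed by the *_INDEX constants and flushed with dcache_clean_by_mva() so the resume path could restore it from assembly. A self-contained sketch of that layout; the backing array here is a plain C array rather than the stage_entry.S reservation, and the TTBR0 value is a placeholder:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PER_ELEMENT_SIZE_BYTES 8
enum { MAIR_INDEX, TCR_INDEX, TTBR0_INDEX, SCR_INDEX,
       VBAR_INDEX, CNTFRQ_INDEX, CPTR_INDEX, CPACR_INDEX, NUM_ELEMENTS };

/* In coreboot this space lives in stage_entry.S; here it is a local array. */
static _Alignas(8) uint8_t _arm64_startup_data[NUM_ELEMENTS * PER_ELEMENT_SIZE_BYTES];

static void save_element(size_t index, uint64_t val)
{
        uint64_t *ptr = (uint64_t *)_arm64_startup_data;

        ptr[index] = val;
}

int main(void)
{
        save_element(TTBR0_INDEX, 0x80000000u); /* placeholder register value */
        printf("TTBR0 slot offset: %d bytes\n",
               TTBR0_INDEX * PER_ELEMENT_SIZE_BYTES);
        return 0;
}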
@@ -16,7 +16,6 @@
chip soc/nvidia/tegra132
        device cpu_cluster 0 on
                device cpu 0 on end
                device cpu 1 on end
        end

        register "display_controller" = "TEGRA_ARM_DISPLAYA"
@@ -12,7 +12,6 @@ config SOC_NVIDIA_TEGRA132
        select HAVE_HARD_RESET
        select HAVE_UART_SPECIAL
        select ARM_BOOTBLOCK_CUSTOM
        select SMP
        select GENERIC_GPIO_LIB
        select HAS_PRECBMEM_TIMESTAMP_REGION
@@ -61,7 +61,6 @@ ramstage-y += 32bit_reset.S
ramstage-y += addressmap.c
ramstage-y += cbmem.c
ramstage-y += cpu.c
ramstage-y += cpu_lib.S
ramstage-y += clock.c
ramstage-$(CONFIG_MAINBOARD_DO_NATIVE_VGA_INIT) += dc.c
ramstage-$(CONFIG_MAINBOARD_DO_DSI_INIT) += dsi.c
@@ -1,23 +0,0 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

.text
.global smp_processor_id
smp_processor_id:
        /* Core 0 and 1 are encoded in the Aff0 (7:0) field of MPIDR_EL1. */
        mrs x0, mpidr_el1
        uxtb w0, w0
        ret
@@ -55,25 +55,6 @@ static void soc_read_resources(device_t dev)
        ram_resource(dev, index++, begin * KiB, size * KiB);
}

static size_t cntrl_total_cpus(void)
{
        return CONFIG_MAX_CPUS;
}

static int cntrl_start_cpu(unsigned int id, void (*entry)(void))
{
        if (id != 1)
                return -1;
        start_cpu(1, entry);
        return 0;
}

static struct cpu_control_ops cntrl_ops = {
        .total_cpus = cntrl_total_cpus,
        .start_cpu = cntrl_start_cpu,
};


static void lock_down_vpr(void)
{
        struct tegra_mc_regs *regs = (void *)(uintptr_t)TEGRA_MC_BASE;
@@ -87,7 +68,7 @@ static void soc_init(device_t dev)
{
        clock_init_arm_generic_timer();

        arch_initialize_cpus(dev, &cntrl_ops);
        arch_initialize_cpu(dev);

        /* Lock down VPR */
        lock_down_vpr();
@@ -64,7 +64,6 @@ romstage-$(CONFIG_DRIVERS_UART) += uart.c
ramstage-y += addressmap.c
ramstage-y += cbmem.c
ramstage-y += cpu.c
ramstage-y += cpu_lib.S
ramstage-y += clock.c
ramstage-$(CONFIG_MAINBOARD_DO_NATIVE_VGA_INIT) += dc.c
ramstage-$(CONFIG_MAINBOARD_DO_DSI_INIT) += dsi.c
@@ -1,23 +0,0 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright 2014 Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

.text
.global smp_processor_id
smp_processor_id:
        /* Core 0 and 1 are encoded in the Aff0 (7:0) field of MPIDR_EL1. */
        mrs x0, mpidr_el1
        uxtb w0, w0
        ret
@@ -58,30 +58,11 @@ static void soc_read_resources(device_t dev)
        ram_resource(dev, index++, begin * KiB, size * KiB);
}

static size_t cntrl_total_cpus(void)
{
        return CONFIG_MAX_CPUS;
}

static int cntrl_start_cpu(unsigned int id, void (*entry)(void))
{
        if (id >= CONFIG_MAX_CPUS)
                return -1;
        start_cpu(id, entry);
        return 0;
}

static struct cpu_control_ops cntrl_ops = {
        .total_cpus = cntrl_total_cpus,
        .start_cpu = cntrl_start_cpu,
};


static void soc_init(device_t dev)
{
        clock_init_arm_generic_timer();

        arch_initialize_cpus(dev, &cntrl_ops);
        arch_initialize_cpu(dev);

        if (!IS_ENABLED(CONFIG_MAINBOARD_DO_NATIVE_VGA_INIT))
                return;
@@ -132,9 +113,8 @@ struct chip_operations soc_nvidia_tegra210_ops = {

static void tegra210_cpu_init(device_t cpu)
{
        if (cpu_is_bsp())
                if (tegra210_run_mtc() != 0)
                        printk(BIOS_ERR, "MTC: No training data.\n");
        if (tegra210_run_mtc() != 0)
                printk(BIOS_ERR, "MTC: No training data.\n");
}

static const struct cpu_device_id ids[] = {