arm64: split cpu.c

The cpu.c contains some helpful constructs as well as ramstage
devicetree handling. Split the 2 pieces so that cpu.c can be
reused in secmon.

BUG=chrome-os-partner:30785
BRANCH=None
TEST=Built and booted.

Change-Id: Iec0f8462411897a255f7aa289191ce6761e08bb0
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Original-Commit-Id: 4f30f1186950424b65df6858965a09ca51637e4f
Original-Change-Id: Ie87bd35bf1ccd777331250dcdaae07dab82d3d18
Original-Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Original-Reviewed-on: https://chromium-review.googlesource.com/218842
Original-Reviewed-by: Furquan Shaikh <furquan@chromium.org>
Reviewed-on: http://review.coreboot.org/9089
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
This commit is contained in:
Aaron Durbin 2014-09-18 11:52:16 -05:00 committed by Patrick Georgi
parent 4f89d97c06
commit b30c9b1c9a
4 changed files with 256 additions and 220 deletions

View File

@ -116,6 +116,7 @@ ramstage-y += c_entry.c
ramstage-y += stages.c ramstage-y += stages.c
ramstage-y += div0.c ramstage-y += div0.c
ramstage-y += cpu.c ramstage-y += cpu.c
ramstage-y += cpu_ramstage.c
ramstage-y += eabi_compat.c ramstage-y += eabi_compat.c
ramstage-y += boot.c ramstage-y += boot.c
ramstage-y += tables.c ramstage-y += tables.c

View File

@ -19,163 +19,19 @@
#include <stdint.h> #include <stdint.h>
#include <stdlib.h> #include <stdlib.h>
#include <arch/barrier.h>
#include <arch/lib_helpers.h> #include <arch/lib_helpers.h>
#include <cpu/cpu.h> #include <cpu/cpu.h>
#include <console/console.h> #include <console/console.h>
#include <gic.h>
#include "cpu-internal.h" #include "cpu-internal.h"
static struct cpu_info cpu_infos[CONFIG_MAX_CPUS]; struct cpu_info cpu_infos[CONFIG_MAX_CPUS];
struct cpu_info *bsp_cpu_info; struct cpu_info *bsp_cpu_info;
static inline struct cpu_info *cpu_info_for_cpu(unsigned int id)
{
return &cpu_infos[id];
}
struct cpu_info *cpu_info(void) struct cpu_info *cpu_info(void)
{ {
return cpu_info_for_cpu(smp_processor_id()); return cpu_info_for_cpu(smp_processor_id());
} }
static int cpu_online(struct cpu_info *ci)
{
return load_acquire(&ci->online) != 0;
}
static void cpu_mark_online(struct cpu_info *ci)
{
store_release(&ci->online, 1);
}
static inline void cpu_disable_dev(device_t dev)
{
dev->enabled = 0;
}
static struct cpu_driver *locate_cpu_driver(uint32_t midr)
{
struct cpu_driver *cur;
for (cur = cpu_drivers; cur != ecpu_drivers; cur++) {
const struct cpu_device_id *id_table = cur->id_table;
for (; id_table->midr != CPU_ID_END; id_table++) {
if (id_table->midr == midr)
return cur;
}
}
return NULL;
}
static int cpu_set_device_operations(device_t dev)
{
uint32_t midr;
struct cpu_driver *driver;
midr = raw_read_midr_el1();
driver = locate_cpu_driver(midr);
if (driver == NULL) {
printk(BIOS_WARNING, "No CPU driver for MIDR %08x\n", midr);
return -1;
}
dev->ops = driver->ops;
return 0;
}
/* Set up default SCR values. */
static void el3_init(void)
{
uint32_t scr;
if (get_current_el() != EL3)
return;
scr = raw_read_scr_el3();
/* Default to non-secure EL1 and EL0. */
scr &= ~(SCR_NS_MASK);
scr |= SCR_NS_ENABLE;
/* Disable IRQ, FIQ, and external abort interrupt routing. */
scr &= ~(SCR_IRQ_MASK | SCR_FIQ_MASK | SCR_EA_MASK);
scr |= SCR_IRQ_DISABLE | SCR_FIQ_DISABLE | SCR_EA_DISABLE;
/* Enable HVC */
scr &= ~(SCR_HVC_MASK);
scr |= SCR_HVC_ENABLE;
/* Disable SMC */
scr &= ~(SCR_SMC_MASK);
scr |= SCR_SMC_DISABLE;
/* Disable secure instruction fetches. */
scr &= ~(SCR_SIF_MASK);
scr |= SCR_SIF_DISABLE;
/* All lower exception levels 64-bit by default. */
scr &= ~(SCR_RW_MASK);
scr |= SCR_LOWER_AARCH64;
/* Disable secure EL1 access to secure timer. */
scr &= ~(SCR_ST_MASK);
scr |= SCR_ST_DISABLE;
/* Don't trap on WFE or WFI instructions. */
scr &= ~(SCR_TWI_MASK | SCR_TWE_MASK);
scr |= SCR_TWI_DISABLE | SCR_TWE_DISABLE;
raw_write_scr_el3(scr);
isb();
}
static void init_this_cpu(void *arg)
{
struct cpu_info *ci = arg;
device_t dev = ci->cpu;
cpu_set_device_operations(dev);
el3_init();
/* Initialize the GIC. */
gic_init();
if (dev->ops != NULL && dev->ops->init != NULL) {
dev->initialized = 1;
printk(BIOS_DEBUG, "%s init\n", dev_path(dev));
dev->ops->init(dev);
}
}
/* Fill in cpu_info structures according to device tree. */
static void init_cpu_info(struct bus *bus)
{
device_t cur;
for (cur = bus->children; cur != NULL; cur = cur->sibling) {
struct cpu_info *ci;
unsigned int id = cur->path.cpu.id;
if (cur->path.type != DEVICE_PATH_CPU)
continue;
/* IDs are currently mapped 1:1 with logical CPU numbers. */
if (id >= CONFIG_MAX_CPUS) {
printk(BIOS_WARNING,
"CPU id %x too large. Disabling.\n", id);
cpu_disable_dev(cur);
continue;
}
ci = cpu_info_for_cpu(id);
if (ci->cpu != NULL) {
printk(BIOS_WARNING,
"Duplicate ID %x in device tree.\n", id);
cpu_disable_dev(cur);
}
ci->cpu = cur;
ci->id = cur->path.cpu.id;
}
/* Mark current cpu online. */
cpu_mark_online(cpu_info());
}
static inline int action_queue_empty(struct cpu_action_queue *q) static inline int action_queue_empty(struct cpu_action_queue *q)
{ {
return load_acquire_exclusive(&q->todo) == NULL; return load_acquire_exclusive(&q->todo) == NULL;
@ -246,8 +102,8 @@ static void action_run_on_cpu(struct cpu_info *ci, struct cpu_action *action,
{ {
struct cpu_action_queue *q = &ci->action_queue; struct cpu_action_queue *q = &ci->action_queue;
/* Don't run actions on non-online or enabled devices. */ /* Don't run actions on non-online cpus. */
if (!cpu_online(ci) || ci->cpu == NULL || !ci->cpu->enabled) if (!cpu_online(ci))
return; return;
if (ci->id == smp_processor_id()) { if (ci->id == smp_processor_id()) {
@ -333,14 +189,12 @@ int arch_run_on_all_cpus_but_self_async(struct cpu_action *action)
return __arch_run_on_all_cpus_but_self(action, 0); return __arch_run_on_all_cpus_but_self(action, 0);
} }
void arch_secondary_cpu_init(void)
void arch_cpu_wait_for_action(void)
{ {
struct cpu_info *ci = cpu_info(); struct cpu_info *ci = cpu_info();
struct cpu_action_queue *q = &ci->action_queue; struct cpu_action_queue *q = &ci->action_queue;
/* Mark this CPU online. */
cpu_mark_online(ci);
while (1) { while (1) {
struct cpu_action *orig; struct cpu_action *orig;
struct cpu_action action; struct cpu_action action;
@ -351,72 +205,3 @@ void arch_secondary_cpu_init(void)
action_queue_complete(q, orig); action_queue_complete(q, orig);
} }
} }
void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
{
size_t max_cpus;
size_t i;
struct cpu_info *ci;
void (*entry)(void);
struct bus *bus;
if (cluster->path.type != DEVICE_PATH_CPU_CLUSTER) {
printk(BIOS_ERR,
"CPU init failed. Device is not a CPU_CLUSTER: %s\n",
dev_path(cluster));
return;
}
bus = cluster->link_list;
/* Check if no children under this device. */
if (bus == NULL)
return;
entry = prepare_secondary_cpu_startup();
/* Initialize the cpu_info structures. */
init_cpu_info(bus);
max_cpus = cntrl_ops->total_cpus();
if (max_cpus > CONFIG_MAX_CPUS) {
printk(BIOS_WARNING,
"max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n",
max_cpus, (size_t)CONFIG_MAX_CPUS);
max_cpus = CONFIG_MAX_CPUS;
}
for (i = 0; i < max_cpus; i++) {
device_t dev;
struct cpu_action action;
ci = cpu_info_for_cpu(i);
dev = ci->cpu;
/* Disregard CPUs not in device tree. */
if (dev == NULL)
continue;
/* Skip disabled CPUs. */
if (!dev->enabled)
continue;
if (!cpu_online(ci)) {
/* Start the CPU. */
printk(BIOS_DEBUG, "Starting CPU%x\n", ci->id);
if (cntrl_ops->start_cpu(ci->id, entry)) {
printk(BIOS_ERR,
"Failed to start CPU%x\n", ci->id);
continue;
}
/* Wait for CPU to come online. */
while (!cpu_online(ci));
printk(BIOS_DEBUG, "CPU%x online.\n", ci->id);
}
/* Send it the init action. */
action.run = init_this_cpu;
action.arg = ci;
action_run_on_cpu(ci, &action, 1);
}
}

View File

@ -0,0 +1,230 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA, 02110-1301 USA
*/
#include <stdint.h>
#include <stdlib.h>
#include <arch/lib_helpers.h>
#include <cpu/cpu.h>
#include <console/console.h>
#include <gic.h>
#include "cpu-internal.h"
/* Mark a device-tree CPU device as disabled so later passes skip it. */
static inline void cpu_disable_dev(device_t dev)
{
dev->enabled = 0;
}
/*
 * Scan the linker-provided cpu_driver table for a driver whose id_table
 * lists the given MIDR. Returns the matching driver or NULL.
 */
static struct cpu_driver *locate_cpu_driver(uint32_t midr)
{
	struct cpu_driver *drv = cpu_drivers;

	while (drv != ecpu_drivers) {
		const struct cpu_device_id *entry;

		for (entry = drv->id_table; entry->midr != CPU_ID_END; entry++) {
			if (entry->midr == midr)
				return drv;
		}
		drv++;
	}

	return NULL;
}
/*
 * Attach the matching cpu_driver's operations to dev based on this
 * CPU's MIDR value. Returns 0 on success, -1 if no driver matches.
 */
static int cpu_set_device_operations(device_t dev)
{
	const uint32_t midr = raw_read_midr_el1();
	struct cpu_driver *driver = locate_cpu_driver(midr);

	if (driver != NULL) {
		dev->ops = driver->ops;
		return 0;
	}

	printk(BIOS_WARNING, "No CPU driver for MIDR %08x\n", midr);
	return -1;
}
/* Set up default SCR values. */
static void el3_init(void)
{
	uint32_t scr;

	/* SCR_EL3 is only accessible from EL3. */
	if (get_current_el() != EL3)
		return;

	scr = raw_read_scr_el3();
	/*
	 * Clear every field we configure, then apply the defaults:
	 * non-secure EL1/EL0; IRQ, FIQ and external-abort routing disabled;
	 * HVC enabled; SMC disabled; secure instruction fetches disabled;
	 * all lower exception levels AArch64; secure EL1 access to the
	 * secure timer disabled; no trapping of WFI/WFE.
	 */
	scr &= ~(SCR_NS_MASK | SCR_IRQ_MASK | SCR_FIQ_MASK | SCR_EA_MASK |
		 SCR_HVC_MASK | SCR_SMC_MASK | SCR_SIF_MASK | SCR_RW_MASK |
		 SCR_ST_MASK | SCR_TWI_MASK | SCR_TWE_MASK);
	scr |= SCR_NS_ENABLE | SCR_IRQ_DISABLE | SCR_FIQ_DISABLE |
	       SCR_EA_DISABLE | SCR_HVC_ENABLE | SCR_SMC_DISABLE |
	       SCR_SIF_DISABLE | SCR_LOWER_AARCH64 | SCR_ST_DISABLE |
	       SCR_TWI_DISABLE | SCR_TWE_DISABLE;
	raw_write_scr_el3(scr);
	isb();
}
/*
 * Per-CPU initialization action, executed on the target CPU itself:
 * attach the CPU driver ops, configure EL3 defaults, bring up the GIC,
 * then run the driver's init hook, if one is provided.
 */
static void init_this_cpu(void *arg)
{
struct cpu_info *ci = arg;
device_t dev = ci->cpu;
/* Ops must be attached before the init hook lookup below. */
cpu_set_device_operations(dev);
el3_init();
/* Initialize the GIC. */
gic_init();
if (dev->ops != NULL && dev->ops->init != NULL) {
dev->initialized = 1;
printk(BIOS_DEBUG, "%s init\n", dev_path(dev));
dev->ops->init(dev);
}
}
/*
 * Fill in cpu_info structures according to device tree.
 * Walks the bus's children, mapping each DEVICE_PATH_CPU device to the
 * cpu_infos[] slot matching its id, disabling devices with out-of-range
 * or duplicate ids. Finally marks the calling (current) CPU online.
 */
static void init_cpu_info(struct bus *bus)
{
	device_t cur;

	for (cur = bus->children; cur != NULL; cur = cur->sibling) {
		struct cpu_info *ci;
		unsigned int id = cur->path.cpu.id;

		if (cur->path.type != DEVICE_PATH_CPU)
			continue;

		/* IDs are currently mapped 1:1 with logical CPU numbers. */
		if (id >= CONFIG_MAX_CPUS) {
			printk(BIOS_WARNING,
				"CPU id %x too large. Disabling.\n", id);
			cpu_disable_dev(cur);
			continue;
		}

		ci = cpu_info_for_cpu(id);
		if (ci->cpu != NULL) {
			printk(BIOS_WARNING,
				"Duplicate ID %x in device tree.\n", id);
			cpu_disable_dev(cur);
			/*
			 * Keep the first device registered for this id.
			 * Previously the disabled duplicate overwrote the
			 * valid entry below, orphaning the original device
			 * so it was later skipped as "not enabled".
			 */
			continue;
		}

		ci->cpu = cur;
		ci->id = cur->path.cpu.id;
	}

	/* Mark current cpu online. */
	cpu_mark_online(cpu_info());
}
/*
 * Bring up and initialize all CPUs under a CPU_CLUSTER device.
 * For each device-tree CPU: start it via cntrl_ops->start_cpu() if it is
 * not already online, wait for it to report online, then send it the
 * init_this_cpu action so the CPU initializes itself.
 */
void arch_initialize_cpus(device_t cluster, struct cpu_control_ops *cntrl_ops)
{
size_t max_cpus;
size_t i;
struct cpu_info *ci;
void (*entry)(void);
struct bus *bus;
if (cluster->path.type != DEVICE_PATH_CPU_CLUSTER) {
printk(BIOS_ERR,
"CPU init failed. Device is not a CPU_CLUSTER: %s\n",
dev_path(cluster));
return;
}
bus = cluster->link_list;
/* Check if no children under this device. */
if (bus == NULL)
return;
/* Secondary CPUs begin execution at this entry point. */
entry = prepare_secondary_cpu_startup();
/* Initialize the cpu_info structures. */
init_cpu_info(bus);
max_cpus = cntrl_ops->total_cpus();
/* Clamp to the compile-time limit: cpu_infos[] has CONFIG_MAX_CPUS slots. */
if (max_cpus > CONFIG_MAX_CPUS) {
printk(BIOS_WARNING,
"max_cpus (%zu) exceeds CONFIG_MAX_CPUS (%zu).\n",
max_cpus, (size_t)CONFIG_MAX_CPUS);
max_cpus = CONFIG_MAX_CPUS;
}
for (i = 0; i < max_cpus; i++) {
device_t dev;
struct cpu_action action;
ci = cpu_info_for_cpu(i);
dev = ci->cpu;
/* Disregard CPUs not in device tree. */
if (dev == NULL)
continue;
/* Skip disabled CPUs. */
if (!dev->enabled)
continue;
if (!cpu_online(ci)) {
/* Start the CPU. */
printk(BIOS_DEBUG, "Starting CPU%x\n", ci->id);
if (cntrl_ops->start_cpu(ci->id, entry)) {
printk(BIOS_ERR,
"Failed to start CPU%x\n", ci->id);
continue;
}
/* Wait for CPU to come online. */
/* NOTE(review): busy-wait with no timeout — hangs if the CPU never comes up. */
while (!cpu_online(ci));
printk(BIOS_DEBUG, "CPU%x online.\n", ci->id);
}
/* Send it the init action. */
action.run = init_this_cpu;
action.arg = ci;
arch_run_on_cpu(ci->id, &action);
}
}
/* Entry for secondary CPUs: publish online state, then service actions. */
void arch_secondary_cpu_init(void)
{
	struct cpu_info *ci = cpu_info();

	/* Let the BSP's bring-up loop observe this CPU as online. */
	cpu_mark_online(ci);
	arch_cpu_wait_for_action();
}

View File

@ -23,6 +23,7 @@
#define asmlinkage #define asmlinkage
#if !defined(__PRE_RAM__) #if !defined(__PRE_RAM__)
#include <arch/barrier.h>
#include <device/device.h> #include <device/device.h>
enum { enum {
@ -67,6 +68,12 @@ struct cpu_info {
struct cpu_info *cpu_info(void); struct cpu_info *cpu_info(void);
extern struct cpu_info *bsp_cpu_info; extern struct cpu_info *bsp_cpu_info;
extern struct cpu_info cpu_infos[CONFIG_MAX_CPUS];
static inline struct cpu_info *cpu_info_for_cpu(unsigned int id)
{
return &cpu_infos[id];
}
/* Ran only by BSP at initial boot strapping. */ /* Ran only by BSP at initial boot strapping. */
static inline void cpu_set_bsp(void) static inline void cpu_set_bsp(void)
@ -79,6 +86,16 @@ static inline int cpu_is_bsp(void)
return cpu_info() == bsp_cpu_info; return cpu_info() == bsp_cpu_info;
} }
static inline int cpu_online(struct cpu_info *ci)
{
return load_acquire(&ci->online) != 0;
}
static inline void cpu_mark_online(struct cpu_info *ci)
{
store_release(&ci->online, 1);
}
/* Control routines for starting CPUs. */ /* Control routines for starting CPUs. */
struct cpu_control_ops { struct cpu_control_ops {
/* Return the maximum number of CPUs supported. */ /* Return the maximum number of CPUs supported. */
@ -112,6 +129,9 @@ int arch_run_on_cpu_async(unsigned int cpu, struct cpu_action *action);
int arch_run_on_all_cpus_async(struct cpu_action *action); int arch_run_on_all_cpus_async(struct cpu_action *action);
int arch_run_on_all_cpus_but_self_async(struct cpu_action *action); int arch_run_on_all_cpus_but_self_async(struct cpu_action *action);
/* Wait for actions to be performed. */
void arch_cpu_wait_for_action(void);
#endif /* !__PRE_RAM__ */ #endif /* !__PRE_RAM__ */
/* /*