arm64: remove secmon

It's been decided to only support ARM Trusted Firmware for
any EL3 monitor. That means any SoC that requires PSCI
needs to add support for ATF; otherwise multi-processor
bring-up won't work.

Change-Id: Ic931dbf5eff8765f4964374910123a197148f0ff
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/11897
Tested-by: build bot (Jenkins)
Reviewed-by: Julius Werner <jwerner@chromium.org>
This commit is contained in:
Aaron Durbin 2015-10-14 10:08:10 -05:00 committed by Julius Werner
parent a62191b827
commit 8c8e2b7e4c
34 changed files with 6 additions and 2206 deletions

View File

@ -63,7 +63,7 @@ subdirs-y += site-local
#######################################################################
# Add source classes and their build options
classes-y := ramstage romstage bootblock smm smmstub cpu_microcode libverstage verstage secmon
classes-y := ramstage romstage bootblock smm smmstub cpu_microcode libverstage verstage
# Add dynamic classes for rmodules
$(foreach supported_arch,$(ARCH_SUPPORTED), \

View File

@ -28,13 +28,6 @@ config ARM64_BOOTBLOCK_CUSTOM
bool
default n
config ARM64_USE_SECURE_MONITOR
bool
default n
select RELOCATABLE_MODULES
depends on ARCH_RAMSTAGE_ARM64
depends on !ARM64_USE_ARM_TRUSTED_FIRMWARE
config ARM64_USE_SPINTABLE
bool
default n

View File

@ -151,14 +151,6 @@ rmodules_arm64-y += memcpy.S
rmodules_arm64-y += memmove.S
rmodules_arm64-y += eabi_compat.c
secmon-y += stage_entry.S
secmon-y += cpu-stubs.c
secmon-y += startup.c
secmon-y += ../../lib/malloc.c
secmon-y += memset.S
secmon-y += memmove.S
secmon-y += memcpy.S
ramstage-srcs += $(wildcard src/mainboard/$(MAINBOARDDIR)/mainboard.c)
# Build the ramstage

View File

@ -17,8 +17,6 @@
subdirs-y += lib/
subdirs-$(CONFIG_ARM64_USE_SECURE_MONITOR) += secmon/
armv8_flags = -march=armv8-a -I$(src)/arch/arm64/include/armv8/ -D__COREBOOT_ARM_ARCH__=8
armv8_asm_flags = $(armv8_flags)
@ -88,8 +86,6 @@ ramstage-y += cpu.S
ramstage-y += exception.c
ramstage-y += mmu.c
ramstage-$(CONFIG_ARM64_USE_SECURE_MONITOR) += secmon_loader.c
ramstage-c-ccopts += $(armv8_flags)
ramstage-S-ccopts += $(armv8_asm_flags)

View File

@ -32,8 +32,4 @@ endif
ifeq ($(CONFIG_ARCH_RAMSTAGE_ARMV8_64),y)
ramstage-y += $(lib_access)
ifeq ($(CONFIG_ARM64_USE_SECURE_MONITOR),y)
secmon-y += $(lib_access)
endif
endif

View File

@ -1,53 +0,0 @@
################################################################################
##
## This file is part of the coreboot project.
##
## Copyright (C) 2014 Google Inc.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; version 2 of the License.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
################################################################################
$(eval $(call create_class_compiler,secmon,arm64))
SECMON_DIR=$(obj)/arch/arm64/armv8/secmon
SECMON_BIN=$(SECMON_DIR)/secmon
SECMON_OBJ=$(SECMON_BIN).o
SECMON_ELF=$(SECMON_BIN).elf
SECMON_RMOD=$(SECMON_ELF).rmod
SECMON_RAMSTAGE=$(SECMON_DIR)/secmon.manual
secmon-generic-ccopts += -I$(src)/arch/arm64/include/armv8/ -include $(src)/include/kconfig.h -D__SECMON__
secmon-y += secmon_init.c
secmon-y += psci.c
secmon-y += smc.c
secmon-y += trampoline.S
secmon-y += ../cache.c
secmon-y += ../cache_helpers.S
secmon-y += ../cpu.S
secmon-y += ../exception.c
secmon-y += ../../cpu.c
secmon-y += ../../transition_asm.S ../../transition.c
secmon-y += ../../../../drivers/gic/gic.c
ramstage-srcs += $(SECMON_RAMSTAGE)
$(SECMON_OBJ): $$(secmon-objs)
$(CC_secmon) $(LDFLAGS) -nostdlib -r -o $@ $^
$(eval $(call rmodule_link,$(SECMON_ELF), $(SECMON_OBJ), 8192,arm64))
$(SECMON_BIN): $(SECMON_RMOD)
$(OBJCOPY_secmon) -O binary $< $@
$(SECMON_BIN).ramstage.manual: $(SECMON_BIN)
@printf " OBJCOPY $(subst $(obj)/,,$(@))\n"
cd $(dir $@); $(OBJCOPY_secmon) -I binary $(notdir $<) -O elf64-littleaarch64 -B aarch64 $(notdir $@)

View File

@ -1,705 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <gic.h>
#include <string.h>
#include <stdlib.h>
#include <smp/spinlock.h>
#include <arch/cpu.h>
#include <arch/psci.h>
#include <arch/smc.h>
#include <arch/transition.h>
#include <arch/lib_helpers.h>
#include <console/console.h>
#include "secmon.h"
DECLARE_SPIN_LOCK(psci_spinlock);
/* Root of PSCI node tree. */
static struct psci_node psci_root;
/* Array of all the psci_nodes in system. */
static size_t psci_num_nodes;
static struct psci_node **psci_nodes;
/* Acquire the global lock protecting all PSCI node state. */
static inline void psci_lock(void)
{
	spin_lock(&psci_spinlock);
}
/* Release the global PSCI state lock. */
static inline void psci_unlock(void)
{
	spin_unlock(&psci_spinlock);
}
/* Read a node's power state. Caller must hold psci_lock. */
static inline int psci_state_locked(const struct psci_node *e)
{
	return e->state;
}
/* Set a node's power state. Caller must hold psci_lock. */
static inline void psci_set_state_locked(struct psci_node *e, int s)
{
	e->state = s;
}
/*
 * Find the PSCI node for (mpidr, affinity level).
 * psci_nodes[] is stored in depth-first tree order, i.e. ascending
 * mpidr, so the scan can terminate once it walks past the target.
 * Returns NULL when no such node exists.
 */
static struct psci_node *psci_node_lookup(uint64_t mpidr, int level)
{
	size_t i;

	for (i = 0; i < psci_num_nodes; i++) {
		struct psci_node *node = psci_nodes[i];

		if (node->mpidr == mpidr && node->level == level)
			return node;
		/* Array is sorted by mpidr; no match possible past this. */
		if (node->mpidr > mpidr)
			break;
	}
	return NULL;
}
/* Return the affinity-level-0 (leaf) node of the calling CPU. */
static inline struct psci_node *node_self(void)
{
	return psci_node_lookup(cpu_info()->mpidr, PSCI_AFFINITY_LEVEL_0);
}
/* Find the ancestor of node affected by a state transition limited by level. */
/* Find the ancestor of node affected by a state transition limited by level. */
static struct psci_node *psci_find_ancestor(struct psci_node *e, int level,
						int state)
{
	struct psci_node *p;

	/* If all siblings of the node are already off then parent can be
	 * set to off as well. */
	if (state == PSCI_STATE_OFF) {
		while (1) {
			size_t i;
			struct psci_node *s;

			/* Nothing above the root can be affected. */
			if (psci_root_node(e))
				return e;

			p = psci_node_parent(e);

			/* Stop climbing past the requested affinity level. */
			if (p->level > level)
				return e;

			for (i = 0; i < p->children.num; i++) {
				s = &p->children.nodes[i];
				/* Don't check target. */
				if (s == e)
					continue;
				/* A sibling still on keeps the parent on. */
				if (psci_state_locked(s) != PSCI_STATE_OFF)
					return e;
			}
			e = p;
		}
	}

	/* All ancestors in state OFF are affected. */
	if (state == PSCI_STATE_ON_PENDING) {
		while (1) {
			/* At the root. Return last affected node. */
			if (psci_root_node(e))
				return e;
			p = psci_node_parent(e);
			if (p->level > level)
				return e;
			/* This parent is already ON. */
			if (psci_state_locked(p) != PSCI_STATE_OFF)
				return e;
			e = p;
		}
	}

	/* Default to returning node passed in. */
	return e;
}
/*
 * Set state on every node on the path from 'from' up to and
 * including 'to'. Stops early if another CPU already applied the
 * same state. Caller must hold psci_lock.
 */
static void psci_set_hierarchy_state(struct psci_node *from,
					struct psci_node *to,
					int state)
{
	struct psci_node *end;

	end = psci_node_parent(to);

	while (from != end) {
		/* Raced with another CPU as state is already set. */
		if (psci_state_locked(from) == state)
			break;
		psci_set_state_locked(from, state);
		from = psci_node_parent(from);
	}
}
/*
 * Runs on a CPU that was just powered on: mark its node hierarchy ON,
 * then drop to the lower EL at the previously stashed startup entry.
 */
static void psci_cpu_on_callback(void *arg)
{
	struct exc_state state;
	int target_el;
	struct psci_node *e = arg;

	psci_lock();
	psci_set_hierarchy_state(e, e->cpu_state.ancestor, PSCI_STATE_ON);
	psci_unlock();

	/* Target EL is determined if HVC is enabled or not. */
	target_el = (raw_read_scr_el3() & SCR_HVC_ENABLE) ? EL2 : EL1;

	memset(&state, 0, sizeof(state));
	state.elx.spsr = get_eret_el(target_el, SPSR_USE_H);
	transition_with_entry(e->cpu_state.startup.run,
				e->cpu_state.startup.arg, &state);
}
/*
 * Stash the startup action on the target node and record the highest
 * ancestor affected by the ON_PENDING transition. Caller holds psci_lock.
 */
static void psci_cpu_on_prepare(struct psci_cmd *cmd,
				const struct cpu_action *a)
{
	struct psci_node *ancestor;
	struct psci_node *e;
	int state = PSCI_STATE_ON_PENDING;

	e = cmd->target;
	e->cpu_state.startup = *a;
	ancestor = psci_find_ancestor(e, PSCI_AFFINITY_LEVEL_HIGHEST, state);
	e->cpu_state.ancestor = ancestor;
	cmd->ancestor = ancestor;
}
/*
 * Asynchronously run psci_cpu_on_callback on the target CPU. On failure
 * (no linked cpu_info, or dispatch error) roll the hierarchy back to OFF.
 * Returns a PSCI_RET_* code.
 */
static int psci_schedule_cpu_on(struct psci_node *e)
{
	struct cpu_info *ci;
	struct cpu_action action = {
		.run = &psci_cpu_on_callback,
		.arg = e,
	};

	ci = e->cpu_state.ci;
	if (ci == NULL || arch_run_on_cpu_async(ci->id, &action)) {
		psci_set_hierarchy_state(e, e->cpu_state.ancestor,
						PSCI_STATE_OFF);
		return PSCI_RET_INTERNAL_FAILURE;
	}
	return PSCI_RET_SUCCESS;
}
/*
 * Stash the resume action on the target node; mirrors
 * psci_cpu_on_prepare() but fills cpu_state.resume instead of startup.
 * Caller holds psci_lock.
 */
static void psci_cpu_resume_prepare(struct psci_cmd *cmd,
					const struct cpu_action *a)
{
	struct psci_node *ancestor;
	struct psci_node *e;
	int state = PSCI_STATE_ON_PENDING;

	e = cmd->target;
	e->cpu_state.resume = *a;
	ancestor = psci_find_ancestor(e, PSCI_AFFINITY_LEVEL_HIGHEST, state);
	e->cpu_state.ancestor = ancestor;
	cmd->ancestor = ancestor;
}
/*
 * Run the CPU's stashed resume action, if any, on that CPU.
 * No-op when no resume action has been prepared.
 */
static void psci_schedule_cpu_resume(struct psci_node *e)
{
	struct cpu_info *ci;
	struct cpu_action *action;

	if (e->cpu_state.resume.run == NULL)
		return;

	ci = e->cpu_state.ci;

	/*
	 * Guard against a CPU whose cpu_info was never linked, matching
	 * the NULL check in psci_schedule_cpu_on().
	 */
	if (ci == NULL)
		return;

	action = &e->cpu_state.resume;
	arch_run_on_cpu(ci->id, action);
}
/*
 * Turn on the calling CPU with the given startup action: mark the
 * hierarchy ON_PENDING under the lock, then schedule the ON callback.
 */
void psci_turn_on_self(const struct cpu_action *action)
{
	struct psci_node *e = node_self();
	struct psci_cmd cmd = {
		.type = PSCI_CMD_ON,
	};

	if (e == NULL) {
		printk(BIOS_ERR, "Couldn't turn on self: mpidr %llx\n",
			cpu_info()->mpidr);
		return;
	}
	cmd.target = e;

	psci_lock();
	psci_cpu_on_prepare(&cmd, action);
	psci_set_hierarchy_state(e, cmd.ancestor, PSCI_STATE_ON_PENDING);
	psci_unlock();

	psci_schedule_cpu_on(e);
}
/*
 * Entry point for a CPU coming (back) into secmon: enable its GIC
 * interface, run any pending resume action, then park waiting for work.
 */
void psci_cpu_entry(void)
{
	gic_enable();

	/*
	 * Just wait for an action to be performed.
	 */
	psci_schedule_cpu_resume(node_self());
	secmon_wait_for_action();
}
/*
 * Resume handler run after a CPU_SUSPEND wakes up: let the SoC layer
 * undo the suspend, mark the hierarchy ON, and re-dispatch the stashed
 * startup action via psci_schedule_cpu_on().
 */
static void psci_cpu_resume(void *arg)
{
	/* arg carries the original CPU_SUSPEND power_state value. */
	uint64_t power_state = (uint64_t)arg;
	struct psci_node *e;
	struct psci_power_state state;
	struct psci_cmd cmd = {
		.type = PSCI_CMD_RESUME,
	};

	psci_power_state_unpack(power_state, &state);

	psci_lock();
	e = node_self();
	/* clear the resume action after resume */
	e->cpu_state.resume.run = NULL;
	e->cpu_state.resume.arg = NULL;
	cmd.target = e;
	cmd.state = &state;
	soc_psci_ops.cmd_prepare(&cmd);
	psci_unlock();

	soc_psci_ops.cmd_commit(&cmd);

	psci_lock();
	psci_set_hierarchy_state(e, e->cpu_state.ancestor, PSCI_STATE_ON);
	psci_unlock();

	psci_schedule_cpu_on(node_self());
}
/*
 * PSCI CPU_SUSPEND64 handler. Prepares both the wakeup (startup) and
 * resume actions, lets the SoC layer validate and commit the request,
 * and reports the result via the PSCI return register. If cmd_commit()
 * returns (PSCI_POWER_STATE_TYPE_STANDBY mode) the CPU is marked back ON.
 */
static void psci_cpu_suspend(struct psci_func *pf)
{
	uint64_t power_state;
	uint64_t entry;
	uint64_t context_id;
	struct psci_node *e;
	struct psci_power_state state;
	struct cpu_action action;
	struct cpu_action resume_action;
	struct psci_cmd cmd = {
		.type = PSCI_CMD_SUSPEND,
	};
	int ret;

	power_state = psci64_arg(pf, PSCI_PARAM_0);
	entry = psci64_arg(pf, PSCI_PARAM_1);
	context_id = psci64_arg(pf, PSCI_PARAM_2);
	psci_power_state_unpack(power_state, &state);

	psci_lock();
	e = node_self();
	cmd.target = e;
	cmd.state = &state;
	action.run = (void *)entry;
	action.arg = (void *)context_id;
	resume_action.run = &psci_cpu_resume;
	resume_action.arg = (void *)power_state;
	psci_cpu_on_prepare(&cmd, &action);
	psci_cpu_resume_prepare(&cmd, &resume_action);
	ret = soc_psci_ops.cmd_prepare(&cmd);
	if (ret == PSCI_RET_SUCCESS)
		psci_set_hierarchy_state(e, cmd.ancestor, PSCI_STATE_OFF);
	psci_unlock();

	if (ret != PSCI_RET_SUCCESS) {
		/*
		 * Fix: don't 'return <void expression>;' from a void
		 * function (C99 6.8.6.4 constraint violation).
		 */
		psci32_return(pf, ret);
		return;
	}

	gic_disable();

	ret = soc_psci_ops.cmd_commit(&cmd);

	/* PSCI_POWER_STATE_TYPE_STANDBY mode only */

	/* Commit returned, so the resume action must not fire anymore. */
	psci_lock();
	resume_action.run = NULL;
	resume_action.arg = NULL;
	psci_cpu_resume_prepare(&cmd, &resume_action);
	psci_unlock();

	if (ret != PSCI_RET_SUCCESS) {
		psci32_return(pf, ret);
		return;
	}

	psci_lock();
	psci_set_hierarchy_state(e, e->cpu_state.ancestor, PSCI_STATE_ON);
	psci_unlock();
	psci32_return(pf, PSCI_RET_SUCCESS);
}
/*
 * PSCI CPU_ON64 handler. Validates the target CPU, rejects requests for
 * CPUs that are already ON or ON_PENDING, lets the SoC layer prepare
 * and commit the power-up, then schedules the startup callback.
 */
static void psci_cpu_on(struct psci_func *pf)
{
	uint64_t entry;
	uint64_t target_mpidr;
	uint64_t context_id;
	int cpu_state;
	int ret;
	struct psci_node *e;
	struct cpu_action action;
	struct psci_cmd cmd = {
		.type = PSCI_CMD_ON,
	};

	target_mpidr = psci64_arg(pf, PSCI_PARAM_0);
	entry = psci64_arg(pf, PSCI_PARAM_1);
	context_id = psci64_arg(pf, PSCI_PARAM_2);

	e = psci_node_lookup(target_mpidr, PSCI_AFFINITY_LEVEL_0);

	if (e == NULL) {
		psci32_return(pf, PSCI_RET_INVALID_PARAMETERS);
		return;
	}

	psci_lock();
	cpu_state = psci_state_locked(e);
	if (cpu_state == PSCI_STATE_ON_PENDING) {
		psci32_return(pf, PSCI_RET_ON_PENDING);
		psci_unlock();
		return;
	} else if (cpu_state == PSCI_STATE_ON) {
		psci32_return(pf, PSCI_RET_ALREADY_ON);
		psci_unlock();
		return;
	}

	cmd.target = e;
	action.run = (void *)entry;
	action.arg = (void *)context_id;
	psci_cpu_on_prepare(&cmd, &action);
	ret = soc_psci_ops.cmd_prepare(&cmd);
	if (ret == PSCI_RET_SUCCESS)
		psci_set_hierarchy_state(e, cmd.ancestor,
						PSCI_STATE_ON_PENDING);
	psci_unlock();

	if (ret != PSCI_RET_SUCCESS) {
		/*
		 * Fix: don't 'return <void expression>;' from a void
		 * function (C99 6.8.6.4 constraint violation).
		 */
		psci32_return(pf, ret);
		return;
	}

	ret = soc_psci_ops.cmd_commit(&cmd);
	if (ret != PSCI_RET_SUCCESS) {
		/* Roll the hierarchy back to OFF on commit failure. */
		psci_lock();
		psci_set_hierarchy_state(e, cmd.ancestor, PSCI_STATE_OFF);
		psci_unlock();
		psci32_return(pf, ret);
		return;
	}

	psci32_return(pf, psci_schedule_cpu_on(e));
}
/*
 * Power off node e up to the given affinity level with SoC state_id.
 * cmd_commit() is expected not to return on success; if it does,
 * the hierarchy is restored to ON and an error code is returned.
 */
static int psci_turn_off_node(struct psci_node *e, int level,
				int state_id)
{
	int ret;
	struct psci_cmd cmd = {
		.type = PSCI_CMD_OFF,
		.state_id = state_id,
		.target = e,
	};

	psci_lock();

	cmd.ancestor = psci_find_ancestor(e, level, PSCI_STATE_OFF);

	ret = soc_psci_ops.cmd_prepare(&cmd);

	if (ret == PSCI_RET_SUCCESS)
		psci_set_hierarchy_state(e, cmd.ancestor, PSCI_STATE_OFF);

	psci_unlock();

	if (ret != PSCI_RET_SUCCESS)
		return ret;

	gic_disable();

	/* Should never return. */
	ret = soc_psci_ops.cmd_commit(&cmd);

	/* Adjust ret to be an error. */
	if (ret == PSCI_RET_SUCCESS)
		ret = PSCI_RET_INTERNAL_FAILURE;

	/* Turn things back on. */
	psci_lock();
	psci_set_hierarchy_state(e, cmd.ancestor, PSCI_STATE_ON);
	psci_unlock();

	return ret;
}
/*
 * Power off the calling CPU. Returns a PSCI_RET_* error code only on
 * failure; on success psci_turn_off_node() does not return.
 */
int psci_turn_off_self(void)
{
	struct psci_node *e = node_self();

	if (e == NULL) {
		printk(BIOS_ERR, "No PSCI node for MPIDR %llx.\n",
			cpu_info()->mpidr);
		return PSCI_RET_INTERNAL_FAILURE;
	}

	/* -1 state id indicates to SoC to make its own decision for
	 * internal state when powering off the node. */
	return psci_turn_off_node(e, PSCI_AFFINITY_LEVEL_HIGHEST, -1);
}
/*
 * SMC dispatch entry for PSCI function ids. Decodes the function id
 * and routes to the suspend/on/off implementations; unknown ids get
 * PSCI_RET_NOT_SUPPORTED. Always returns 0 (call handled).
 */
static int psci_handler(struct smc_call *smc)
{
	struct psci_func pf_storage;
	struct psci_func *pf = &pf_storage;

	psci_func_init(pf, smc);

	switch (pf->id) {
	case PSCI_CPU_SUSPEND64:
		psci_cpu_suspend(pf);
		break;
	case PSCI_CPU_ON64:
		psci_cpu_on(pf);
		break;
	case PSCI_CPU_OFF32:
		psci32_return(pf, psci_turn_off_self());
		break;
	default:
		psci32_return(pf, PSCI_RET_NOT_SUPPORTED);
		break;
	}

	return 0;
}
/* Runs on each CPU: attach its cpu_info to its own PSCI leaf node. */
static void psci_link_cpu_info(void *arg)
{
	struct psci_node *e = node_self();

	if (e == NULL) {
		printk(BIOS_ERR, "No PSCI node for MPIDR %llx.\n",
			cpu_info()->mpidr);
		return;
	}

	e->cpu_state.ci = cpu_info();
}
/*
 * Recursively initialize a PSCI node and allocate/initialize all of
 * its children as reported by the SoC topology callback. All nodes
 * start in PSCI_STATE_OFF. Returns 0 on success, -1 on allocation or
 * recursive failure.
 */
static int psci_init_node(struct psci_node *e,
				struct psci_node *parent,
				int level, uint64_t mpidr)
{
	size_t i;
	uint64_t mpidr_inc;
	struct psci_node_group *ng;
	size_t num_children;

	memset(e, 0, sizeof(*e));
	e->mpidr = mpidr;
	psci_set_state_locked(e, PSCI_STATE_OFF);
	e->parent = parent;
	e->level = level;

	/* Leaves (CPUs) have no children. */
	if (level == PSCI_AFFINITY_LEVEL_0)
		return 0;

	num_children = soc_psci_ops.children_at_level(level, mpidr);

	if (num_children == 0)
		return 0;

	ng = &e->children;
	ng->num = num_children;
	ng->nodes = malloc(ng->num * sizeof(struct psci_node));

	if (ng->nodes == NULL) {
		printk(BIOS_DEBUG, "PSCI: Allocation failure at level %d\n",
			level);
		return -1;
	}

	/* Switch to next level below. */
	level = psci_level_below(level);

	/* Increment that bumps only this level's affinity field in mpidr. */
	mpidr_inc = mpidr_mask(!!(level == PSCI_AFFINITY_LEVEL_3),
				!!(level == PSCI_AFFINITY_LEVEL_2),
				!!(level == PSCI_AFFINITY_LEVEL_1),
				!!(level == PSCI_AFFINITY_LEVEL_0));

	for (i = 0; i < ng->num; i++) {
		struct psci_node *c = &ng->nodes[i];

		/* Recursively initialize the nodes. */
		if (psci_init_node(c, e, level, mpidr))
			return -1;
		mpidr += mpidr_inc;
	}

	return 0;
}
/* Recursively count all descendants of e (not counting e itself). */
static size_t psci_count_children(struct psci_node *e)
{
	size_t i;
	size_t count;

	if (e->level == PSCI_AFFINITY_LEVEL_0)
		return 0;

	count = e->children.num;
	for (i = 0; i < e->children.num; i++)
		count += psci_count_children(&e->children.nodes[i]);

	return count;
}
/*
 * Recursively record node pointers into psci_nodes[] starting at
 * index. Returns the next free index.
 */
static size_t psci_write_nodes(struct psci_node *e, size_t index)
{
	size_t i;

	/*
	 * Recursively save node pointers in array. Node pointers are
	 * ordered in ascending mpidr and descending level within same mpidr.
	 * i.e. each node is saved in depth-first order of the tree.
	 */
	if (e->level != PSCI_AFFINITY_ROOT) {
		psci_nodes[index] = e;
		index++;
	}

	/* Leaf node: nothing below to record. */
	if (e->level == PSCI_AFFINITY_LEVEL_0)
		return index;

	for (i = 0; i < e->children.num; i++)
		index = psci_write_nodes(&e->children.nodes[i], index);

	return index;
}
/*
 * Build the PSCI affinity tree from the SoC topology and fill the
 * depth-first psci_nodes pointer array. Marks the BSP's leaf node ON
 * so it cannot race with other CPUs turning themselves off.
 * Returns 0 on success, -1 on failure.
 */
static int psci_allocate_nodes(void)
{
	int level;
	size_t num_children;
	uint64_t mpidr;
	struct psci_node *e;

	mpidr = 0;
	level = PSCI_AFFINITY_ROOT;

	/* Find where the root should start. */
	while (psci_level_below(level) >= PSCI_AFFINITY_LEVEL_0) {
		num_children = soc_psci_ops.children_at_level(level, mpidr);
		if (num_children == 0) {
			printk(BIOS_ERR, "PSCI: No children at level %d!\n",
				level);
			return -1;
		}
		/* The root starts where the affinity levels branch. */
		if (num_children > 1)
			break;
		level = psci_level_below(level);
	}

	if (psci_init_node(&psci_root, NULL, level, mpidr)) {
		printk(BIOS_ERR, "PSCI init node failure.\n");
		return -1;
	}

	num_children = psci_count_children(&psci_root);
	/* Count the root node if isn't a fake node. */
	if (psci_root.level != PSCI_AFFINITY_ROOT)
		num_children++;

	psci_nodes = malloc(num_children * sizeof(void *));
	if (psci_nodes == NULL) {
		printk(BIOS_ERR, "PSCI node pointer array failure.\n");
		return -1;
	}
	/* Only publish the node count once the array actually exists. */
	psci_num_nodes = num_children;

	num_children = psci_write_nodes(&psci_root, 0);

	if (num_children != psci_num_nodes) {
		/* Fix: %zu is the correct conversion for size_t (not %zd). */
		printk(BIOS_ERR, "Wrong nodes written: %zu vs %zu.\n",
			num_children, psci_num_nodes);
		return -1;
	}

	/*
	 * By default all nodes are set to PSCI_STATE_OFF. In order not
	 * to race with other CPUs turning themselves off set the BSPs
	 * affinity node to ON.
	 */
	e = node_self();
	if (e == NULL) {
		printk(BIOS_ERR, "No PSCI node for BSP.\n");
		return -1;
	}
	psci_set_state_locked(e, PSCI_STATE_ON);

	return 0;
}
/*
 * Top-level PSCI bring-up: build the node tree, link each CPU's
 * cpu_info into it, register the SMC handler ranges (32- and 64-bit
 * function ids), and hand the CPU_ON entry point to the SoC layer.
 */
void psci_init(uintptr_t cpu_on_entry)
{
	struct cpu_action action = {
		.run = &psci_link_cpu_info,
	};

	if (psci_allocate_nodes()) {
		printk(BIOS_ERR, "PSCI support not enabled.\n");
		return;
	}

	if (arch_run_on_all_cpus_async(&action))
		printk(BIOS_ERR, "Error linking cpu_info to PSCI nodes.\n");

	/* Register PSCI handlers. */
	if (smc_register_range(PSCI_CPU_SUSPEND32, PSCI_CPU_ON32,
				&psci_handler))
		printk(BIOS_ERR, "Couldn't register PSCI handler.\n");
	if (smc_register_range(PSCI_CPU_SUSPEND64, PSCI_CPU_ON64,
				&psci_handler))
		printk(BIOS_ERR, "Couldn't register PSCI handler.\n");

	/* Inform SoC layer of CPU_ON entry point. */
	psci_soc_init(cpu_on_entry);
}

View File

@ -1,29 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SECMON_SECMON_H__
#define __SECMON_SECMON_H__
/*
* The secmon_trampoline() switches mode to EL3t, reinitializing both
* EL3t and EL3h stacks.
*/
void secmon_trampoline(void *entry, void *arg);
/* Wait for action to take place. */
void secmon_wait_for_action(void);
#endif /* __SECMON_SECMON_H__ */

View File

@ -1,132 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2014 Google Inc
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <arch/barrier.h>
#include <arch/cache.h>
#include <arch/io.h>
#include <arch/exception.h>
#include <arch/lib_helpers.h>
#include <arch/psci.h>
#include <arch/secmon.h>
#include <arch/smc.h>
#include <arch/startup.h>
#include <console/console.h>
#include <stddef.h>
#include "secmon.h"
static void secmon_init(struct secmon_params *params, int bsp);
/* BSP entry shim: forward to the common init with bsp = 1. */
static void secmon_init_bsp(void *arg)
{
	secmon_init(arg, 1);
}
/* Secondary-CPU entry shim: forward to the common init with bsp = 0. */
static void secmon_init_nonbsp(void *arg)
{
	secmon_init(arg, 0);
}
/*
* This variable holds entry point for secmon init code. Once the stacks are
* setup by the stage_entry.S, it jumps to c_entry.
*/
void (*c_entry[2])(void *) = { &secmon_init_bsp, &secmon_init_nonbsp };
/* Resume-path entry: re-enter secmon via the PSCI CPU entry. */
static void cpu_resume(void *unused)
{
	psci_cpu_entry();
}
/*
 * Redirect future entries into secmon (both BSP and secondaries) to
 * the resume path and save state needed to come back.
 */
static void cpu_resume_init(void)
{
	/* Change entry points into secmon. */
	c_entry[0] = c_entry[1] = cpu_resume;

	/*
	 * Flush the updated pointers to memory; presumably entering CPUs
	 * read c_entry before caches are coherent — TODO confirm.
	 */
	dcache_clean_by_mva(&c_entry, sizeof(c_entry));

	/* Back up state. */
	startup_save_cpu_data();
}
/*
 * Put this CPU into its initial PSCI state using the action requested
 * for it (BSP action vs. secondary action).
 */
static void start_up_cpu(void *arg)
{
	struct secmon_params *params = arg;
	struct cpu_action *action;

	if (cpu_is_bsp())
		action = &params->bsp;
	else
		action = &params->secondary;

	/*
	 * NOTE(review): on success psci_turn_off_self() presumably does not
	 * return; if it fails, execution falls through to turning the CPU
	 * on with a NULL run action — confirm this fallback is intended.
	 */
	if (action->run == NULL)
		psci_turn_off_self();

	psci_turn_on_self(action);
}
/* Per-CPU bookkeeping: assign id, mark online, flag the BSP. */
static void cpu_init(int bsp)
{
	struct cpu_info *ci = cpu_info();

	ci->id = smp_processor_id();
	cpu_mark_online(ci);

	if (bsp)
		cpu_set_bsp();
}
/* Spin until the expected number of CPUs have marked themselves online. */
static void wait_for_all_cpus(size_t expected)
{
	while (cpus_online() != expected)
		;
}
/*
 * Common secmon entry for all CPUs. Secondaries park waiting for
 * actions; the BSP waits for everyone, sets up SMC and PSCI, arms the
 * resume path, then starts all CPUs (secondaries first, itself last).
 * Reaching the final printk means turning on the BSP failed.
 */
static void secmon_init(struct secmon_params *params, int bsp)
{
	struct cpu_action action = {
		.run = start_up_cpu,
		.arg = params,
	};

	exception_hwinit();
	cpu_init(bsp);

	if (!cpu_is_bsp())
		secmon_wait_for_action();

	/* Wait for all CPUs to enter secmon. */
	wait_for_all_cpus(params->online_cpus);

	smc_init();
	psci_init((uintptr_t)arm64_cpu_startup_resume);

	/* Initialize the resume path. */
	cpu_resume_init();

	/* Make sure all non-BSP CPUs take action before the BSP. */
	arch_run_on_all_cpus_but_self_async(&action);
	/* Turn on BSP. */
	start_up_cpu(params);

	printk(BIOS_ERR, "CPU turn on failed for BSP.\n");
	secmon_wait_for_action();
}
/* Park the calling CPU until an action is dispatched to it. */
void secmon_wait_for_action(void)
{
	arch_cpu_wait_for_action();
}

View File

@ -1,163 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <string.h>
#include <stdlib.h>
#include <arch/cpu.h>
#include <arch/smc.h>
#include <arch/exception.h>
#include <arch/lib_helpers.h>
#include <console/console.h>
enum {
/* SMC called from AARCH32 */
EC_SMC_AARCH32 = 0x13,
/* SMC called from AARCH64 */
EC_SMC_AARCH64 = 0x17,
SMC_NUM_RANGES = 8,
};
struct smc_range {
uint32_t func_begin;
uint32_t func_end;
int (*handler)(struct smc_call *);
};
struct smc_ranges {
size_t used;
struct smc_range handlers[SMC_NUM_RANGES];
};
static struct smc_ranges smc_functions;
/*
 * Find the registered handler range covering function id fid.
 * Returns NULL when no registered range contains fid.
 */
static struct smc_range *smc_handler_by_function(uint32_t fid)
{
	/* Fix: smc_functions.used is size_t; use a matching index type
	 * instead of int (signed/unsigned comparison). */
	size_t i;

	for (i = 0; i < smc_functions.used; i++) {
		struct smc_range *r = &smc_functions.handlers[i];

		if (fid >= r->func_begin && fid <= r->func_end)
			return r;
	}
	return NULL;
}
int smc_register_range(uint32_t min, uint32_t max, int (*h)(struct smc_call *))
{
struct smc_range *r;
if (smc_functions.used == SMC_NUM_RANGES)
return -1;
if (min > max)
return -1;
/* This check isn't exhaustive but it's fairly quick. */
if (smc_handler_by_function(min) || smc_handler_by_function(max))
return -1;
r = &smc_functions.handlers[smc_functions.used];
r->func_begin = min;
r->func_end = max;
r->handler = h;
smc_functions.used++;
return 0;
}
/* Copy SMC result registers back into the exception frame and return ret. */
static int smc_cleanup(struct exc_state *state, struct smc_call *smc, int ret)
{
	memcpy(&state->regs.x, &smc->results, sizeof(smc->results));
	return ret;
}
/* Report SMC_UNKNOWN_FUNC to the caller and mark the exception handled. */
static int smc_return_with_error(struct exc_state *state, struct smc_call *smc)
{
	smc32_return(smc, SMC_UNKNOWN_FUNC);
	return smc_cleanup(state, smc, EXC_RET_HANDLED);
}
/*
 * Synchronous-exception handler for SMC calls: validate the exception
 * class via ESR_EL3, dispatch to the registered range handler, and
 * write results back to the saved registers. Returns EXC_RET_IGNORED
 * for non-SMC exceptions so other handlers can run.
 */
static int smc_handler(struct exc_state *state, uint64_t vector_id)
{
	struct smc_call smc_storage;
	struct smc_call *smc = &smc_storage;
	uint32_t exception_class;
	uint32_t esr;
	struct smc_range *r;

	/* Seed args and results from the trapped register file. */
	memcpy(&smc->args, &state->regs.x, sizeof(smc->args));
	memcpy(&smc->results, &state->regs.x, sizeof(smc->results));

	esr = raw_read_esr_el3();
	/* Exception class lives in ESR bits [31:26]. */
	exception_class = (esr >> 26) & 0x3f;

	/* No support for SMC calls from AARCH32 */
	if (exception_class == EC_SMC_AARCH32)
		return smc_return_with_error(state, smc);

	/* Check to ensure this is an SMC from AARCH64. */
	if (exception_class != EC_SMC_AARCH64)
		return EXC_RET_IGNORED;

	/* Ensure immediate value is 0. */
	if ((esr & 0xffff) != 0)
		return smc_return_with_error(state, smc);

	r = smc_handler_by_function(smc_function_id(smc));

	if (r != NULL) {
		/* Handler returning 0 means the call was serviced. */
		if (!r->handler(smc))
			return smc_cleanup(state, smc, EXC_RET_HANDLED);
	}

	return smc_return_with_error(state, smc);
}
/* SMC calls can be generated by 32-bit or 64-bit code. */
static struct exception_handler smc_handler64 = {
.handler = &smc_handler,
};
static struct exception_handler smc_handler32 = {
.handler = &smc_handler,
};
/* Per-CPU callback: enable SMC instruction trapping via SCR_EL3. */
static void enable_smc(void *arg)
{
	uint32_t scr;

	/* Enable SMC */
	scr = raw_read_scr_el3();
	scr &= ~(SCR_SMC_MASK);
	scr |= SCR_SMC_ENABLE;
	raw_write_scr_el3(scr);
}
/*
 * Enable SMC on every CPU and hook the SMC handler into both the
 * lower-EL 64-bit and 32-bit synchronous exception vectors.
 */
void smc_init(void)
{
	struct cpu_action action = {
		.run = enable_smc,
	};

	arch_run_on_all_cpus_async(&action);

	/* Register SMC handlers. */
	exception_handler_register(EXC_VID_LOW64_SYNC, &smc_handler64);
	exception_handler_register(EXC_VID_LOW32_SYNC, &smc_handler32);
}

View File

@ -1,47 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <arch/asm.h>
/*
* Call entry(arg) after reinitializing stack state.
* void secmon_trampoline(void *entry, void *arg);
*/
/*
 * secmon_trampoline: reinitialize this CPU's exception (SP_EL3h) and
 * regular (SP_EL0) stacks, then tail-call entry(arg). Does not return.
 */
ENTRY(secmon_trampoline)
	mov	x22, x0		/* x22 = function pointer */
	mov	x23, x1		/* x23 = argument */

	bl	smp_processor_id	/* x0 = cpu */
	mov	x24, x0

	/* Set the exception stack for this cpu. */
	bl	cpu_get_exception_stack
	msr	SPSel, #1
	isb
	mov	sp, x0

	/* Have stack pointer use SP_EL0. */
	msr	SPSel, #0
	isb

	/* Set stack for this cpu. */
	mov	x0, x24		/* x0 = cpu */
	bl	cpu_get_stack
	mov	sp, x0

	/* Call the function with specified argument. */
	mov	x1, x22
	mov	x0, x23
	br	x1
ENDPROC(secmon_trampoline)

View File

@ -1,157 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2014 Google Inc
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* secmon_loader.c: Responsible for loading the rmodule, providing entry point
* and parameter location for the rmodule.
*/
#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/secmon.h>
#include <arch/spintable.h>
#include <arch/stages.h>
#include <console/console.h>
#include <rmodule.h>
#include <string.h>
/* SECMON entry point encoded as an rmodule */
extern unsigned char _binary_secmon_start[];
typedef void (*secmon_entry_t)(struct secmon_params *);
/*
 * Weak default for the SoC hook reporting where secmon may be loaded.
 * Returning 0/0 makes secmon_load_rmodule() fail with a clear error.
 */
void __attribute__((weak)) soc_get_secmon_base_size(uint64_t *secmon_base, size_t *secmon_size)
{
	/* Default weak implementation initializes to 0 */
	*secmon_base = 0;
	*secmon_size = 0;
}
/*
 * Parse and relocate the secmon rmodule into the SoC-provided region.
 * Returns the module's entry point, or NULL on any failure.
 */
static secmon_entry_t secmon_load_rmodule(void)
{
	struct rmodule secmon_mod;
	uint64_t secmon_base;
	size_t secmon_size;

	/* Get base address and size of the area available for secure monitor
	 * rmodule.
	 */
	soc_get_secmon_base_size(&secmon_base, &secmon_size);

	if ((secmon_base == 0) || (secmon_size == 0)) {
		printk(BIOS_ERR, "ARM64: secmon_base / secmon_size invalid\n");
		return NULL;
	}

	printk(BIOS_DEBUG,"secmon_base:%lx,secmon_size:%lx\n",
		(unsigned long)secmon_base, (unsigned long)secmon_size);

	/* Fail if can't parse secmon module */
	if (rmodule_parse(&_binary_secmon_start, &secmon_mod)) {
		printk(BIOS_ERR, "ARM64: secmon_mod not found\n");
		return NULL;
	}

	/* Load rmodule at secmon_base */
	if (rmodule_load((void *)secmon_base, &secmon_mod)) {
		printk(BIOS_ERR, "ARM64:secmon_mod cannot load\n");
		return NULL;
	}

	/* Identify the entry point for secure monitor */
	return rmodule_entry(&secmon_mod);
}
struct secmon_runit {
secmon_entry_t entry;
struct secmon_params params;
};
/*
 * Per-CPU jump into the secure monitor: set SCR_EL3.NS for the lower
 * ELs, invalidate the icache, and call the (BSP or secondary) entry.
 * Does not return.
 */
static void secmon_start(void *arg)
{
	uint32_t scr;
	secmon_entry_t entry;
	struct secmon_params *p;
	struct secmon_runit *r = arg;

	entry = r->entry;
	p = &r->params;

	/* Obtain secondary entry point for non-BSP CPUs. */
	if (!cpu_is_bsp())
		entry = secondary_entry_point(entry);

	printk(BIOS_DEBUG, "CPU%x entering secure monitor %p.\n",
		cpu_info()->id, entry);

	/* We want to enforce the following policies:
	 * NS bit is set for lower EL
	 */
	scr = raw_read_scr_el3();
	scr |= SCR_NS;
	raw_write_scr_el3(scr);

	/* Invalidate instruction cache. Necessary for non-BSP. */
	icache_invalidate_all();
	entry(p);
}
/*
 * Populate the parameter block handed to secmon: CPU count, the BSP
 * action, and (when a spintable is active) the secondary-CPU action.
 */
static void fill_secmon_params(struct secmon_params *p,
				void (*bsp_entry)(void *), void *bsp_arg)
{
	const struct spintable_attributes *spin_attrs;

	memset(p, 0, sizeof(*p));

	p->online_cpus = cpus_online();

	spin_attrs = spintable_get_attributes();

	if (spin_attrs != NULL) {
		p->secondary.run = spin_attrs->entry;
		p->secondary.arg = spin_attrs->addr;
	}

	p->bsp.run = bsp_entry;
	p->bsp.arg = bsp_arg;
}
void secmon_run(void (*entry)(void *), void *cb_tables)
{
static struct secmon_runit runit;
struct cpu_action action = {
.run = secmon_start,
.arg = &runit,
};
printk(BIOS_SPEW, "payload jump @ %p\n", entry);
if (get_current_el() != EL3) {
printk(BIOS_DEBUG, "Secmon Error: Can only be loaded in EL3\n");
return;
}
runit.entry = secmon_load_rmodule();
if (runit.entry == NULL)
die("ARM64 Error: secmon load error");
printk(BIOS_DEBUG, "ARM64: Loaded the el3 monitor...jumping to %p\n",
runit.entry);
fill_secmon_params(&runit.params, entry, cb_tables);
arch_run_on_all_cpus_but_self_async(&action);
secmon_start(&runit);
}

View File

@ -15,7 +15,6 @@
#include <arch/cache.h>
#include <arch/lib_helpers.h>
#include <arch/secmon.h>
#include <arch/stages.h>
#include <arch/spintable.h>
#include <arch/transition.h>
@ -37,8 +36,6 @@ static void run_payload(struct prog *prog)
if (IS_ENABLED(CONFIG_ARM64_USE_ARM_TRUSTED_FIRMWARE))
arm_tf_run_bl31((u64)doit, (u64)arg, payload_spsr);
else if (IS_ENABLED(CONFIG_ARM64_USE_SECURE_MONITOR))
secmon_run(doit, arg);
else {
uint8_t current_el = get_current_el();

View File

@ -17,5 +17,4 @@
ifeq ($(CONFIG_ARCH_RAMSTAGE_ARM64),y)
ramstage-$(CONFIG_ARCH_ARM64_CPU_CORTEX_A57) += cortex_a57.S
secmon-$(CONFIG_ARCH_ARM64_CPU_CORTEX_A57) += cortex_a57.S
endif

View File

@ -28,7 +28,6 @@ PHDRS
TARGET(binary)
#endif
/* secmon uses rmodules */
#if ENV_RMODULE
ENTRY(_start)
#else

View File

@ -1,263 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __ARCH_PSCI_H__
#define __ARCH_PSCI_H__
#include <stdint.h>
#include <arch/cpu.h>
#include <arch/smc.h>
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
#define PSCI_0_2_POWER_STATE_ID_SHIFT 0
#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16
#define PSCI_0_2_POWER_STATE_TYPE_MASK \
(0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
#define PSCI_0_2_POWER_STATE_AFFL_SHIFT 24
#define PSCI_0_2_POWER_STATE_AFFL_MASK \
(0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
struct psci_power_state {
u16 id;
u8 type;
u8 affinity_level;
};
/* Return Values */
enum {
PSCI_RET_SUCCESS = 0,
PSCI_RET_NOT_SUPPORTED = -1,
PSCI_RET_INVALID_PARAMETERS = -2,
PSCI_RET_DENIED = -3,
PSCI_RET_ALREADY_ON = -4,
PSCI_RET_ON_PENDING = -5,
PSCI_RET_INTERNAL_FAILURE = -6,
PSCI_RET_NOT_PRESENT = -7,
PSCI_RET_DISABLED = -8,
};
/* Generic PSCI state. */
enum {
PSCI_STATE_OFF = 0,
PSCI_STATE_ON_PENDING,
PSCI_STATE_ON,
};
/* Affinity level support. */
enum {
PSCI_AFFINITY_LEVEL_0,
PSCI_AFFINITY_LEVEL_1,
PSCI_AFFINITY_LEVEL_2,
PSCI_AFFINITY_LEVEL_3,
PSCI_AFFINITY_ROOT,
PSCI_AFFINITY_LEVEL_HIGHEST = PSCI_AFFINITY_ROOT,
};
static inline int psci_level_below(int level)
{
return level - 1;
}
struct psci_node;
struct psci_cpu_state {
struct cpu_info *ci;
struct cpu_action startup;
struct cpu_action resume;
/* Ancestor of target to update state in CPU_ON case. */
struct psci_node *ancestor;
};
struct psci_node_group {
size_t num;
struct psci_node *nodes;
};
struct psci_node {
uint64_t mpidr;
/* Affinity level of node. */
int level;
/* Generic power state of this entity. */
int state;
/* The SoC can stash its own state accounting in here. */
int soc_state;
	/* Parent of current entity. */
struct psci_node *parent;
/*
* CPUs are leaves in the tree. They don't have children. The
* CPU-specific bits of storage can be shared with the children
* storage.
*/
union {
struct psci_node_group children;
struct psci_cpu_state cpu_state;
};
};
static inline struct psci_node *psci_node_parent(const struct psci_node *n)
{
return n->parent;
}
static inline int psci_root_node(const struct psci_node *n)
{
return psci_node_parent(n) == NULL;
}
enum {
PSCI_CMD_ON,
PSCI_CMD_OFF,
PSCI_CMD_SUSPEND,
PSCI_CMD_RESUME,
};
/*
* PSCI actions are serialized into a command for the SoC to process. There are
* 2 phases of a command being processed: prepare and commit. The prepare() is
* called with the PSCI locks held for the state of the PSCI nodes. If
* successful, the appropriate locks will be dropped and commit() will be
* called with the same structure. It is permissible for the SoC support code
 * to modify the structure passed in (e.g. to update the requested state_id to
* reflect dynamic constraints on how deep of a state to enter).
*/
struct psci_cmd {
/* Command type. */
int type;
/*
	 * PSCI state id for PSCI_CMD_OFF and PSCI_CMD_SUSPEND commands.
* A value of -1 indicates a CPU_OFF request.
*/
int state_id;
struct psci_power_state *state;
/*
* target is the command's target, but it can affect up to the
* ancestor entity. If target == ancestor then it only affects
	 * target, otherwise all entities up the hierarchy including ancestor.
*/
struct psci_node *target;
struct psci_node *ancestor;
};
struct psci_soc_ops {
/*
	 * Return number of entities one level below given parent affinity
* level and mpidr.
*/
size_t (*children_at_level)(int parent_level, uint64_t mpidr);
int (*cmd_prepare)(struct psci_cmd *cmd);
int (*cmd_commit)(struct psci_cmd *cmd);
};
/* Each SoC needs to provide the functions in the psci_soc_ops structure. */
extern struct psci_soc_ops soc_psci_ops;
/* PSCI Functions. */
enum {
/* 32-bit System level functions. */
PSCI_VERSION = SMC_FUNC_FAST32(0x4, 0x0),
PSCI_SYSTEM_OFF = SMC_FUNC_FAST32(0x4, 0x8),
PSCI_SYSTEM_RESET = SMC_FUNC_FAST32(0x4, 0x9),
/* 32-bit CPU support functions. */
PSCI_CPU_SUSPEND32 = SMC_FUNC_FAST32(0x4, 0x1),
PSCI_CPU_OFF32 = SMC_FUNC_FAST32(0x4, 0x2),
PSCI_CPU_ON32 = SMC_FUNC_FAST32(0x4, 0x3),
/* 64-bit CPU support functions. */
PSCI_CPU_SUSPEND64 = SMC_FUNC_FAST64(0x4, 0x1),
PSCI_CPU_ON64 = SMC_FUNC_FAST64(0x4, 0x3),
};
/* Parameter arguments. */
enum {
PSCI_PARAM_0 = 1,
PSCI_PARAM_1,
PSCI_PARAM_2,
PSCI_PARAM_3,
PSCI_RETURN_0 = 1,
PSCI_RETURN_1,
PSCI_RETURN_2,
PSCI_RETURN_3,
};
struct psci_func {
uint32_t id;
struct smc_call *smc;
};
static inline void psci_power_state_unpack(uint32_t power_state,
struct psci_power_state *state)
{
state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
PSCI_0_2_POWER_STATE_ID_SHIFT;
state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
PSCI_0_2_POWER_STATE_TYPE_SHIFT;
state->affinity_level =
(power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
PSCI_0_2_POWER_STATE_AFFL_SHIFT;
}
static inline void psci_func_init(struct psci_func *pf, struct smc_call *smc)
{
pf->id = smc_function_id(smc);
pf->smc = smc;
}
static inline uint64_t psci64_arg(struct psci_func *pf, unsigned i)
{
return smc64_arg(pf->smc, i);
}
static inline uint32_t psci32_arg(struct psci_func *pf, unsigned i)
{
return psci64_arg(pf, i);
}
static inline void psci64_result(struct psci_func *pf, unsigned i, uint64_t v)
{
smc64_result(pf->smc, i, v);
}
static inline void psci32_result(struct psci_func *pf, unsigned i, uint32_t v)
{
uint64_t v64 = v;
psci64_result(pf, i, v64);
}
static inline void psci32_return(struct psci_func *pf, int32_t val)
{
psci32_result(pf, 0, val);
}
static inline void psci64_return(struct psci_func *pf, int64_t val)
{
psci64_result(pf, 0, val);
}
void psci_init(uintptr_t cpu_on_entry);
void psci_soc_init(uintptr_t cpu_on_entry);
/* Turn on the current CPU within the PSCI subsystem. */
void psci_turn_on_self(const struct cpu_action *action);
int psci_turn_off_self(void);
/* Entry point for CPUs just turning on or waking up. */
void psci_cpu_entry(void);
#endif /* __ARCH_PSCI_H__ */

View File

@ -1,118 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __ARCH_SMC_H__
#define __ARCH_SMC_H__
#include <stdint.h>
enum {
FUNC_ID_CALL_TYPE_SHIFT = 31,
FUNC_ID_CALL_TYPE_MASK = (1 << FUNC_ID_CALL_TYPE_SHIFT),
FUNC_ID_FASTCALL = (1 << FUNC_ID_CALL_TYPE_SHIFT),
FUNC_ID_STDCALL = (0 << FUNC_ID_CALL_TYPE_SHIFT),
FUNC_ID_CALL_CONVENTION_SHIFT = 30,
FUNC_ID_CALL_CONVENTION_MASK = (1 << FUNC_ID_CALL_CONVENTION_SHIFT),
FUNC_ID_SMC32 = (0 << FUNC_ID_CALL_CONVENTION_SHIFT),
FUNC_ID_SMC64 = (1 << FUNC_ID_CALL_CONVENTION_SHIFT),
FUNC_ID_ENTITY_SHIFT = 24,
FUNC_ID_ENTITY_MASK = (0x3f << FUNC_ID_ENTITY_SHIFT),
FUNC_ID_FUNC_NUMBER_SHIFT = 0,
FUNC_ID_FUNC_NUMBER_MASK = (0xffff << FUNC_ID_FUNC_NUMBER_SHIFT),
FUNC_ID_MASK = FUNC_ID_CALL_TYPE_MASK | FUNC_ID_CALL_CONVENTION_MASK |
FUNC_ID_ENTITY_MASK | FUNC_ID_FUNC_NUMBER_MASK,
SMC_NUM_ARGS = 8, /* The last is optional hypervisor id. */
SMC_NUM_RESULTS = 4,
SMC_UNKNOWN_FUNC = 0xffffffff,
};
#define SMC_FUNC(entity, number, call_convention, call_type) \
((call_type) | (call_convention) | \
((entity) << FUNC_ID_ENTITY_SHIFT) | (number))
#define SMC_FUNC_FAST(entity, number, call_convention) \
SMC_FUNC((entity), (number), (call_convention), FUNC_ID_FASTCALL)
#define SMC_FUNC_FAST32(entity, number) \
SMC_FUNC_FAST((entity), (number), FUNC_ID_SMC32)
#define SMC_FUNC_FAST64(entity, number) \
SMC_FUNC_FAST((entity), (number), FUNC_ID_SMC64)
struct smc_call {
uint64_t args[SMC_NUM_ARGS];
uint64_t results[SMC_NUM_RESULTS];
};
/* SMC immediate value needs to be 0. */
/* Check mod AARCHx mode against calling convention. */
static inline uint64_t smc64_arg(const struct smc_call *smc, unsigned i)
{
return smc->args[i];
}
static inline uint32_t smc32_arg(const struct smc_call *smc, unsigned i)
{
return smc64_arg(smc, i);
}
static inline void smc64_result(struct smc_call *smc, unsigned i, uint64_t v)
{
smc->results[i] = v;
}
static inline void smc32_result(struct smc_call *smc, unsigned i, uint32_t v)
{
uint64_t v64 = v;
smc64_result(smc, i, v64);
}
static inline void smc32_return(struct smc_call *smc, int32_t v)
{
smc32_result(smc, 0, v);
}
static inline uint32_t smc_hypervisor_id(const struct smc_call *smc)
{
/* Set in W7 */
return smc32_arg(smc, 7);
}
static inline uint32_t smc_session_id(const struct smc_call *smc)
{
/* Set in W6 */
return smc32_arg(smc, 6);
}
static inline uint32_t smc_function_id(const struct smc_call *smc)
{
/* Function ID in W0. */
return smc32_arg(smc, 0) & FUNC_ID_MASK;
}
/* Initialize the SMC layer. */
void smc_init(void);
/* Register a handler for a given function range, inclusive. */
int smc_register_range(uint32_t min, uint32_t max, int (*)(struct smc_call *));
#endif /* __ARCH_SMC_H__ */

View File

@ -1,30 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __ARCH_ARM64_ARMV8_SECMON__
#define __ARCH_ARM64_ARMV8_SECMON__
#include <arch/cpu.h>
struct secmon_params {
size_t online_cpus;
struct cpu_action bsp;
struct cpu_action secondary;
};
void secmon_run(void (*entry)(void *), void *arg);
void soc_get_secmon_base_size(uint64_t *secmon_base, size_t *secmon_size);
#endif /*__ARCH_ARM64_ARMV8_SECMON__ */

View File

@ -84,7 +84,7 @@ ENDPROC(cpu_get_exception_stack)
*/
/*
* IMPORTANT: Ensure x25 is not corrupted because it saves the argument to
* secmon
* any rmodules.
*/
ENTRY(arm64_c_environment)
bl smp_processor_id /* x0 = cpu */
@ -134,7 +134,7 @@ ENDPROC(arm64_c_environment)
ENTRY(_start)
split_bsp_path
/* Save the arguments to secmon in x25 */
/* Save any arguments to current rmodule in x25 */
mov x25, x0
b arm64_c_environment
ENDPROC(_start)

View File

@ -21,7 +21,3 @@ romstage-y += die.c
bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += vtxprintf.c printk.c
bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += init.c console.c
bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += die.c
secmon-y += vtxprintf.c printk.c
secmon-y += init.c console.c
secmon-y += die.c

View File

@ -53,8 +53,6 @@ void console_init(void)
"ramstage"
#elif ENV_VERSTAGE
"verstage"
#elif ENV_SECMON
"secmon"
#else
"UNKNOWN"
#endif

View File

@ -3,7 +3,6 @@ ifeq ($(CONFIG_DRIVERS_UART),y)
romstage-y += util.c
ramstage-y += util.c
bootblock-y += util.c
secmon-y += util.c
verstage-y += util.c
smm-$(CONFIG_DEBUG_SMI) += util.c

View File

@ -47,8 +47,8 @@ void __attribute__ ((noreturn)) die(const char *msg);
#define __CONSOLE_ENABLE__ \
((ENV_BOOTBLOCK && CONFIG_BOOTBLOCK_CONSOLE) || \
ENV_SECMON || ENV_VERSTAGE || \
ENV_ROMSTAGE || ENV_RAMSTAGE || (ENV_SMM && CONFIG_DEBUG_SMI))
ENV_VERSTAGE || ENV_ROMSTAGE || ENV_RAMSTAGE || \
(ENV_SMM && CONFIG_DEBUG_SMI))
#if __CONSOLE_ENABLE__
void console_init(void);

View File

@ -53,7 +53,7 @@ void oxford_remap(unsigned int new_base);
#define __CONSOLE_SERIAL_ENABLE__ CONFIG_CONSOLE_SERIAL && \
(ENV_BOOTBLOCK || ENV_ROMSTAGE || ENV_RAMSTAGE || ENV_VERSTAGE || \
ENV_SECMON || (ENV_SMM && CONFIG_DEBUG_SMI))
(ENV_SMM && CONFIG_DEBUG_SMI))
#if __CONSOLE_SERIAL_ENABLE__
static inline void __uart_init(void) { uart_init(CONFIG_UART_FOR_CONSOLE); }

View File

@ -24,7 +24,6 @@
#define ENV_ROMSTAGE 0
#define ENV_RAMSTAGE 0
#define ENV_SMM 0
#define ENV_SECMON 0
#define ENV_VERSTAGE 0
#define ENV_RMODULE 0
@ -33,7 +32,6 @@
#define ENV_ROMSTAGE 1
#define ENV_RAMSTAGE 0
#define ENV_SMM 0
#define ENV_SECMON 0
#define ENV_VERSTAGE 0
#define ENV_RMODULE 0
@ -42,16 +40,6 @@
#define ENV_ROMSTAGE 0
#define ENV_RAMSTAGE 0
#define ENV_SMM 1
#define ENV_SECMON 0
#define ENV_VERSTAGE 0
#define ENV_RMODULE 0
#elif defined(__SECMON__)
#define ENV_BOOTBLOCK 0
#define ENV_ROMSTAGE 0
#define ENV_RAMSTAGE 0
#define ENV_SMM 0
#define ENV_SECMON 1
#define ENV_VERSTAGE 0
#define ENV_RMODULE 0
@ -60,7 +48,6 @@
#define ENV_ROMSTAGE 0
#define ENV_RAMSTAGE 0
#define ENV_SMM 0
#define ENV_SECMON 0
#define ENV_VERSTAGE 1
#define ENV_RMODULE 0
@ -69,7 +56,6 @@
#define ENV_ROMSTAGE 0
#define ENV_RAMSTAGE 1
#define ENV_SMM 0
#define ENV_SECMON 0
#define ENV_VERSTAGE 0
#define ENV_RMODULE 0
@ -78,7 +64,6 @@
#define ENV_ROMSTAGE 0
#define ENV_RAMSTAGE 0
#define ENV_SMM 0
#define ENV_SECMON 0
#define ENV_VERSTAGE 0
#define ENV_RMODULE 1
@ -93,7 +78,6 @@
#define ENV_ROMSTAGE 0
#define ENV_RAMSTAGE 0
#define ENV_SMM 0
#define ENV_SECMON 0
#define ENV_VERSTAGE 0
#define ENV_RMODULE 0
#endif

View File

@ -164,14 +164,12 @@ bootblock-y += version.c
romstage-y += version.c
ramstage-y += version.c
smm-y += version.c
secmon-y += version.c
verstage-y += version.c
$(obj)/lib/version.bootblock.o : $(obj)/build.h
$(obj)/lib/version.romstage.o : $(obj)/build.h
$(obj)/lib/version.ramstage.o : $(obj)/build.h
$(obj)/lib/version.smm.o : $(obj)/build.h
$(obj)/lib/version.secmon.o : $(obj)/build.h
$(obj)/lib/version.verstage.o : $(obj)/build.h
romstage-y += bootmode.c
@ -181,7 +179,6 @@ bootblock-y += halt.c
romstage-y += halt.c
ramstage-y += halt.c
smm-y += halt.c
secmon-y += halt.c
ifneq ($(CONFIG_ARCH_X86),y)
# X86 bootblock uses custom ldscripts that are all glued together,

View File

@ -13,7 +13,6 @@ config SOC_NVIDIA_TEGRA132
select HAVE_UART_SPECIAL
select ARM_BOOTBLOCK_CUSTOM
select SMP
select ARM64_USE_SECURE_MONITOR
select GENERIC_GPIO_LIB
select HAS_PRECBMEM_TIMESTAMP_REGION

View File

@ -91,16 +91,6 @@ ramstage-y += ramstage.c
ramstage-y += mmu_operations.c
ramstage-$(CONFIG_DRIVERS_UART) += uart.c
ramstage-y += ../tegra/usb.c
ramstage-$(CONFIG_ARM64_USE_SECURE_MONITOR) += secmon.c
secmon-y += 32bit_reset.S
secmon-y += cpu.c
secmon-y += cpu_lib.S
secmon-y += flow_ctrl.c
secmon-y += power.c
secmon-y += psci.c
secmon-y += uart.c
secmon-y += gic.c
modules_arm-y += monotonic_timer.c

View File

@ -1,141 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <arch/cpu.h>
#include <arch/io.h>
#include <arch/psci.h>
#include <soc/addressmap.h>
#include <soc/clk_rst.h>
#include <soc/cpu.h>
#include <soc/flow_ctrl.h>
#include <soc/power.h>
#include <console/console.h>
static void *cpu_on_entry_point;
void psci_soc_init(uintptr_t cpu_on_entry)
{
/*
* Stash secmon entry point for CPUs starting up. The 32-bit reset
* vector register is accessible in < EL3 so one has to attempt to
* plug the potential race for that register being changed out from
* under us. Therefore, we set the appropriate registers here, but
* it is also done on each CPU_ON request.
*/
cpu_on_entry_point = (void *)cpu_on_entry;
cpu_prepare_startup(cpu_on_entry_point);
}
static size_t children_at_level(int parent_level, uint64_t mpidr)
{
if (mpidr != 0)
return 0;
	/* T132 just has 2 cores (0 and 1). Level 1 has 2 children at level 0. */
switch (parent_level) {
case PSCI_AFFINITY_ROOT:
return 1;
case PSCI_AFFINITY_LEVEL_3:
return 1;
case PSCI_AFFINITY_LEVEL_2:
return 1;
case PSCI_AFFINITY_LEVEL_1:
return 2;
case PSCI_AFFINITY_LEVEL_0:
return 0;
default:
return 0;
}
}
#define TEGRA132_PM_CORE_C7 0x3
static inline void tegra132_enter_sleep(unsigned long pmstate)
{
asm volatile(
" isb\n"
" msr actlr_el1, %0\n"
" wfi\n"
:
: "r" (pmstate));
}
static void prepare_cpu_on(int cpu)
{
uint32_t partid;
partid = cpu ? POWER_PARTID_CE1 : POWER_PARTID_CE0;
power_ungate_partition(partid);
flowctrl_write_cpu_halt(cpu, 0);
}
static int cmd_prepare(struct psci_cmd *cmd)
{
int ret;
switch (cmd->type) {
case PSCI_CMD_ON:
prepare_cpu_on(cmd->target->cpu_state.ci->id);
ret = PSCI_RET_SUCCESS;
break;
case PSCI_CMD_OFF:
if (cmd->state_id != -1) {
ret = PSCI_RET_INVALID_PARAMETERS;
break;
}
cmd->state_id = TEGRA132_PM_CORE_C7;
ret = PSCI_RET_SUCCESS;
break;
default:
ret = PSCI_RET_NOT_SUPPORTED;
break;
}
return ret;
}
static int cmd_commit(struct psci_cmd *cmd)
{
int ret;
struct cpu_info *ci;
ci = cmd->target->cpu_state.ci;
switch (cmd->type) {
case PSCI_CMD_ON:
/* Take CPU out of reset */
start_cpu_silent(ci->id, cpu_on_entry_point);
ret = PSCI_RET_SUCCESS;
break;
case PSCI_CMD_OFF:
flowctrl_cpu_off(ci->id);
tegra132_enter_sleep(cmd->state_id);
/* Never reach here */
ret = PSCI_RET_NOT_SUPPORTED;
printk(BIOS_ERR, "t132 CPU%d PSCI_CMD_OFF fail\n", ci->id);
break;
default:
ret = PSCI_RET_NOT_SUPPORTED;
break;
}
return ret;
}
struct psci_soc_ops soc_psci_ops = {
.children_at_level = &children_at_level,
.cmd_prepare = &cmd_prepare,
.cmd_commit = &cmd_commit,
};

View File

@ -1,46 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <arch/secmon.h>
#include <console/console.h>
#include <soc/addressmap.h>
#include <soc/mmu_operations.h>
static void soc_get_secure_mem(uint64_t *base, size_t *size)
{
uintptr_t tz_base_mib;
size_t tz_size_mib;
carveout_range(CARVEOUT_TZ, &tz_base_mib, &tz_size_mib);
tz_base_mib *= MiB;
tz_size_mib *= MiB;
*base = tz_base_mib;
*size = tz_size_mib;
}
void soc_get_secmon_base_size(uint64_t *base, size_t *size)
{
uintptr_t tz_base;
size_t ttb_size, tz_size;
soc_get_secure_mem(&tz_base, &tz_size);
ttb_size = TTB_SIZE * MiB;
*base = tz_base + ttb_size;
*size = tz_size - ttb_size;
}

View File

@ -98,19 +98,9 @@ ramstage-y += ramstage.c
ramstage-y += mmu_operations.c
ramstage-$(CONFIG_DRIVERS_UART) += uart.c
ramstage-y += ../tegra/usb.c
ramstage-$(CONFIG_ARM64_USE_SECURE_MONITOR) += secmon.c
ramstage-$(CONFIG_HAVE_MTC) += mtc.c
ramstage-y += stage_entry.S
secmon-y += cpu.c
secmon-y += cpu_lib.S
secmon-y += flow_ctrl.c
secmon-y += power.c
secmon-y += psci.c
secmon-y += stage_entry.S
secmon-y += uart.c
secmon-y += gic.c
rmodules_arm-y += monotonic_timer.c
CPPFLAGS_common += -Isrc/soc/nvidia/tegra210/include/

View File

@ -43,11 +43,6 @@ static void enable_cpu_power_partitions(void)
power_ungate_partition(POWER_PARTID_CRAIL);
power_ungate_partition(POWER_PARTID_C0NC);
power_ungate_partition(POWER_PARTID_CE0);
if (IS_ENABLED(CONFIG_ARM64_USE_SECURE_MONITOR)) {
power_ungate_partition(POWER_PARTID_CE1);
power_ungate_partition(POWER_PARTID_CE2);
power_ungate_partition(POWER_PARTID_CE3);
}
if (IS_ENABLED(CONFIG_ARM64_USE_ARM_TRUSTED_FIRMWARE)) {
/*

View File

@ -1,190 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <assert.h>
#include <arch/cpu.h>
#include <arch/io.h>
#include <arch/psci.h>
#include <soc/addressmap.h>
#include <soc/clk_rst.h>
#include <soc/cpu.h>
#include <soc/flow_ctrl.h>
#include <soc/power.h>
#include <stdlib.h>
#include <console/console.h>
extern void tegra210_reset_handler(void);
#define TEGRA210_PM_STATE_C7 7
static void *cpu_on_entry_point;
void psci_soc_init(uintptr_t cpu_on_entry)
{
/*
* Stash secmon entry point for CPUs starting up. The 32-bit reset
* vector register is accessible in < EL3 so one has to attempt to
* plug the potential race for that register being changed out from
* under us. Therefore, we set the appropriate registers here, but
* it is also done on each CPU_ON request.
*/
cpu_on_entry_point = tegra210_reset_handler;
cpu_prepare_startup(cpu_on_entry_point);
}
static size_t children_at_level(int parent_level, uint64_t mpidr)
{
if (mpidr != 0)
return 0;
/*
* T210 has 2 clusters. Each cluster has 4 cores. Currently we are
* concentrating only on one of the clusters i.e. A57 cluster. For A53
* bringup, correct the cluster details for A53 cluster as well.
* Since, A57 cluster has 4 cores, level 1 has 4 children at level 0.
* TODO(furquan): Update for A53.
*/
switch (parent_level) {
case PSCI_AFFINITY_ROOT:
return 1;
case PSCI_AFFINITY_LEVEL_3:
return 1;
case PSCI_AFFINITY_LEVEL_2:
return 1;
case PSCI_AFFINITY_LEVEL_1:
return 4;
case PSCI_AFFINITY_LEVEL_0:
return 0;
default:
return 0;
}
}
static void prepare_cpu_on(int cpu)
{
cpu_prepare_startup(cpu_on_entry_point);
}
static void prepare_cpu_suspend(int cpu, uint32_t state_id)
{
flowctrl_write_cc4_ctrl(cpu, 0xffffffff);
switch (state_id) {
case TEGRA210_PM_STATE_C7:
flowctrl_cpu_suspend(cpu);
break;
default:
return;
}
}
static void prepare_cpu_resume(int cpu)
{
flowctrl_write_cpu_csr(cpu, 0);
flowctrl_write_cpu_halt(cpu, 0);
flowctrl_write_cc4_ctrl(cpu, 0);
}
static void cpu_suspend_commit(int cpu, uint32_t state_id)
{
int l2_flush;
switch (state_id) {
case TEGRA210_PM_STATE_C7:
l2_flush = NO_L2_FLUSH;
break;
default:
return;
}
cortex_a57_cpu_power_down(l2_flush);
/* should never be here */
}
static int cmd_prepare(struct psci_cmd *cmd)
{
int ret;
struct cpu_info *ci;
ci = cmd->target->cpu_state.ci;
switch (cmd->type) {
case PSCI_CMD_SUSPEND:
cmd->state_id = cmd->state->id;
prepare_cpu_on(ci->id);
prepare_cpu_suspend(ci->id, cmd->state_id);
ret = PSCI_RET_SUCCESS;
break;
case PSCI_CMD_RESUME:
prepare_cpu_resume(ci->id);
ret = PSCI_RET_SUCCESS;
break;
case PSCI_CMD_ON:
prepare_cpu_on(ci->id);
ret = PSCI_RET_SUCCESS;
break;
case PSCI_CMD_OFF:
if (cmd->state_id != -1) {
ret = PSCI_RET_INVALID_PARAMETERS;
break;
}
ret = PSCI_RET_SUCCESS;
break;
default:
ret = PSCI_RET_NOT_SUPPORTED;
break;
}
return ret;
}
static int cmd_commit(struct psci_cmd *cmd)
{
int ret;
struct cpu_info *ci;
ci = cmd->target->cpu_state.ci;
switch (cmd->type) {
case PSCI_CMD_SUSPEND:
cpu_suspend_commit(ci->id, cmd->state_id);
ret = PSCI_RET_SUCCESS;
break;
case PSCI_CMD_RESUME:
ret = PSCI_RET_SUCCESS;
break;
case PSCI_CMD_ON:
/* Take CPU out of reset */
flowctrl_cpu_on(ci->id);
ret = PSCI_RET_SUCCESS;
break;
case PSCI_CMD_OFF:
flowctrl_cpu_off(ci->id);
cortex_a57_cpu_power_down(NO_L2_FLUSH);
/* Never reach here */
ret = PSCI_RET_NOT_SUPPORTED;
printk(BIOS_ERR, "t210 CPU%d PSCI_CMD_OFF fail\n", ci->id);
break;
default:
ret = PSCI_RET_NOT_SUPPORTED;
break;
}
return ret;
}
struct psci_soc_ops soc_psci_ops = {
.children_at_level = &children_at_level,
.cmd_prepare = &cmd_prepare,
.cmd_commit = &cmd_commit,
};

View File

@ -1,46 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright 2014 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <arch/secmon.h>
#include <console/console.h>
#include <soc/addressmap.h>
#include <soc/mmu_operations.h>
static void soc_get_secure_mem(uint64_t *base, size_t *size)
{
uintptr_t tz_base_mib;
size_t tz_size_mib;
carveout_range(CARVEOUT_TZ, &tz_base_mib, &tz_size_mib);
tz_base_mib *= MiB;
tz_size_mib *= MiB;
*base = tz_base_mib;
*size = tz_size_mib;
}
void soc_get_secmon_base_size(uint64_t *base, size_t *size)
{
uintptr_t tz_base;
size_t ttb_size, tz_size;
soc_get_secure_mem(&tz_base, &tz_size);
ttb_size = CONFIG_TTB_SIZE_MB * MiB;
*base = tz_base + ttb_size;
*size = tz_size - ttb_size;
}