riscv: update misaligned memory access exception handling

Add support for more cases: floating-point accesses, compressed
instructions, etc. Also add support for redirecting the exception to S-mode.


Change-Id: I9983d56245eab1d458a84cb1432aeb805df7a49f
Signed-off-by: Xiang Wang <wxjstz@126.com>
Reviewed-on: https://review.coreboot.org/27972
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Jonathan Neuschäfer <j.neuschaefer@gmx.net>
Xiang Wang 2018-08-09 16:20:35 +08:00 committed by Patrick Georgi
parent aa5f821ee3
commit cda59b56ba
5 changed files with 652 additions and 68 deletions
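
At a high level, the new code decodes the faulting load or store (both 16-bit
compressed and 32-bit encodings), emulates it with byte-sized accesses, and
redirects the trap to S-mode for anything it cannot emulate. A simplified
sketch of that flow (illustrative pseudocode, not the literal patch code; it
assumes the trapframe carries the mcause value in tf->cause):

	void trap_handler(trapframe *tf)
	{
		switch (tf->cause) {
		case CAUSE_MISALIGNED_LOAD:
		case CAUSE_MISALIGNED_STORE:
			/* decode the instruction, then emulate it byte by
			 * byte, or call redirect_trap() if it is unknown */
			handle_misaligned(tf);
			return;
		/* ... other causes unchanged ... */
		}
	}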

@@ -43,6 +43,8 @@ ifeq ($(CONFIG_ARCH_BOOTBLOCK_RISCV),y)
bootblock-y = bootblock.S
bootblock-y += trap_util.S
bootblock-y += trap_handler.c
+bootblock-y += fp_asm.S
+bootblock-y += misaligned.c
bootblock-y += mcall.c
bootblock-y += virtual_memory.c
bootblock-y += boot.c
@@ -101,6 +103,8 @@ ramstage-y += ramstage.S
ramstage-y += mcall.c
ramstage-y += trap_util.S
ramstage-y += trap_handler.c
+ramstage-y += fp_asm.S
+ramstage-y += misaligned.c
ramstage-y += virtual_memory.c
ramstage-y += stages.c
ramstage-y += misc.c

src/arch/riscv/fp_asm.S (new file, 362 lines)

@@ -0,0 +1,362 @@
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2018 HardenedLinux
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
 * This file defines helper functions that move values between memory
 * and the floating-point registers. They are used by the misaligned
 * access exception handler.
 */
#if defined(__riscv_flen)
#if __riscv_flen >= 32
.text
/* void read_f32(int regnum, uint32_t *v)
 * regnum : index of the FP register to read
 * v      : address where the register bits are stored
 */
.align 1
.globl read_f32
read_f32:
la a2, .Lr32_t
andi a0, a0, 31
slli a0, a0, 1
add a0, a0, a2
lhu a0, 0(a0)
add a0, a0, a2
jr a0
.align 2
.Lr32_t:
.half .Lr32_f0 - .Lr32_t
.half .Lr32_f1 - .Lr32_t
.half .Lr32_f2 - .Lr32_t
.half .Lr32_f3 - .Lr32_t
.half .Lr32_f4 - .Lr32_t
.half .Lr32_f5 - .Lr32_t
.half .Lr32_f6 - .Lr32_t
.half .Lr32_f7 - .Lr32_t
.half .Lr32_f8 - .Lr32_t
.half .Lr32_f9 - .Lr32_t
.half .Lr32_f10 - .Lr32_t
.half .Lr32_f11 - .Lr32_t
.half .Lr32_f12 - .Lr32_t
.half .Lr32_f13 - .Lr32_t
.half .Lr32_f14 - .Lr32_t
.half .Lr32_f15 - .Lr32_t
.half .Lr32_f16 - .Lr32_t
.half .Lr32_f17 - .Lr32_t
.half .Lr32_f18 - .Lr32_t
.half .Lr32_f19 - .Lr32_t
.half .Lr32_f20 - .Lr32_t
.half .Lr32_f21 - .Lr32_t
.half .Lr32_f22 - .Lr32_t
.half .Lr32_f23 - .Lr32_t
.half .Lr32_f24 - .Lr32_t
.half .Lr32_f25 - .Lr32_t
.half .Lr32_f26 - .Lr32_t
.half .Lr32_f27 - .Lr32_t
.half .Lr32_f28 - .Lr32_t
.half .Lr32_f29 - .Lr32_t
.half .Lr32_f30 - .Lr32_t
.half .Lr32_f31 - .Lr32_t
#define read32(which) .Lr32_##which: fsw which, 0(a1); ret
read32(f0)
read32(f1)
read32(f2)
read32(f3)
read32(f4)
read32(f5)
read32(f6)
read32(f7)
read32(f8)
read32(f9)
read32(f10)
read32(f11)
read32(f12)
read32(f13)
read32(f14)
read32(f15)
read32(f16)
read32(f17)
read32(f18)
read32(f19)
read32(f20)
read32(f21)
read32(f22)
read32(f23)
read32(f24)
read32(f25)
read32(f26)
read32(f27)
read32(f28)
read32(f29)
read32(f30)
read32(f31)
/* void write_f32(int regnum, uint32_t *v)
 * regnum : index of the FP register to write
 * v      : address holding the bits to load into the register
 */
.align 1
.globl write_f32
write_f32:
la a2, .Lw32_t
andi a0, a0, 31
slli a0, a0, 1
add a0, a0, a2
lhu a0, 0(a0)
add a0, a0, a2
jr a0
.align 2
.Lw32_t:
.half .Lw32_f0 - .Lw32_t
.half .Lw32_f1 - .Lw32_t
.half .Lw32_f2 - .Lw32_t
.half .Lw32_f3 - .Lw32_t
.half .Lw32_f4 - .Lw32_t
.half .Lw32_f5 - .Lw32_t
.half .Lw32_f6 - .Lw32_t
.half .Lw32_f7 - .Lw32_t
.half .Lw32_f8 - .Lw32_t
.half .Lw32_f9 - .Lw32_t
.half .Lw32_f10 - .Lw32_t
.half .Lw32_f11 - .Lw32_t
.half .Lw32_f12 - .Lw32_t
.half .Lw32_f13 - .Lw32_t
.half .Lw32_f14 - .Lw32_t
.half .Lw32_f15 - .Lw32_t
.half .Lw32_f16 - .Lw32_t
.half .Lw32_f17 - .Lw32_t
.half .Lw32_f18 - .Lw32_t
.half .Lw32_f19 - .Lw32_t
.half .Lw32_f20 - .Lw32_t
.half .Lw32_f21 - .Lw32_t
.half .Lw32_f22 - .Lw32_t
.half .Lw32_f23 - .Lw32_t
.half .Lw32_f24 - .Lw32_t
.half .Lw32_f25 - .Lw32_t
.half .Lw32_f26 - .Lw32_t
.half .Lw32_f27 - .Lw32_t
.half .Lw32_f28 - .Lw32_t
.half .Lw32_f29 - .Lw32_t
.half .Lw32_f30 - .Lw32_t
.half .Lw32_f31 - .Lw32_t
#define write32(which) .Lw32_##which: flw which, 0(a1); ret
write32(f0)
write32(f1)
write32(f2)
write32(f3)
write32(f4)
write32(f5)
write32(f6)
write32(f7)
write32(f8)
write32(f9)
write32(f10)
write32(f11)
write32(f12)
write32(f13)
write32(f14)
write32(f15)
write32(f16)
write32(f17)
write32(f18)
write32(f19)
write32(f20)
write32(f21)
write32(f22)
write32(f23)
write32(f24)
write32(f25)
write32(f26)
write32(f27)
write32(f28)
write32(f29)
write32(f30)
write32(f31)
#endif // __riscv_flen >= 32
#if __riscv_flen >= 64
.text
/* void read_f64(int regnum, uint64_t *v)
 * regnum : index of the FP register to read
 * v      : address where the register bits are stored
 */
.align 1
.globl read_f64
read_f64:
la a2, .Lr64_t
andi a0, a0, 31
slli a0, a0, 1
add a0, a0, a2
lhu a0, 0(a0)
add a0, a0, a2
jr a0
.align 2
.Lr64_t:
.half .Lr64_f0 - .Lr64_t
.half .Lr64_f1 - .Lr64_t
.half .Lr64_f2 - .Lr64_t
.half .Lr64_f3 - .Lr64_t
.half .Lr64_f4 - .Lr64_t
.half .Lr64_f5 - .Lr64_t
.half .Lr64_f6 - .Lr64_t
.half .Lr64_f7 - .Lr64_t
.half .Lr64_f8 - .Lr64_t
.half .Lr64_f9 - .Lr64_t
.half .Lr64_f10 - .Lr64_t
.half .Lr64_f11 - .Lr64_t
.half .Lr64_f12 - .Lr64_t
.half .Lr64_f13 - .Lr64_t
.half .Lr64_f14 - .Lr64_t
.half .Lr64_f15 - .Lr64_t
.half .Lr64_f16 - .Lr64_t
.half .Lr64_f17 - .Lr64_t
.half .Lr64_f18 - .Lr64_t
.half .Lr64_f19 - .Lr64_t
.half .Lr64_f20 - .Lr64_t
.half .Lr64_f21 - .Lr64_t
.half .Lr64_f22 - .Lr64_t
.half .Lr64_f23 - .Lr64_t
.half .Lr64_f24 - .Lr64_t
.half .Lr64_f25 - .Lr64_t
.half .Lr64_f26 - .Lr64_t
.half .Lr64_f27 - .Lr64_t
.half .Lr64_f28 - .Lr64_t
.half .Lr64_f29 - .Lr64_t
.half .Lr64_f30 - .Lr64_t
.half .Lr64_f31 - .Lr64_t
#define read64(which) .Lr64_##which: fsd which, 0(a1); ret
read64(f0)
read64(f1)
read64(f2)
read64(f3)
read64(f4)
read64(f5)
read64(f6)
read64(f7)
read64(f8)
read64(f9)
read64(f10)
read64(f11)
read64(f12)
read64(f13)
read64(f14)
read64(f15)
read64(f16)
read64(f17)
read64(f18)
read64(f19)
read64(f20)
read64(f21)
read64(f22)
read64(f23)
read64(f24)
read64(f25)
read64(f26)
read64(f27)
read64(f28)
read64(f29)
read64(f30)
read64(f31)
/* void write_f64(int regnum, uint64_t *v)
 * regnum : index of the FP register to write
 * v      : address holding the bits to load into the register
 */
.align 1
.globl write_f64
write_f64:
la a2, .Lw64_t
andi a0, a0, 31
slli a0, a0, 1
add a0, a0, a2
lhu a0, 0(a0)
add a0, a0, a2
jr a0
.align 2
.Lw64_t:
.half .Lw64_f0 - .Lw64_t
.half .Lw64_f1 - .Lw64_t
.half .Lw64_f2 - .Lw64_t
.half .Lw64_f3 - .Lw64_t
.half .Lw64_f4 - .Lw64_t
.half .Lw64_f5 - .Lw64_t
.half .Lw64_f6 - .Lw64_t
.half .Lw64_f7 - .Lw64_t
.half .Lw64_f8 - .Lw64_t
.half .Lw64_f9 - .Lw64_t
.half .Lw64_f10 - .Lw64_t
.half .Lw64_f11 - .Lw64_t
.half .Lw64_f12 - .Lw64_t
.half .Lw64_f13 - .Lw64_t
.half .Lw64_f14 - .Lw64_t
.half .Lw64_f15 - .Lw64_t
.half .Lw64_f16 - .Lw64_t
.half .Lw64_f17 - .Lw64_t
.half .Lw64_f18 - .Lw64_t
.half .Lw64_f19 - .Lw64_t
.half .Lw64_f20 - .Lw64_t
.half .Lw64_f21 - .Lw64_t
.half .Lw64_f22 - .Lw64_t
.half .Lw64_f23 - .Lw64_t
.half .Lw64_f24 - .Lw64_t
.half .Lw64_f25 - .Lw64_t
.half .Lw64_f26 - .Lw64_t
.half .Lw64_f27 - .Lw64_t
.half .Lw64_f28 - .Lw64_t
.half .Lw64_f29 - .Lw64_t
.half .Lw64_f30 - .Lw64_t
.half .Lw64_f31 - .Lw64_t
#define write64(which) .Lw64_##which: fld which, 0(a1); ret
write64(f0)
write64(f1)
write64(f2)
write64(f3)
write64(f4)
write64(f5)
write64(f6)
write64(f7)
write64(f8)
write64(f9)
write64(f10)
write64(f11)
write64(f12)
write64(f13)
write64(f14)
write64(f15)
write64(f16)
write64(f17)
write64(f18)
write64(f19)
write64(f20)
write64(f21)
write64(f22)
write64(f23)
write64(f24)
write64(f25)
write64(f26)
write64(f27)
write64(f28)
write64(f29)
write64(f30)
write64(f31)
#endif // __riscv_flen >= 64
#endif // defined(__riscv_flen)
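
For reference, the .half tables above implement a computed jump: the register
number selects a 16-bit offset, and control transfers to the per-register
fsw/flw/fsd/fld stub. The dispatch has to live in assembly because the FP
register number cannot be a run-time operand of a single instruction. A rough
C equivalent of read_f32() would look like the sketch below (illustrative
only; read_f32_sketch is not part of the patch):

	#include <stdint.h>

	void read_f32_sketch(int regnum, uint32_t *v)
	{
		switch (regnum & 31) {
		case 0:  asm volatile ("fsw f0, 0(%0)"  : : "r"(v) : "memory"); break;
		case 1:  asm volatile ("fsw f1, 0(%0)"  : : "r"(v) : "memory"); break;
		/* ... one case per register, up to ... */
		case 31: asm volatile ("fsw f31, 0(%0)" : : "r"(v) : "memory"); break;
		}
	}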

@@ -32,8 +32,7 @@
#include <stdint.h>

-typedef struct
-{
+typedef struct {
uintptr_t gpr[32];
uintptr_t status;
uintptr_t epc;
@@ -53,9 +52,9 @@ static inline void exception_init(void)
{
}

-void trap_handler(trapframe* tf);
-void handle_supervisor_call(trapframe* tf);
-void handle_misaligned_load(trapframe *tf);
-void handle_misaligned_store(trapframe *tf);
+void redirect_trap(void);
+void trap_handler(trapframe *tf);
+void handle_supervisor_call(trapframe *tf);
+void handle_misaligned(trapframe *tf);
#endif

src/arch/riscv/misaligned.c (new file, 267 lines)

@@ -0,0 +1,267 @@
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2018 HardenedLinux
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <stddef.h>
#include <stdint.h>
#include <vm.h>
#include <arch/exception.h>
#include <commonlib/helpers.h>
/* these functions are defined in src/arch/riscv/fp_asm.S */
#if defined(__riscv_flen)
#if __riscv_flen >= 32
extern void read_f32(int regnum, uint32_t *v);
extern void write_f32(int regnum, uint32_t *v);
#endif // __riscv_flen >= 32
#if __riscv_flen >= 64
extern void read_f64(int regnum, uint64_t *v);
extern void write_f64(int regnum, uint64_t *v);
#endif // __riscv_flen >= 64
#endif // defined(__riscv_flen)
/* This union makes it easy to read multibyte types by byte operations. */
union endian_buf {
uint8_t b[8];
uint16_t h[4];
uint32_t w[2];
uint64_t d[1];
uintptr_t v;
};
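/* For illustration: RISC-V is little-endian, so storing 0xEF, 0xBE, 0xAD,
 * 0xDE into buff.b[0]..buff.b[3] one byte at a time yields
 * buff.w[0] == 0xDEADBEEF, and (with the upper bytes cleared first) buff.v
 * holds the same value zero-extended to the native register width. */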
/* This struct holds the decode information for one load/store instruction */
struct memory_instruction_info {
/* opcode/mask used to identify instruction,
* (instruction_val) & mask == opcode */
uint32_t opcode;
uint32_t mask;
/* reg_shift/reg_mask/reg_addition used to get register number
* ((instruction_val >> reg_shift) & reg_mask) + reg_addition */
unsigned int reg_shift;
unsigned int reg_mask;
unsigned int reg_addition;
unsigned int is_fp : 1; /* mark as a float operation */
unsigned int is_load : 1; /* mark as a load operation */
unsigned int width : 8; /* Record the memory width of the operation */
unsigned int sign_extend : 1; /* mark as needing sign extension */
};
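/* Worked example (illustrative): "lw a0, 0(s0)" encodes as 0x00042503.
 * Masking with 0x0000707f gives 0x00002003, which matches the LW entry
 * below; the register field is ((0x00042503 >> 7) & 31) + 0 == 10, i.e.
 * x10 (a0), and the entry describes a 4-byte, sign-extended integer load. */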
static struct memory_instruction_info insn_info[] = {
#if __riscv_xlen == 128
{ 0x00002000, 0x0000e003, 2, 7, 8, 0, 1, 16, 1}, // C.LQ
#else
{ 0x00002000, 0x0000e003, 2, 7, 8, 1, 1, 8, 0}, // C.FLD
#endif
{ 0x00004000, 0x0000e003, 2, 7, 8, 0, 1, 4, 1}, // C.LW
#if __riscv_xlen == 32
{ 0x00006000, 0x0000e003, 2, 7, 8, 1, 1, 4, 0}, // C.FLW
#else
{ 0x00006000, 0x0000e003, 2, 7, 8, 0, 1, 8, 1}, // C.LD
#endif
#if __riscv_xlen == 128
{ 0x0000a000, 0x0000e003, 2, 7, 8, 0, 0, 16, 0}, // C.SQ
#else
{ 0x0000a000, 0x0000e003, 2, 7, 8, 1, 0, 8, 0}, // C.FSD
#endif
{ 0x0000c000, 0x0000e003, 2, 7, 8, 0, 0, 4, 0}, // C.SW
#if __riscv_xlen == 32
{ 0x0000e000, 0x0000e003, 2, 7, 8, 1, 0, 4, 0}, // C.FSW
#else
{ 0x0000e000, 0x0000e003, 2, 7, 8, 0, 0, 8, 0}, // C.SD
#endif
#if __riscv_xlen == 128
{ 0x00002002, 0x0000e003, 7, 31, 0, 0, 1, 16, 1}, // C.LQSP
#else
{ 0x00002002, 0x0000e003, 7, 31, 0, 1, 1, 8, 0}, // C.FLDSP
#endif
{ 0x00004002, 0x0000e003, 7, 31, 0, 0, 1, 4, 1}, // C.LWSP
#if __riscv_xlen == 32
{ 0x00006002, 0x0000e003, 7, 31, 0, 1, 1, 4, 0}, // C.FLWSP
#else
{ 0x00006002, 0x0000e003, 7, 31, 0, 0, 1, 8, 1}, // C.LDSP
#endif
#if __riscv_xlen == 128
{ 0x0000a002, 0x0000e003, 2, 31, 0, 0, 0, 16, 0}, // C.SQSP
#else
{ 0x0000a002, 0x0000e003, 2, 31, 0, 1, 0, 8, 0}, // C.FSDSP
#endif
{ 0x0000c002, 0x0000e003, 2, 31, 0, 0, 0, 4, 0}, // C.SWSP
#if __riscv_xlen == 32
{ 0x0000e002, 0x0000e003, 2, 31, 0, 1, 0, 4, 0}, // C.FSWSP
#else
{ 0x0000e002, 0x0000e003, 2, 31, 0, 0, 0, 8, 0}, // C.SDSP
#endif
{ 0x00000003, 0x0000707f, 7, 31, 0, 0, 1, 1, 1}, // LB
{ 0x00001003, 0x0000707f, 7, 31, 0, 0, 1, 2, 1}, // LH
{ 0x00002003, 0x0000707f, 7, 31, 0, 0, 1, 4, 1}, // LW
#if __riscv_xlen > 32
{ 0x00003003, 0x0000707f, 7, 31, 0, 0, 1, 8, 1}, // LD
#endif
{ 0x00004003, 0x0000707f, 7, 31, 0, 0, 1, 1, 0}, // LBU
{ 0x00005003, 0x0000707f, 7, 31, 0, 0, 1, 2, 0}, // LHU
{ 0x00006003, 0x0000707f, 7, 31, 0, 0, 1, 4, 0}, // LWU
{ 0x00000023, 0x0000707f, 20, 31, 0, 0, 0, 1, 0}, // SB
{ 0x00001023, 0x0000707f, 20, 31, 0, 0, 0, 2, 0}, // SH
{ 0x00002023, 0x0000707f, 20, 31, 0, 0, 0, 4, 0}, // SW
#if __riscv_xlen > 32
{ 0x00003023, 0x0000707f, 20, 31, 0, 0, 0, 8, 0}, // SD
#endif
#if defined(__riscv_flen)
#if __riscv_flen >= 32
{ 0x00002007, 0x0000707f, 7, 31, 0, 1, 1, 4, 0}, // FLW
{ 0x00002027, 0x0000707f, 20, 31, 0, 1, 0, 4, 0}, // FSW
#endif // __riscv_flen >= 32
#if __riscv_flen >= 64
{ 0x00003007, 0x0000707f, 7, 31, 0, 1, 1, 8, 0}, // FLD
{ 0x00003027, 0x0000707f, 20, 31, 0, 1, 0, 8, 0}, // FSD
#endif // __riscv_flen >= 64
#endif // defined(__riscv_flen)
};
static struct memory_instruction_info *match_instruction(uintptr_t insn)
{
int i;
for (i = 0; i < ARRAY_SIZE(insn_info); i++)
if ((insn_info[i].mask & insn) == insn_info[i].opcode)
return &(insn_info[i]);
return NULL;
}
static int fetch_16bit_instruction(uintptr_t vaddr, uintptr_t *insn)
{
uint16_t ins = mprv_read_mxr_u16((uint16_t *)vaddr);
if (EXTRACT_FIELD(ins, 0x3) != 3) {
*insn = ins;
return 0;
}
return -1;
}
static int fetch_32bit_instruction(uintptr_t vaddr, uintptr_t *insn)
{
uint32_t l = (uint32_t)mprv_read_mxr_u16((uint16_t *)vaddr + 0);
uint32_t h = (uint32_t)mprv_read_mxr_u16((uint16_t *)vaddr + 1);
uint32_t ins = (h << 16) | l;
if ((EXTRACT_FIELD(ins, 0x3) == 3) &&
(EXTRACT_FIELD(ins, 0x1c) != 0x7)) {
*insn = ins;
return 0;
}
return -1;
}
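/* These helpers follow the RISC-V length encoding: when bits [1:0] of the
 * first halfword are not 0b11 the instruction is 16 bits wide (e.g.
 * "c.li a0, 0" is 0x4501); when they are 0b11 and bits [4:2] are not 0b111
 * it is exactly 32 bits wide (e.g. "addi a0, a0, 1" is 0x00150513). Longer
 * formats are not handled and end up redirected to S-mode. */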
void handle_misaligned(trapframe *tf)
{
uintptr_t insn = 0;
union endian_buf buff;
/* try to fetch a 16-bit or a 32-bit instruction */
if (fetch_16bit_instruction(tf->epc, &insn)) {
if (fetch_32bit_instruction(tf->epc, &insn)) {
redirect_trap();
return;
}
}
/* matching instruction */
struct memory_instruction_info *match = match_instruction(insn);
if (!match) {
redirect_trap();
return;
}
int regnum;
regnum = ((insn >> match->reg_shift) & match->reg_mask);
regnum = regnum + match->reg_addition;
buff.v = 0;
if (match->is_load) {
/* load operation */
/* reading from memory by bytes prevents misaligned
* memory access */
for (int i = 0; i < match->width; i++) {
uint8_t *addr = (uint8_t *)(tf->badvaddr + i);
buff.b[i] = mprv_read_u8(addr);
}
/* sign-extend signed integer loads that are narrower than a register */
if (match->sign_extend && match->width < sizeof(buff.v))
if (buff.v >> (8 * match->width - 1))
buff.v |= (uintptr_t)-1 << (8 * match->width);
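/* For example, a misaligned "lh" that reads the bytes 0xFE 0xFF leaves
 * buff.v == 0xFFFE; bit 15 is set, so the bits above the 2-byte width are
 * filled with ones and the destination register receives -2, just as an
 * aligned lh would have produced. */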
/* write to register */
if (match->is_fp) {
int done = 0;
#if defined(__riscv_flen)
#if __riscv_flen >= 32
/* single-precision floating-point */
if (match->width == 4) {
write_f32(regnum, buff.w);
done = 1;
}
#endif // __riscv_flen >= 32
#if __riscv_flen >= 64
/* double-precision floating-point */
if (match->width == 8) {
write_f64(regnum, buff.d);
done = 1;
}
#endif // __riscv_flen >= 64
#endif // defined(__riscv_flen)
if (!done)
redirect_trap();
} else {
tf->gpr[regnum] = buff.v;
}
} else {
/* store operation */
/* reading from register */
if (match->is_fp) {
int done = 0;
#if defined(__riscv_flen)
#if __riscv_flen >= 32
if (match->width == 4) {
read_f32(regnum, buff.w);
done = 1;
}
#endif // __riscv_flen >= 32
#if __riscv_flen >= 64
if (match->width == 8) {
read_f64(regnum, buff.d);
done = 1;
}
#endif // __riscv_flen >= 64
#endif // defined(__riscv_flen)
if (!done)
redirect_trap();
} else {
buff.v = tf->gpr[regnum];
}
/* writing to memory by bytes prevents misaligned
* memory access */
for (int i = 0; i < match->width; i++) {
uint8_t *addr = (uint8_t *)(tf->badvaddr + i);
mprv_write_u8(addr, buff.b[i]);
}
}
/* skip over the instruction we just emulated:
 * 2 bytes if it was compressed, otherwise 4 */
write_csr(mepc, read_csr(mepc) + ((insn & 0x3) == 0x3 ? 4 : 2));
}

@@ -166,12 +166,9 @@ void trap_handler(trapframe *tf)
print_trap_information(tf);
break;
case CAUSE_MISALIGNED_LOAD:
-print_trap_information(tf);
-handle_misaligned_load(tf);
-return;
case CAUSE_MISALIGNED_STORE:
print_trap_information(tf);
-handle_misaligned_store(tf);
+handle_misaligned(tf);
return;
default:
printk(BIOS_EMERG, "================================\n");
@@ -184,62 +181,17 @@ void trap_handler(trapframe *tf)
die("Can't recover from trap. Halting.\n");
}

-static uint32_t fetch_instruction(uintptr_t vaddr) {
-printk(BIOS_SPEW, "fetching instruction at 0x%016zx\n", (size_t)vaddr);
-return mprv_read_u32((uint32_t *) vaddr);
-}
-
-void handle_misaligned_load(trapframe *tf) {
-printk(BIOS_DEBUG, "Trapframe ptr: %p\n", tf);
-uintptr_t faultingInstructionAddr = tf->epc;
-insn_t faultingInstruction = fetch_instruction(faultingInstructionAddr);
-printk(BIOS_DEBUG, "Faulting instruction: 0x%x\n", faultingInstruction);
-insn_t widthMask = 0x7000;
-insn_t memWidth = (faultingInstruction & widthMask) >> 12;
-insn_t destMask = 0xF80;
-insn_t destRegister = (faultingInstruction & destMask) >> 7;
-printk(BIOS_DEBUG, "Width: %d bits\n", (1 << memWidth) * 8);
-if (memWidth == 3) {
-// load double, handle the issue
-void* badAddress = (void*) tf->badvaddr;
-uint64_t value = 0;
-for (int i = 0; i < 8; i++) {
-value <<= 8;
-value += mprv_read_u8(badAddress+i);
-}
-tf->gpr[destRegister] = value;
-} else {
-// panic, this should not have happened
-die("Code should not reach this path, misaligned on a non-64 bit store/load\n");
-}
-// return to where we came from
-write_csr(mepc, read_csr(mepc) + 4);
-}
-
-void handle_misaligned_store(trapframe *tf) {
-printk(BIOS_DEBUG, "Trapframe ptr: %p\n", tf);
-uintptr_t faultingInstructionAddr = tf->epc;
-insn_t faultingInstruction = fetch_instruction(faultingInstructionAddr);
-printk(BIOS_DEBUG, "Faulting instruction: 0x%x\n", faultingInstruction);
-insn_t widthMask = 0x7000;
-insn_t memWidth = (faultingInstruction & widthMask) >> 12;
-insn_t srcMask = 0x1F00000;
-insn_t srcRegister = (faultingInstruction & srcMask) >> 20;
-printk(BIOS_DEBUG, "Width: %d bits\n", (1 << memWidth) * 8);
-if (memWidth == 3) {
-// store double, handle the issue
-void* badAddress = (void*) tf->badvaddr;
-uint64_t value = tf->gpr[srcRegister];
-for (int i = 0; i < 8; i++) {
-mprv_write_u8(badAddress+i, value);
-value >>= 8;
-}
-} else {
-// panic, this should not have happened
-die("Code should not reach this path, misaligned on a non-64 bit store/load\n");
-}
-// return to where we came from
-write_csr(mepc, read_csr(mepc) + 4);
-}
+/* This function is used to redirect a trap to S-mode. */
+void redirect_trap(void)
+{
+write_csr(sbadaddr, read_csr(mbadaddr));
+write_csr(sepc, read_csr(mepc));
+write_csr(scause, read_csr(mcause));
+write_csr(mepc, read_csr(stvec));
+
+uintptr_t status = read_csr(mstatus);
+uintptr_t mpp = EXTRACT_FIELD(status, MSTATUS_MPP);
+status = INSERT_FIELD(status, MSTATUS_MPP, 1);
+status = INSERT_FIELD(status, MSTATUS_SPP, mpp & 1);
+write_csr(mstatus, status);
+}
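
redirect_trap() makes a failed emulation attempt look to the OS like a trap
that was delegated straight to S-mode: the M-mode trap CSRs are copied into
their S-mode counterparts, mepc is pointed at stvec so the eventual mret lands
in the S-mode trap vector, and mstatus is rewritten so that SPP records the
privilege level the access came from while MPP selects S-mode for the return.
A worked example (illustrative, for a misaligned store taken while running in
S-mode):

	/*
	 * before redirect_trap():
	 *   mcause   = CAUSE_MISALIGNED_STORE
	 *   mepc     = address of the faulting store
	 *   mbadaddr = misaligned effective address
	 *   mstatus.MPP = 1 (trap came from S-mode)
	 *
	 * after redirect_trap() and the subsequent mret:
	 *   scause/sepc/sbadaddr hold the values above,
	 *   mstatus.SPP = 1, and execution resumes at stvec in S-mode,
	 *   much as if the trap had been delegated via medeleg.
	 */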