arm64: introduce data cache ops by set/way to the level specified

This patch introduces level-specific data cache maintenance operations
in cache_helpers.S. It is derived from the ARM Trusted Firmware
repository; see:

https://github.com/ARM-software/arm-trusted-firmware/blob/master/lib/aarch64/cache_helpers.S

BRANCH=none
BUG=none
TEST=boot on smaug/foster

Change-Id: Ib58a6d6f95eb51ce5d80749ff51d9d389b0d1343
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Original-Commit-Id: b3d1a16bd0089740f1f2257146c771783beece82
Original-Change-Id: Ifcd1dbcd868331107d0d47af73545a3a159fdff6
Original-Signed-off-by: Joseph Lo <josephl@nvidia.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/265826
Original-Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/9979
Tested-by: build bot (Jenkins)
Reviewed-by: Marc Jones <marc.jones@se-eng.com>

@@ -37,6 +37,7 @@ bootblock-y += bootblock.S
bootblock-y += bootblock_simple.c
endif
bootblock-y += cache.c
bootblock-y += cache_helpers.S
bootblock-y += cpu.S
bootblock-$(CONFIG_BOOTBLOCK_CONSOLE) += exception.c
@@ -52,6 +53,7 @@ ifeq ($(CONFIG_ARCH_VERSTAGE_ARMV8_64),y)
verstage-y += cache.c
verstage-y += cpu.S
verstage-y += cache_helpers.S
verstage-y += exception.c
verstage-c-ccopts += $(armv8_flags)
@@ -65,6 +67,7 @@ endif
ifeq ($(CONFIG_ARCH_ROMSTAGE_ARMV8_64),y)
romstage-y += cache.c
romstage-y += cache_helpers.S
romstage-y += cpu.S
romstage-y += exception.c
@@ -82,6 +85,7 @@ endif
ifeq ($(CONFIG_ARCH_RAMSTAGE_ARMV8_64),y)
ramstage-y += cache.c
ramstage-y += cache_helpers.S
ramstage-y += cpu.S
ramstage-y += exception.c
ramstage-y += mmu.c


@@ -34,6 +34,7 @@
#include <stdint.h>
#include <arch/cache.h>
#include <arch/cache_helpers.h>
#include <arch/lib_helpers.h>
void tlb_invalidate_all(void)
@@ -126,7 +127,7 @@ void dcache_mmu_disable(void)
{
uint32_t sctlr;
flush_dcache_all();
flush_dcache_all(DCCISW);
sctlr = raw_read_sctlr_current();
sctlr &= ~(SCTLR_C | SCTLR_M);
raw_write_sctlr_current(sctlr);
@@ -143,6 +144,6 @@ void dcache_mmu_enable(void)
void cache_sync_instructions(void)
{
flush_dcache_all(); /* includes trailing DSB (in assembly) */
flush_dcache_all(DCCISW); /* includes trailing DSB (in assembly) */
icache_invalidate_all(); /* includes leading DSB and trailing ISB. */
}
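
Both call sites pass DCCISW because dirty lines must be written back as
well as invalidated: before SCTLR.C is cleared in dcache_mmu_disable(),
and before the instruction fetch that cache_sync_instructions() prepares
for. A minimal hedged sketch of that pattern (the helper below is
hypothetical, not part of the patch):

#include <arch/cache.h>
#include <arch/cache_helpers.h>

static void hypothetical_cache_off(void)
{
	flush_dcache_all(DCCISW);  /* write back dirty lines, then invalidate */
	/* ...then clear SCTLR.C/SCTLR.M, as dcache_mmu_disable() does above */
}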


@@ -0,0 +1,123 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch/asm.h>
#include <arch/cache_helpers.h>
/* ---------------------------------------------------------------
* Data cache operations by set/way to the level specified
*
* The main function, do_dcsw_op requires:
* x0: The operation type (0-2), as defined in cache_helpers.h
* x3: The last cache level to operate on
* x9: clidr_el1
* and will carry out the operation on each data cache from level 0
* to the level in x3 in sequence
*
* The dcsw_op macro sets up the x3 and x9 parameters based on
* clidr_el1 cache information before invoking the main function
* ---------------------------------------------------------------
*/
.macro dcsw_op shift, fw, ls
mrs x9, clidr_el1
ubfx x3, x9, \shift, \fw
lsl x3, x3, \ls
b do_dcsw_op
.endm
do_dcsw_op:
cbz x3, exit
mov x10, xzr
adr x14, dcsw_loop_table // compute inner loop address
add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
mov x0, x9
mov w8, #1
loop1:
add x2, x10, x10, lsr #1 // work out 3x current cache level
lsr x1, x0, x2 // extract cache type bits from clidr
and x1, x1, #7 // mask the bits for current cache only
cmp x1, #2 // see what cache we have at this level
b.lt level_done // nothing to do if no cache or icache
msr csselr_el1, x10 // select current cache level in csselr
isb // isb to sync the new csselr & ccsidr
mrs x1, ccsidr_el1 // read the new ccsidr
and x2, x1, #7 // extract the length of the cache lines
add x2, x2, #4 // add 4 (line length offset)
ubfx x4, x1, #3, #10 // maximum way number
clz w5, w4 // bit position of way size increment
lsl w9, w4, w5 // w9 = aligned max way number
lsl w16, w8, w5 // w16 = way number loop decrement
orr w9, w10, w9 // w9 = combine way and cache number
ubfx w6, w1, #13, #15 // w6 = max set number
lsl w17, w8, w2 // w17 = set number loop decrement
dsb sy // barrier before we start this level
br x14 // jump to DC operation specific loop
level_done:
add x10, x10, #2 // increment cache number
cmp x3, x10
b.gt loop1
msr csselr_el1, xzr // select cache level 0 in csselr
dsb sy // barrier to complete final cache operation
isb
exit:
ret
.macro dcsw_loop _op
loop2_\_op:
lsl w7, w6, w2 // w7 = aligned max set number
loop3_\_op:
orr w11, w9, w7 // combine cache, way and set number
dc \_op, x11
subs w7, w7, w17 // decrement set number
b.ge loop3_\_op
subs x9, x9, x16 // decrement way number
b.ge loop2_\_op
b level_done
.endm
dcsw_loop_table:
dcsw_loop isw
dcsw_loop cisw
dcsw_loop csw
ENTRY(flush_dcache_louis)
dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
ENDPROC(flush_dcache_louis)
ENTRY(flush_dcache_all)
dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
ENDPROC(flush_dcache_all)
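
To make the register-level flow easier to follow, here is a hedged C
rendering of the walk these routines perform. It is not part of the
patch: read_ccsidr_for_level and dc_op are hypothetical stand-ins for
the CSSELR/CCSIDR access and the dc instruction, and CLIDR_FIELD_WIDTH
mirrors cache_helpers.h.

#include <stdint.h>

/* Hypothetical accessor: write CSSELR_EL1 (level << 1), ISB, then read
 * CCSIDR_EL1 — what loop1 does with x10. */
extern uint64_t read_ccsidr_for_level(unsigned int level);

/* Hypothetical stand-in for "dc isw/cisw/csw, x11". */
extern void dc_op(uint64_t setway);

#define CLIDR_FIELD_WIDTH 3

static void dcsw_op_sketch(uint64_t clidr, unsigned int loc_shift)
{
	/* dcsw_op: extract LoC (or LoUIS) from clidr_el1 */
	unsigned int last = (clidr >> loc_shift) &
			    ((1u << CLIDR_FIELD_WIDTH) - 1);

	for (unsigned int level = 0; level < last; level++) {
		/* skip levels with no cache or an I-cache only (Ctype < 2) */
		if (((clidr >> (3 * level)) & 7) < 2)
			continue;

		uint64_t ccsidr = read_ccsidr_for_level(level);
		unsigned int line_shift = (ccsidr & 7) + 4; /* log2(line bytes) */
		unsigned int max_way = (ccsidr >> 3) & 0x3ff;
		unsigned int max_set = (ccsidr >> 13) & 0x7fff;
		/* the way index occupies the top bits of the DC operand */
		unsigned int way_shift = max_way ? __builtin_clz(max_way) : 32;

		for (int way = max_way; way >= 0; way--)
			for (int set = max_set; set >= 0; set--)
				dc_op(((uint64_t)way << way_shift) |
				      ((uint64_t)set << line_shift) |
				      ((uint64_t)level << 1));
	}
}

The assembly must avoid the stack and data memory entirely while the
cache state is in flux, which is why the real loops are kept in
registers rather than written as above.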


@@ -19,63 +19,7 @@
*/
#include <arch/asm.h>
/*
* flush_dcache_all()
*
* Flush the whole D-cache.
*
* Corrupted registers: x0-x7, x9-x11
* From: Linux arch/arm64/mm/cache.S
*/
ENTRY(flush_dcache_all)
dsb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
lsr x3, x3, #23 // left align loc bit field
cbz x3, finished // if loc is 0, then no need to clean
mov x10, #0 // start clean at cache level 0
loop1:
add x2, x10, x10, lsr #1 // work out 3x current cache level
lsr x1, x0, x2 // extract cache type bits from clidr
and x1, x1, #7 // mask of the bits for current cache only
cmp x1, #2 // see what cache we have at this level
b.lt skip // skip if no cache, or just i-cache
mrs x9, daif // make CSSELR and CCSIDR access atomic
msr csselr_el1, x10 // select current cache level in csselr
isb // isb to sych the new cssr&csidr
mrs x1, ccsidr_el1 // read the new ccsidr
msr daif, x9
and x2, x1, #7 // extract the length of the cache lines
add x2, x2, #4 // add 4 (line length offset)
mov x4, #0x3ff
and x4, x4, x1, lsr #3 // find maximum number on the way size
clz x5, x4 // find bit position of way size increment
mov x7, #0x7fff
and x7, x7, x1, lsr #13 // extract max number of the index size
loop2:
mov x9, x4 // create working copy of max way size
loop3:
lsl x6, x9, x5
orr x11, x10, x6 // factor way and cache number into x11
lsl x6, x7, x2
orr x11, x11, x6 // factor index number into x11
dc cisw, x11 // clean & invalidate by set/way
subs x9, x9, #1 // decrement the way
b.ge loop3
subs x7, x7, #1 // decrement the index
b.ge loop2
skip:
add x10, x10, #2 // increment cache number
cmp x3, x10
b.gt loop1
finished:
mov x10, #0 // swith back to cache level 0
msr csselr_el1, x10 // select current cache level in csselr
dsb sy
isb
ret
ENDPROC(flush_dcache_all)
#include <arch/cache_helpers.h>
/*
* Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
@@ -92,6 +36,7 @@ ENTRY(arm_init_caches)
/* FIXME: How to enable branch prediction on ARMv8? */
/* Flush and invalidate dcache */
mov x0, #DCCISW
bl flush_dcache_all
/* Deactivate MMU (0), Alignment Check (1) and DCache (2) */


@@ -35,6 +35,7 @@ secmon-y += psci.c
secmon-y += smc.c
secmon-y += trampoline.S
secmon-y += ../cache.c
secmon-y += ../cache_helpers.S
secmon-y += ../cpu.S
secmon-y += ../exception.c
secmon-y += ../../cpu.c


@@ -0,0 +1,47 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __CACHE_HELPERS_H
#define __CACHE_HELPERS_H
/* CLIDR definitions */
#define LOUIS_SHIFT 21
#define LOC_SHIFT 24
#define CLIDR_FIELD_WIDTH 3
/* CSSELR definitions */
#define LEVEL_SHIFT 1
/* D$ set/way op type defines */
#define DCISW 0x0
#define DCCISW 0x1
#define DCCSW 0x2
#endif /* __CACHE_HELPERS_H */
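
These values are not arbitrary: do_dcsw_op indexes dcsw_loop_table with
them via "add x14, x14, x0, lsl #5", and each dcsw_loop expansion is
eight 32-bit instructions, i.e. 32 bytes. The table order isw, cisw,
csw therefore has to match DCISW = 0, DCCISW = 1, DCCSW = 2. A hedged
illustration of that lookup (the function is hypothetical):

#include <stdint.h>

/* Each dcsw_loop body in cache_helpers.S is 8 insns * 4 bytes. */
static inline uintptr_t dcsw_loop_addr(uintptr_t table_base, int op_type)
{
	return table_base + ((uintptr_t)op_type << 5); /* op * 32 bytes */
}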


@@ -69,7 +69,10 @@ void dcache_clean_invalidate_by_mva(void const *addr, size_t len);
void dcache_invalidate_by_mva(void const *addr, size_t len);
/* dcache invalidate all */
void flush_dcache_all(void);
void flush_dcache_all(int op_type);
/* flush the dcache up to the Level of Unification Inner Shareable */
void flush_dcache_louis(int op_type);
/* returns number of bytes per cache line */
unsigned int dcache_line_bytes(void);
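
With the new signatures, a hedged usage sketch (the caller below is
hypothetical; the op types come from cache_helpers.h):

#include <arch/cache.h>
#include <arch/cache_helpers.h>

static void hypothetical_maintenance(void)
{
	flush_dcache_all(DCCISW);   /* clean + invalidate all levels to the LoC */
	flush_dcache_louis(DCCSW);  /* clean only, up to the LoUIS */
}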