arm: Import armv7_dcache_wbinv_all function from NetBSD

This patch pulls in NetBSD's full cache flushing algorithm for ARM to
replace our old, slow, and slightly overzealous C-only implementation.
It's a beautiful piece of code that manages to run in a very tight loop
using only caller-saved registers (meaning it doesn't need to write to
memory), and it's BSD-licensed to boot (which we need for libpayload).
Unfortunately it's also not quite correct, but I can fix that. I'm
pulling in the unmodified original as a separate commit to make it more
obvious which changes are mine.
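
For reference, a rough C model of the set/way walk that the imported
assembly performs (illustrative only; the accessor helpers and the name
dcache_wbinv_all_sketch are made up for this sketch and are not part of
the patch or of coreboot's API):

#include <stdint.h>

static inline uint32_t read_clidr(void)
{
        uint32_t v;
        asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r"(v));
        return v;
}

static inline uint32_t read_ccsidr(void)
{
        uint32_t v;
        asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r"(v));
        return v;
}

static inline void write_csselr(uint32_t v)
{
        asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r"(v));
        asm volatile("isb");
}

static inline void dccisw(uint32_t setway)
{
        /* DCCISW: clean and invalidate data cache line by set/way */
        asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r"(setway));
}

void dcache_wbinv_all_sketch(void)
{
        uint32_t clidr = read_clidr();
        unsigned int loc = (clidr >> 24) & 0x7;   /* Level of Coherency */

        for (unsigned int level = 0; level < loc; level++) {
                unsigned int ctype = (clidr >> (level * 3)) & 0x7;
                if (ctype < 2)          /* no data/unified cache at this level */
                        continue;

                write_csselr(level << 1);         /* select data/unified cache */
                uint32_t ccsidr = read_ccsidr();

                unsigned int line_shift = (ccsidr & 0x7) + 4;  /* log2(bytes/line) */
                unsigned int num_sets = ((ccsidr >> 13) & 0x7fff) + 1;
                unsigned int num_ways = ((ccsidr >> 3) & 0x3ff) + 1;
                unsigned int way_shift = num_ways > 1 ?
                        __builtin_clz(num_ways - 1) : 0;

                for (unsigned int way = 0; way < num_ways; way++)
                        for (unsigned int set = 0; set < num_sets; set++)
                                dccisw((way << way_shift) |
                                       (set << line_shift) |
                                       (level << 1));
        }

        write_csselr(0);                          /* back to cache level 0 */
        asm volatile("dsb" : : : "memory");
        asm volatile("isb");
}

The assembly below does the same traversal, but keeps all of its state in
r0-r3 and ip, so it never touches memory while the caches are being
cleaned.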

Change-Id: I7a71c9e570866a6e25f756cb09ae2b6445048d83
Signed-off-by: Julius Werner <jwerner@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/183878
Reviewed-by: Stefan Reinauer <reinauer@google.com>
Reviewed-by: Vincent Palatin <vpalatin@chromium.org>
Reviewed-by: David Hendricks <dhendrix@chromium.org>
(cherry picked from commit 4698467320613d7ddc39714f40aacbc990af9399)
Signed-off-by: Isaac Christensen <isaac.christensen@se-eng.com>
Reviewed-on: http://review.coreboot.org/6931
Tested-by: build bot (Jenkins)
Reviewed-by: Patrick Georgi <patrick@georgi-clan.de>
Julius Werner 2014-01-13 11:13:23 -08:00 committed by Isaac Christensen
parent 81f90c58d2
commit 12de698c24
1 changed file with 99 additions and 0 deletions

src/arch/arm/armv7/cpu.S (new file, 99 additions)

@@ -0,0 +1,99 @@
/*
* Optimized assembly for low-level CPU operations on ARMv7 processors.
*
* Cache flushing code based off sys/arch/arm/arm/cpufunc_asm_armv7.S in NetBSD
*
* Copyright (c) 2010 Per Odlund <per.odlund@armagedon.se>
* Copyright (c) 2014 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* These work very hard to not push registers onto the stack and to limit
* themselves to using r0-r3 and ip.
*/
/* LINTSTUB: void armv7_dcache_wbinv_all(void); */
ENTRY_NP(armv7_dcache_wbinv_all)
mrc p15, 1, r0, c0, c0, 1 @ read CLIDR
ands r3, r0, #0x07000000
beq .Ldone_wbinv
lsr r3, r3, #23 @ left align loc (low 4 bits)
mov r1, #0
.Lstart_wbinv:
add r2, r3, r3, lsr #1 @ r2 = level * 3 / 2
mov r1, r0, lsr r2 @ r1 = cache type
bfc r1, #3, #28
cmp r1, #2 @ is it data or i&d?
blt .Lnext_level_wbinv @ nope, skip level
mcr p15, 2, r3, c0, c0, 0 @ select cache level
isb
mrc p15, 1, r0, c0, c0, 0 @ read CCSIDR
ubfx ip, r0, #0, #3 @ get linesize from CCSIDR
add ip, ip, #4 @ apply bias
ubfx r2, r0, #13, #15 @ get numsets - 1 from CCSIDR
lsl r2, r2, ip @ shift to set position
orr r3, r3, r2 @ merge set into way/set/level
mov r1, #1
lsl r1, r1, ip @ r1 = set decr
ubfx ip, r0, #3, #10 @ get numways - 1 from [to be discarded] CCSIDR
clz r2, ip @ number of bits to MSB of way
lsl ip, ip, r2 @ shift by that into way position
mov r0, #1 @
lsl r2, r0, r2 @ r2 now contains the way decr
mov r0, r3 @ get sets/level (no way yet)
orr r3, r3, ip @ merge way into way/set/level
bfc r0, #0, #4 @ clear low 4 bits (level) to get numset - 1
sub r2, r2, r0 @ subtract from way decr
/* r3 = ways/sets/level, r2 = way decr, r1 = set decr, r0 and ip are free */
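/*
 * Note on the operand passed to DCCISW below: the cache level lives in
 * bits [3:1], the set index starts at bit log2(line size in bytes), and
 * the way index is left-justified in the most significant bits, per the
 * ARMv7 set/way cache maintenance register format.
 */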
1: mcr p15, 0, r3, c7, c14, 2 @ writeback and invalidate line
cmp r3, #15 @ are we done with this level (way/set == 0)
bls .Lnext_level_wbinv @ yes, go to next level
lsl r0, r3, #10 @ clear way bits leaving only set/level bits
lsr r0, r0, #4 @ clear level bits leaving only set bits
subne r3, r3, r1 @ non-zero?, decrement set #
subeq r3, r3, r2 @ zero?, decrement way # and restore set count
b 1b
.Lnext_level_wbinv:
mrc p15, 1, r0, c0, c0, 1 @ read CLIDR
and ip, r0, #0x07000000 @ narrow to LoC
lsr ip, ip, #23 @ left align LoC (low 4 bits)
add r3, r3, #2 @ go to next level
cmp r3, ip @ compare
blt .Lstart_wbinv @ not done, next level (r0 == CLIDR)
.Ldone_wbinv:
mov r0, #0 @ default back to cache level 0
mcr p15, 2, r0, c0, c0, 0 @ select cache level
dsb
isb
bx lr
END(armv7_dcache_wbinv_all)
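
For context, a C caller only needs the prototype below; the surrounding
call site is a hypothetical sketch (the teardown function and the exact
ordering are illustrative, not part of this patch):

void armv7_dcache_wbinv_all(void);

static void cache_teardown_example(void)
{
        /* Write back and invalidate the entire D-cache by set/way... */
        armv7_dcache_wbinv_all();
        /* ...before, e.g., disabling the data cache and MMU in SCTLR. */
}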