ARMv7: Simplify div64

We don't need the overly complex optimized version, since
64-bit division is only used in a few non-critical places.

Also, add the div* files to the bootblock; they're needed
if we use printk there.
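
For reference, do_div(n, base) divides the 64-bit value n in place and
returns the remainder. A minimal sketch of the kind of caller this serves,
assuming a printk-style number formatter (illustrative only, not the
actual coreboot vtxprintf code):

#include <div64.h>	/* do_div(): n /= base, returns old n % base */

/* Hypothetical helper: render a 64-bit value in the given base (2..16).
 * Each do_div() pass peels off one digit; this is the only kind of 64-bit
 * division printk needs, so the simple asm-only path is plenty. */
static int format_u64(char *buf, unsigned long long num, unsigned int base)
{
	static const char digits[] = "0123456789abcdef";
	char tmp[64];	/* enough digits even for base 2 */
	int i = 0, len = 0;

	do {
		tmp[i++] = digits[do_div(num, base)];
	} while (num);

	while (i > 0)
		buf[len++] = tmp[--i];

	return len;
}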

Signed-off-by: Stefan Reinauer <reinauer@google.com>

Change-Id: I83bd766d4b03b488326ade1c13b7c364a7119e7b
Signed-off-by: Ronald G. Minnich <rminnich@gmail.com>
Reviewed-on: http://review.coreboot.org/2508
Tested-by: build bot (Jenkins)
commit 1cd6160821 (parent 14c2398ce9)
Author: Stefan Reinauer, 2013-02-14 15:53:18 -08:00
Committer: Ronald G. Minnich
3 changed files with 7 additions and 190 deletions

View File

@@ -3,12 +3,7 @@
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64
//#include <asm/system.h>
//#include <linux/types.h>
// FIXME
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
#define __LINUX_ARM_ARCH__ 7
/*
* The semantics of do_div() are:
@@ -34,7 +29,7 @@
#define __xh "r1"
#endif
#define __do_div_asm(n, base) \
#define do_div(n, base) \
({ \
register unsigned int __base asm("r4") = base; \
register unsigned long long __n asm("r0") = n; \
@@ -51,183 +46,4 @@
n = __res; \
__rem; \
})
#if __GNUC__ < 4
/*
* gcc versions earlier than 4.0 are simply too problematic for the
* optimized implementation below. First there is gcc PR 15089 that
* tends to trigger on more complex constructs, spurious .global __udivsi3
* are inserted even if none of those symbols are referenced in the
* generated code, and those gcc versions are not able to do constant
* propagation on long long values anyway.
*/
#define do_div(n, base) __do_div_asm(n, base)
#elif __GNUC__ >= 4
//#include <asm/bug.h>
/*
* If the divisor happens to be constant, we determine the appropriate
* inverse at compile time to turn the division into a few inline
* multiplications instead which is much faster. And yet only if compiling
* for ARMv4 or higher (we need umull/umlal) and if the gcc version is
* sufficiently recent to perform proper long long constant propagation.
* (It is unfortunate that gcc doesn't perform all this internally.)
*/
#define do_div(n, base) \
({ \
unsigned int __r, __b = (base); \
if (!__builtin_constant_p(__b) || __b == 0 || \
(__LINUX_ARM_ARCH__ < 4 && (__b & (__b - 1)) != 0)) { \
/* non-constant divisor (or zero): slow path */ \
__r = __do_div_asm(n, __b); \
} else if ((__b & (__b - 1)) == 0) { \
/* Trivial: __b is constant and a power of 2 */ \
/* gcc does the right thing with this code. */ \
__r = n; \
__r &= (__b - 1); \
n /= __b; \
} else { \
/* Multiply by inverse of __b: n/b = n*(p/b)/p */ \
/* We rely on the fact that most of this code gets */ \
/* optimized away at compile time due to constant */ \
/* propagation and only a couple inline assembly */ \
/* instructions should remain. Better avoid any */ \
/* code construct that might prevent that. */ \
unsigned long long __res, __x, __t, __m, __n = n; \
unsigned int __c, __p, __z = 0; \
/* preserve low part of n for remainder computation */ \
__r = __n; \
/* determine number of bits to represent __b */ \
__p = 1 << __div64_fls(__b); \
/* compute __m = ((__p << 64) + __b - 1) / __b */ \
__m = (~0ULL / __b) * __p; \
__m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; \
/* compute __res = __m*(~0ULL/__b*__b-1)/(__p << 64) */ \
__x = ~0ULL / __b * __b - 1; \
__res = (__m & 0xffffffff) * (__x & 0xffffffff); \
__res >>= 32; \
__res += (__m & 0xffffffff) * (__x >> 32); \
__t = __res; \
__res += (__x & 0xffffffff) * (__m >> 32); \
__t = (__res < __t) ? (1ULL << 32) : 0; \
__res = (__res >> 32) + __t; \
__res += (__m >> 32) * (__x >> 32); \
__res /= __p; \
/* Now sanitize and optimize what we've got. */ \
if (~0ULL % (__b / (__b & -__b)) == 0) { \
/* those cases can be simplified with: */ \
__n /= (__b & -__b); \
__m = ~0ULL / (__b / (__b & -__b)); \
__p = 1; \
__c = 1; \
} else if (__res != __x / __b) { \
/* We can't get away without a correction */ \
/* to compensate for bit truncation errors. */ \
/* To avoid it we'd need an additional bit */ \
/* to represent __m which would overflow it. */ \
/* Instead we do m=p/b and n/b=(n*m+m)/p. */ \
__c = 1; \
/* Compute __m = (__p << 64) / __b */ \
__m = (~0ULL / __b) * __p; \
__m += ((~0ULL % __b + 1) * __p) / __b; \
} else { \
/* Reduce __m/__p, and try to clear bit 31 */ \
/* of __m when possible otherwise that'll */ \
/* need extra overflow handling later. */ \
unsigned int __bits = -(__m & -__m); \
__bits |= __m >> 32; \
__bits = (~__bits) << 1; \
/* If __bits == 0 then setting bit 31 is */ \
/* unavoidable. Simply apply the maximum */ \
/* possible reduction in that case. */ \
/* Otherwise the MSB of __bits indicates the */ \
/* best reduction we should apply. */ \
if (!__bits) { \
__p /= (__m & -__m); \
__m /= (__m & -__m); \
} else { \
__p >>= __div64_fls(__bits); \
__m >>= __div64_fls(__bits); \
} \
/* No correction needed. */ \
__c = 0; \
} \
/* Now we have a combination of 2 conditions: */ \
/* 1) whether or not we need a correction (__c), and */ \
/* 2) whether or not there might be an overflow in */ \
/* the cross product (__m & ((1<<63) | (1<<31))) */ \
/* Select the best insn combination to perform the */ \
/* actual __m * __n / (__p << 64) operation. */ \
if (!__c) { \
asm ( "umull %Q0, %R0, %1, %Q2\n\t" \
"mov %Q0, #0" \
: "=&r" (__res) \
: "r" (__m), "r" (__n) \
: "cc" ); \
} else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
__res = __m; \
asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t" \
"mov %Q0, #0" \
: "+&r" (__res) \
: "r" (__m), "r" (__n) \
: "cc" ); \
} else { \
asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" \
"cmn %Q0, %Q1\n\t" \
"adcs %R0, %R0, %R1\n\t" \
"adc %Q0, %3, #0" \
: "=&r" (__res) \
: "r" (__m), "r" (__n), "r" (__z) \
: "cc" ); \
} \
if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { \
asm ( "umlal %R0, %Q0, %R1, %Q2\n\t" \
"umlal %R0, %Q0, %Q1, %R2\n\t" \
"mov %R0, #0\n\t" \
"umlal %Q0, %R0, %R1, %R2" \
: "+&r" (__res) \
: "r" (__m), "r" (__n) \
: "cc" ); \
} else { \
asm ( "umlal %R0, %Q0, %R2, %Q3\n\t" \
"umlal %R0, %1, %Q2, %R3\n\t" \
"mov %R0, #0\n\t" \
"adds %Q0, %1, %Q0\n\t" \
"adc %R0, %R0, #0\n\t" \
"umlal %Q0, %R0, %R2, %R3" \
: "+&r" (__res), "+&r" (__z) \
: "r" (__m), "r" (__n) \
: "cc" ); \
} \
__res /= __p; \
/* The remainder can be computed with 32-bit regs */ \
/* only, and gcc is good at that. */ \
{ \
unsigned int __res0 = __res; \
unsigned int __b0 = __b; \
__r -= __res0 * __b0; \
} \
/* BUG_ON(__r >= __b || __res * __b + __r != n); */ \
n = __res; \
} \
__r; \
})
/* our own fls implementation to make sure constant propagation is fine */
#define __div64_fls(bits) \
({ \
unsigned int __left = (bits), __nr = 0; \
if (__left & 0xffff0000) __nr += 16, __left >>= 16; \
if (__left & 0x0000ff00) __nr += 8, __left >>= 8; \
if (__left & 0x000000f0) __nr += 4, __left >>= 4; \
if (__left & 0x0000000c) __nr += 2, __left >>= 2; \
if (__left & 0x00000002) __nr += 1; \
__nr; \
})
#endif
#endif
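
The block deleted above turned division by a compile-time-constant divisor
into multiplication by a precomputed reciprocal, so that after constant
propagation only a couple of umull/umlal instructions remained. A minimal
sketch of the same idea, shown for a 32-bit dividend and the constant
divisor 10 instead of the full 64-bit machinery (illustrative only):

#include <assert.h>
#include <stdint.h>

/* 0xCCCCCCCD is ceil(2^35 / 10), so for any 32-bit n the quotient n / 10
 * falls out of a single 32x32->64 multiply -- one umull plus a shift on
 * ARM, with no division instruction or helper call. */
static uint32_t div10(uint32_t n)
{
	return (uint32_t)(((uint64_t)n * 0xCCCCCCCDu) >> 35);
}

int main(void)
{
	for (uint32_t n = 0; n < 1000000; n++)
		assert(div10(n) == n / 10);
	return 0;
}

With the optimized macro gone, constant and non-constant divisors alike go
through the plain assembly path, which is acceptable for the few call sites
coreboot has.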

View File

@@ -2,6 +2,8 @@ bootblock-y += syslib.c
bootblock-$(CONFIG_EARLY_CONSOLE) += early_console.c
bootblock-y += cache_v7.c
bootblock-y += cache-cp15.c
bootblock-y += div0.c
bootblock-y += div64.S
romstage-y += cache_v7.c
romstage-y += cache-cp15.c
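
The two new bootblock-y lines matter because do_div() is not inline
arithmetic: the macro pins its operands to fixed registers (r0 and r4 in
the macro above) and branches into the assembly helper provided by div64.S
(div0.c supplies the matching division-by-zero handler). Any bootblock code
that prints numbers through printk therefore needs both objects linked in.
A small caller with hypothetical names:

#include <div64.h>	/* do_div() resolves to the helper in div64.S */

/* Hypothetical example: split a 64-bit tick count into whole seconds plus
 * leftover ticks. Without div64.S (and div0.c) in the bootblock, a call
 * like this -- e.g. from printk's number formatting -- fails to link. */
static unsigned long long ticks_to_seconds(unsigned long long ticks,
					   unsigned int hz,
					   unsigned int *rem)
{
	*rem = do_div(ticks, hz);	/* ticks /= hz; returns old ticks % hz */
	return ticks;
}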

View File

@@ -13,8 +13,7 @@
*/
// FIXME
//#include <linux/linkage.h>
#define __LINUX_ARM_ARCH__ 7
#define __ARM_ARCH__ 7
#ifdef __ARMEB__
#define xh r0
@@ -65,7 +64,7 @@ __do_div64:
@ The aligned divisor is stored in yl preserving the original.
@ The bit position is stored in ip.
#if __LINUX_ARM_ARCH__ >= 5
#if __ARM_ARCH__ >= 5
clz yl, r4
clz ip, xh
@@ -125,7 +124,7 @@ __do_div64:
@ We still have remainder bits in the low part. Bring them up.
#if __LINUX_ARM_ARCH__ >= 5
#if __ARM_ARCH__ >= 5
clz xh, xl @ we know xh is zero here so...
add xh, xh, #1
@@ -152,7 +151,7 @@ __do_div64:
8: @ Division by a power of 2: determine what that divisor order is
@ then simply shift values around
#if __LINUX_ARM_ARCH__ >= 5
#if __ARM_ARCH__ >= 5
clz ip, r4
rsb ip, ip, #31
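
The __LINUX_ARM_ARCH__ -> __ARM_ARCH__ rename only touches the guards
around clz, which is available from ARMv5 on. In the power-of-2 path shown
in the last hunk, clz gives the divisor order directly (the "clz ip, r4" /
"rsb ip, ip, #31" pair), after which the quotient is a shift and the
remainder a mask. An illustrative C equivalent of that branch:

#include <stdint.h>

/* Illustrative only: what the "division by a power of 2" branch computes.
 * For a power-of-2 divisor, 31 - clz(divisor) is its bit position, so the
 * division collapses to a shift and the remainder to a mask. */
static uint64_t div_pow2(uint64_t n, uint32_t divisor, uint32_t *rem)
{
	uint32_t order = 31 - __builtin_clz(divisor);	/* divisor == 1u << order */

	*rem = (uint32_t)(n & (divisor - 1));
	return n >> order;
}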