coreboot-kgpe-d16/payloads/libpayload/libc/memory.c

/*
* This file is part of the libpayload project.
*
* It has originally been taken from the HelenOS project
* (http://www.helenos.eu), and slightly modified for our purposes.
*
* Copyright (c) 2005 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <libpayload.h>
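
/*
 * Default memset: fill byte-by-byte until the destination is word-aligned,
 * broadcast the fill byte across an unsigned long, store whole words, then
 * finish the unaligned tail byte-by-byte. The weak alias below lets an
 * architecture-specific memset override this default at link time.
 */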
static void *default_memset(void *const s, const int c, size_t n)
{
	size_t i;
	u8 *dst = s;
	unsigned long w = c & 0xff;

	const u8 *const aligned_start =
		(const u8 *)ALIGN_UP((uintptr_t)dst, sizeof(unsigned long));
	for (; n > 0 && dst != aligned_start; --n, ++dst)
		*dst = (u8)c;

	for (i = 1; i < sizeof(unsigned long); i <<= 1)
		w = (w << (i * 8)) | w;

	for (i = 0; i < n / sizeof(unsigned long); i++)
		((unsigned long *)dst)[i] = w;

	dst += i * sizeof(unsigned long);

	for (i = 0; i < n % sizeof(unsigned long); i++)
		dst[i] = (u8)c;

	return s;
}

void *memset(void *s, int c, size_t n)
	__attribute__((weak, alias("default_memset")));
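
/*
 * Default memcpy: when both pointers are word-aligned, copy whole
 * unsigned longs first, then fall back to byte copies for the remainder
 * (and for unaligned buffers). Overridable via the weak alias below.
 */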
static void *default_memcpy(void *dst, const void *src, size_t n)
{
	size_t i;
	void *ret = dst;

	if (IS_ALIGNED((uintptr_t)dst, sizeof(unsigned long)) &&
	    IS_ALIGNED((uintptr_t)src, sizeof(unsigned long))) {
		for (i = 0; i < n / sizeof(unsigned long); i++)
			((unsigned long *)dst)[i] = ((unsigned long *)src)[i];
		src += i * sizeof(unsigned long);
		dst += i * sizeof(unsigned long);
		n -= i * sizeof(unsigned long);
	}

	for (i = 0; i < n; i++)
		((u8 *)dst)[i] = ((u8 *)src)[i];

	return ret;
}

void *memcpy(void *dst, const void *src, size_t n)
	__attribute__((weak, alias("default_memcpy")));
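
/*
 * Default memmove: if a forward copy is safe (src > dst), defer to memcpy;
 * otherwise copy backwards, using word-wide accesses when both pointers
 * are word-aligned.
 */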
static void *default_memmove(void *dst, const void *src, size_t n)
{
	size_t offs;
	ssize_t i;

	if (src > dst)
		return memcpy(dst, src, n);

	if (!IS_ALIGNED((uintptr_t)dst, sizeof(unsigned long)) ||
	    !IS_ALIGNED((uintptr_t)src, sizeof(unsigned long))) {
		for (i = n - 1; i >= 0; i--)
			((u8 *)dst)[i] = ((u8 *)src)[i];
		return dst;
	}

	offs = n - (n % sizeof(unsigned long));
	for (i = (n % sizeof(unsigned long)) - 1; i >= 0; i--)
		((u8 *)dst)[i + offs] = ((u8 *)src)[i + offs];

	for (i = n / sizeof(unsigned long) - 1; i >= 0; i--)
		((unsigned long *)dst)[i] = ((unsigned long *)src)[i];

	return dst;
}

void *memmove(void *dst, const void *src, size_t n)
	__attribute__((weak, alias("default_memmove")));

/**
* Compare two memory areas.
*
* @param s1 Pointer to the first area to compare.
* @param s2 Pointer to the second area to compare.
* @param n Size of the first area in bytes (both must have the same length).
* @return If n is 0, return zero. Otherwise, return a value less than, equal
* to, or greater than zero if s1 is found less than, equal to, or
* greater than s2 respectively.
*/
static int default_memcmp(const void *s1, const void *s2, size_t n)
{
	size_t i = 0;
	const unsigned long *w1 = s1, *w2 = s2;

	if (IS_ALIGNED((uintptr_t)s1, sizeof(unsigned long)) &&
	    IS_ALIGNED((uintptr_t)s2, sizeof(unsigned long)))
		for (; i < n / sizeof(unsigned long); i++)
			if (w1[i] != w2[i])
				break;	/* fall through to find differing byte */

	for (i *= sizeof(unsigned long); i < n; i++)
		if (((u8 *)s1)[i] != ((u8 *)s2)[i])
			return ((u8 *)s1)[i] - ((u8 *)s2)[i];

	return 0;
}

int memcmp(const void *s1, const void *s2, size_t n)
	__attribute__((weak, alias("default_memcmp")));
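
/*
 * Scan the first n bytes of s for the byte value c; return a pointer to
 * the first match, or 0 (NULL) if c does not occur in the area.
 */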
void *memchr(const void *s, int c, size_t n)
{
	unsigned char *p = (unsigned char *)s;

	while (n--)
		if (*p != (unsigned char)c)
			p++;
		else
			return p;

	return 0;
}