libpayload: xhci: Do not memcpy registers

memcpy() is meant to be used on normal memory and is often implemented
with architecture-specific optimizations to make it as performant as
possible. MMIO registers often have special access restrictions that may
be incompatible with whatever memcpy() does. For example, on arm64 it
uses the LDP (load pair) instruction to load 16 bytes at a time, which
makes 4-byte MMIO registers unhappy.

This patch removes the caching of the XHCI capreg registers and changes
the field back to a pointer. The CAP_GET() macro still accesses a full
(non-bitfield) uint32_t in the end, so this should still generate a
4-byte access (which was the goal of the original change in CB:39838).
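
As a rough sketch of that pattern (simplified; the real macro lives in
xhci_private.h, and the field name and mask values here are
illustrative):

#include <stdint.h>

#define MASK(start, len)	((((1u << (len)) - 1)) << (start))

/* Simplified stand-in for the capability register block: every field
 * is a full uint32_t, never a C bitfield. */
struct cap {
	uint32_t hciver_caplen;	/* hypothetical field name */
};

static inline uint32_t cap_hciversion(const struct cap *cap)
{
	/* The whole uint32_t is loaded with one 4-byte read; masking
	 * and shifting then happen in CPU registers, so the bus never
	 * sees a sub-word or multi-word access. */
	return (cap->hciver_caplen & MASK(16, 16)) >> 16;
}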

Signed-off-by: Julius Werner <jwerner@chromium.org>
Change-Id: Id058c8813087a8e8cb85f570399e07fb8a597108
Reviewed-on: https://review.coreboot.org/c/coreboot/+/40895
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Duncan Laurie <dlaurie@chromium.org>
Julius Werner 2020-04-29 17:36:12 -07:00
parent 6f028e7993
commit 6c1a669b44
2 changed files with 7 additions and 7 deletions

payloads/libpayload/drivers/usb/xhci.c

@@ -185,15 +185,15 @@ xhci_init (unsigned long physical_bar)
 		goto _free_xhci;
 	}
-	memcpy(&xhci->capreg, phys_to_virt(physical_bar), sizeof(xhci->capreg));
+	xhci->capreg = phys_to_virt(physical_bar);
 	xhci->opreg = phys_to_virt(physical_bar) + CAP_GET(CAPLEN, xhci->capreg);
-	xhci->hcrreg = phys_to_virt(physical_bar) + xhci->capreg.rtsoff;
-	xhci->dbreg = phys_to_virt(physical_bar) + xhci->capreg.dboff;
+	xhci->hcrreg = phys_to_virt(physical_bar) + xhci->capreg->rtsoff;
+	xhci->dbreg = phys_to_virt(physical_bar) + xhci->capreg->dboff;
 	xhci_debug("regbase: 0x%"PRIx32"\n", physical_bar);
 	xhci_debug("caplen: 0x%"PRIx32"\n", CAP_GET(CAPLEN, xhci->capreg));
-	xhci_debug("rtsoff: 0x%"PRIx32"\n", xhci->capreg.rtsoff);
-	xhci_debug("dboff: 0x%"PRIx32"\n", xhci->capreg.dboff);
+	xhci_debug("rtsoff: 0x%"PRIx32"\n", xhci->capreg->rtsoff);
+	xhci_debug("dboff: 0x%"PRIx32"\n", xhci->capreg->dboff);
 	xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
 		   CAP_GET(CAPVER_HI, xhci->capreg), CAP_GET(CAPVER_LO, xhci->capreg));

payloads/libpayload/drivers/usb/xhci_private.h

@@ -364,7 +364,7 @@ typedef struct erst_entry {
 #define CAP_CSZ_LEN 1
 #define CAP_MASK(tok) MASK(CAP_##tok##_START, CAP_##tok##_LEN)
-#define CAP_GET(tok, cap) (((cap).CAP_##tok##_FIELD & CAP_MASK(tok)) \
+#define CAP_GET(tok, cap) (((cap)->CAP_##tok##_FIELD & CAP_MASK(tok)) \
 		>> CAP_##tok##_START)
 #define CTXSIZE(xhci) (CAP_GET(CSZ, (xhci)->capreg) ? 64 : 32)
@@ -378,7 +378,7 @@ typedef struct xhci {
 		u32 hccparams;
 		u32 dboff;
 		u32 rtsoff;
-	} __packed capreg;
+	} __packed *capreg;
 	/* opreg is R/W in most places, so volatile access is necessary.
 	   volatile means that the compiler seeks byte writes if possible,