lp/drivers/usb: Work around QEMU XHCI register issue

The QEMU XHCI controller does not support byte/word reads from the
capability registers; it expects dword reads only.

To make this work, change accesses to the capability register
fields to use macros instead of a packed struct bitfield.

This issue was filed upstream:
https://bugs.launchpad.net/qemu/+bug/1693050

The original fix attempt in 2012 was not effective:
6ee021d410

With this change the controller is detected properly by the libpayload
USB drivers.

Change-Id: I048ed14921a4c9c0620c10b315b42476b6e5c512
Signed-off-by: Duncan Laurie <dlaurie@google.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/39838
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Nico Huber <nico.h@gmx.de>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
This commit is contained in:
Duncan Laurie 2020-03-17 19:32:14 -07:00 committed by Patrick Georgi
parent bc885c194c
commit 287cf6c7d1
3 changed files with 81 additions and 78 deletions

View File

@@ -185,26 +185,27 @@ xhci_init (unsigned long physical_bar)
goto _free_xhci; goto _free_xhci;
} }
xhci->capreg = phys_to_virt(physical_bar); memcpy(&xhci->capreg, phys_to_virt(physical_bar), sizeof(xhci->capreg));
xhci->opreg = ((void *)xhci->capreg) + xhci->capreg->caplength; xhci->opreg = phys_to_virt(physical_bar) + CAP_GET(CAPLEN, xhci->capreg);
xhci->hcrreg = ((void *)xhci->capreg) + xhci->capreg->rtsoff; xhci->hcrreg = phys_to_virt(physical_bar) + xhci->capreg.rtsoff;
xhci->dbreg = ((void *)xhci->capreg) + xhci->capreg->dboff; xhci->dbreg = phys_to_virt(physical_bar) + xhci->capreg.dboff;
xhci_debug("regbase: 0x%"PRIx32"\n", physical_bar); xhci_debug("regbase: 0x%"PRIx32"\n", physical_bar);
xhci_debug("caplen: 0x%"PRIx32"\n", xhci->capreg->caplength); xhci_debug("caplen: 0x%"PRIx32"\n", CAP_GET(CAPLEN, xhci->capreg));
xhci_debug("rtsoff: 0x%"PRIx32"\n", xhci->capreg->rtsoff); xhci_debug("rtsoff: 0x%"PRIx32"\n", xhci->capreg.rtsoff);
xhci_debug("dboff: 0x%"PRIx32"\n", xhci->capreg->dboff); xhci_debug("dboff: 0x%"PRIx32"\n", xhci->capreg.dboff);
xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n", xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
xhci->capreg->hciver_hi, xhci->capreg->hciver_lo); CAP_GET(CAPVER_HI, xhci->capreg), CAP_GET(CAPVER_LO, xhci->capreg));
if ((xhci->capreg->hciversion < 0x96) || if ((CAP_GET(CAPVER, xhci->capreg) < 0x96) ||
(xhci->capreg->hciversion > 0x110)) { (CAP_GET(CAPVER, xhci->capreg) > 0x110)) {
xhci_debug("Unsupported xHCI version\n"); xhci_debug("Unsupported xHCI version\n");
goto _free_xhci; goto _free_xhci;
} }
xhci_debug("context size: %dB\n", CTXSIZE(xhci)); xhci_debug("context size: %dB\n", CTXSIZE(xhci));
xhci_debug("maxslots: 0x%02lx\n", xhci->capreg->MaxSlots); xhci_debug("maxslots: 0x%02lx\n", CAP_GET(MAXSLOTS, xhci->capreg));
xhci_debug("maxports: 0x%02lx\n", xhci->capreg->MaxPorts); xhci_debug("maxports: 0x%02lx\n", CAP_GET(MAXPORTS, xhci->capreg));
const unsigned pagesize = xhci->opreg->pagesize << 12; const unsigned pagesize = xhci->opreg->pagesize << 12;
xhci_debug("pagesize: 0x%04x\n", pagesize); xhci_debug("pagesize: 0x%04x\n", pagesize);
@@ -213,7 +214,8 @@ xhci_init (unsigned long physical_bar)
* structures at first and can still chicken out easily if we run out * structures at first and can still chicken out easily if we run out
* of memory. * of memory.
*/ */
xhci->max_slots_en = xhci->capreg->MaxSlots & CONFIG_LP_MASK_MaxSlotsEn; xhci->max_slots_en = CAP_GET(MAXSLOTS, xhci->capreg) &
CONFIG_LP_MASK_MaxSlotsEn;
xhci->dcbaa = xhci_align(64, (xhci->max_slots_en + 1) * sizeof(u64)); xhci->dcbaa = xhci_align(64, (xhci->max_slots_en + 1) * sizeof(u64));
xhci->dev = malloc((xhci->max_slots_en + 1) * sizeof(*xhci->dev)); xhci->dev = malloc((xhci->max_slots_en + 1) * sizeof(*xhci->dev));
if (!xhci->dcbaa || !xhci->dev) { if (!xhci->dcbaa || !xhci->dev) {
@@ -227,8 +229,9 @@ xhci_init (unsigned long physical_bar)
* Let dcbaa[0] point to another array of pointers, sp_ptrs. * Let dcbaa[0] point to another array of pointers, sp_ptrs.
* The pointers therein point to scratchpad buffers (pages). * The pointers therein point to scratchpad buffers (pages).
*/ */
const size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs_Hi << 5 | const size_t max_sp_bufs =
xhci->capreg->Max_Scratchpad_Bufs_Lo; CAP_GET(MAX_SCRATCH_BUFS_HI, xhci->capreg) << 5 |
CAP_GET(MAX_SCRATCH_BUFS_LO, xhci->capreg);
xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs); xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs);
if (max_sp_bufs) { if (max_sp_bufs) {
const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64); const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64);
@@ -376,7 +379,8 @@ xhci_reinit (hci_t *controller)
xhci_debug("event ring @%p (0x%08x)\n", xhci_debug("event ring @%p (0x%08x)\n",
xhci->er.ring, virt_to_phys(xhci->er.ring)); xhci->er.ring, virt_to_phys(xhci->er.ring));
xhci_debug("ERST Max: 0x%lx -> 0x%lx entries\n", xhci_debug("ERST Max: 0x%lx -> 0x%lx entries\n",
xhci->capreg->ERST_Max, 1 << xhci->capreg->ERST_Max); CAP_GET(ERST_MAX, xhci->capreg),
1 << CAP_GET(ERST_MAX, xhci->capreg));
memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t)); memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring); xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
xhci->ev_ring_table[0].seg_base_hi = 0; xhci->ev_ring_table[0].seg_base_hi = 0;
@@ -432,8 +436,9 @@ xhci_shutdown(hci_t *const controller)
#endif #endif
if (xhci->sp_ptrs) { if (xhci->sp_ptrs) {
size_t max_sp_bufs = xhci->capreg->Max_Scratchpad_Bufs_Hi << 5 | const size_t max_sp_bufs =
xhci->capreg->Max_Scratchpad_Bufs_Lo; CAP_GET(MAX_SCRATCH_BUFS_HI, xhci->capreg) << 5 |
CAP_GET(MAX_SCRATCH_BUFS_LO, xhci->capreg);
for (i = 0; i < max_sp_bufs; ++i) { for (i = 0; i < max_sp_bufs; ++i) {
if (xhci->sp_ptrs[i]) if (xhci->sp_ptrs[i])
free(phys_to_virt(xhci->sp_ptrs[i])); free(phys_to_virt(xhci->sp_ptrs[i]));

View File

@@ -274,7 +274,6 @@ typedef volatile struct epctx {
} epctx_t; } epctx_t;
#define NUM_EPS 32 #define NUM_EPS 32
#define CTXSIZE(xhci) ((xhci)->capreg->csz ? 64 : 32)
typedef union devctx { typedef union devctx {
/* set of pointers, so we can dynamically adjust Slot/EP context size */ /* set of pointers, so we can dynamically adjust Slot/EP context size */
@@ -321,66 +320,65 @@ typedef struct erst_entry {
u32 rsvd; u32 rsvd;
} erst_entry_t; } erst_entry_t;
#define CAP_CAPLEN_FIELD hciparams
#define CAP_CAPLEN_START 0
#define CAP_CAPLEN_LEN 8
#define CAP_CAPVER_FIELD hciparams
#define CAP_CAPVER_START 16
#define CAP_CAPVER_LEN 16
#define CAP_CAPVER_HI_FIELD hciparams
#define CAP_CAPVER_HI_START 24
#define CAP_CAPVER_HI_LEN 8
#define CAP_CAPVER_LO_FIELD hciparams
#define CAP_CAPVER_LO_START 16
#define CAP_CAPVER_LO_LEN 8
#define CAP_MAXSLOTS_FIELD hcsparams1
#define CAP_MAXSLOTS_START 0
#define CAP_MAXSLOTS_LEN 7
#define CAP_MAXINTRS_FIELD hcsparams1
#define CAP_MAXINTRS_START 7
#define CAP_MAXINTRS_LEN 11
#define CAP_MAXPORTS_FIELD hcsparams1
#define CAP_MAXPORTS_START 24
#define CAP_MAXPORTS_LEN 8
#define CAP_IST_FIELD hcsparams2
#define CAP_IST_START 0
#define CAP_IST_LEN 4
#define CAP_ERST_MAX_FIELD hcsparams2
#define CAP_ERST_MAX_START 4
#define CAP_ERST_MAX_LEN 4
#define CAP_MAX_SCRATCH_BUFS_HI_FIELD hcsparams2
#define CAP_MAX_SCRATCH_BUFS_HI_START 21
#define CAP_MAX_SCRATCH_BUFS_HI_LEN 5
#define CAP_MAX_SCRATCH_BUFS_LO_FIELD hcsparams2
#define CAP_MAX_SCRATCH_BUFS_LO_START 27
#define CAP_MAX_SCRATCH_BUFS_LO_LEN 5
#define CAP_U1_LATENCY_FIELD hcsparams3
#define CAP_U1_LATENCY_START 0
#define CAP_U1_LATENCY_LEN 8
#define CAP_U2_LATENCY_FIELD hcsparams3
#define CAP_U2_LATENCY_START 16
#define CAP_U2_LATENCY_LEN 16
#define CAP_CSZ_FIELD hccparams
#define CAP_CSZ_START 2
#define CAP_CSZ_LEN 1
#define CAP_MASK(tok) MASK(CAP_##tok##_START, CAP_##tok##_LEN)
#define CAP_GET(tok, cap) (((cap).CAP_##tok##_FIELD & CAP_MASK(tok)) \
>> CAP_##tok##_START)
#define CTXSIZE(xhci) (CAP_GET(CSZ, (xhci)->capreg) ? 64 : 32)
typedef struct xhci { typedef struct xhci {
/* capreg is read-only, so no need for volatile,
and thus 32bit accesses can be assumed. */
struct capreg { struct capreg {
u8 caplength; /* 0x00 */ u32 hciparams;
u8 res1; /* 0x01 */ u32 hcsparams1;
union { /* 0x02 */ u32 hcsparams2;
u16 hciversion; u32 hcsparams3;
struct { u32 hccparams;
u8 hciver_lo; u32 dboff;
u8 hciver_hi; u32 rtsoff;
} __packed; } __packed capreg;
} __packed;
union { /* 0x04 */
u32 hcsparams1;
struct {
unsigned long MaxSlots:7;
unsigned long MaxIntrs:11;
unsigned long:6;
unsigned long MaxPorts:8;
} __packed;
} __packed;
union { /* 0x08 */
u32 hcsparams2;
struct {
unsigned long IST:4;
unsigned long ERST_Max:4;
unsigned long:13;
unsigned long Max_Scratchpad_Bufs_Hi:5;
unsigned long SPR:1;
unsigned long Max_Scratchpad_Bufs_Lo:5;
} __packed;
} __packed;
union { /* 0x0C */
u32 hcsparams3;
struct {
unsigned long u1latency:8;
unsigned long:8;
unsigned long u2latency:16;
} __packed;
} __packed;
union { /* 0x10 */
u32 hccparams;
struct {
unsigned long ac64:1;
unsigned long bnc:1;
unsigned long csz:1;
unsigned long ppc:1;
unsigned long pind:1;
unsigned long lhrc:1;
unsigned long ltc:1;
unsigned long nss:1;
unsigned long:4;
unsigned long MaxPSASize:4;
unsigned long xECP:16;
} __packed;
} __packed;
u32 dboff; /* 0x14 */
u32 rtsoff; /* 0x18 */
} __packed *capreg;
/* opreg is R/W is most places, so volatile access is necessary. /* opreg is R/W is most places, so volatile access is necessary.
volatile means that the compiler seeks byte writes if possible, volatile means that the compiler seeks byte writes if possible,

View File

@@ -160,7 +160,7 @@ xhci_rh_init (usbdev_t *dev)
dev->port = -1; dev->port = -1;
const int num_ports = /* TODO: maybe we need to read extended caps */ const int num_ports = /* TODO: maybe we need to read extended caps */
(XHCI_INST(dev->controller)->capreg->hcsparams1 >> 24) & 0xff; CAP_GET(MAXPORTS, XHCI_INST(dev->controller)->capreg);
generic_hub_init(dev, num_ports, &xhci_rh_ops); generic_hub_init(dev, num_ports, &xhci_rh_ops);
usb_debug("xHCI: root hub init done\n"); usb_debug("xHCI: root hub init done\n");