cbmem: use aligned_memcpy for reading lb_cbmem_entry information
The lbtable contains memory entries whose fields are not naturally
aligned in memory. Perform an aligned_memcpy() into a local copy before
reading the fields, so that platforms which do not allow unaligned
accesses work correctly.

BUG=b:246887035
TEST=cbmem -l; cbmem -r ${CBMEM ID}

Change-Id: Id94e3d65118083a081fc060a6938836f6176ab54
Signed-off-by: Yidi Lin <yidilin@chromium.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/67672
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Julius Werner <jwerner@chromium.org>
Reviewed-by: Yu-Ping Wu <yupingso@google.com>
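For context on why the pointer casts below were replaced, here is a minimal
sketch of the aligned-copy pattern the patch adopts. It is a hedged
illustration only: aligned_memcpy_sketch, cbmem_entry_example, and
read_entry_id are hypothetical names, the struct layout is a stand-in for the
real lb_cbmem_entry, and coreboot's actual aligned_memcpy() helper may be
implemented differently. The point is that on platforms which trap unaligned
loads, dereferencing a misaligned record pointer can fault, whereas copying
the record byte by byte into an aligned local moves all wide accesses onto
that local.

#include <stddef.h>
#include <stdint.h>

/*
 * Hypothetical byte-wise copy helper (not coreboot's actual
 * aligned_memcpy()): the volatile qualifiers keep the compiler from
 * widening the copy into word-sized loads/stores that could fault on
 * an unaligned source.
 */
static void *aligned_memcpy_sketch(void *dest, const void *src, size_t n)
{
	volatile uint8_t *d = dest;
	const volatile uint8_t *s = src;

	while (n--)
		*d++ = *s++;

	return dest;
}

/* Hypothetical stand-in for struct lb_cbmem_entry; layout is illustrative only. */
struct cbmem_entry_example {
	uint32_t tag;
	uint32_t size;
	uint64_t address;
	uint32_t entry_size;
	uint32_t id;
};

/*
 * Instead of casting the (possibly unaligned) record pointer and
 * dereferencing it, copy the record into a naturally aligned local and
 * read the fields from the copy.
 */
static uint32_t read_entry_id(const void *record)
{
	struct cbmem_entry_example lbe;

	aligned_memcpy_sketch(&lbe, record, sizeof(lbe));
	return lbe.id;	/* safe: lbe is an aligned stack object */
}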
commit 0811a6492d
parent f0d5f67e46
@@ -235,7 +235,7 @@ static int find_cbmem_entry(uint32_t id, uint64_t *addr, size_t *size)
 
 	while (offset < mapping_size(&lbtable_mapping)) {
 		const struct lb_record *lbr;
-		const struct lb_cbmem_entry *lbe;
+		struct lb_cbmem_entry lbe;
 
 		lbr = (const void *)(table + offset);
 		offset += lbr->size;
@@ -243,12 +243,12 @@ static int find_cbmem_entry(uint32_t id, uint64_t *addr, size_t *size)
 		if (lbr->tag != LB_TAG_CBMEM_ENTRY)
 			continue;
 
-		lbe = (const void *)lbr;
-		if (lbe->id != id)
+		aligned_memcpy(&lbe, lbr, sizeof(lbe));
+		if (lbe.id != id)
 			continue;
 
-		*addr = lbe->address;
-		*size = lbe->entry_size;
+		*addr = lbe.address;
+		*size = lbe.entry_size;
 		ret = 0;
 		break;
 	}
@@ -1128,7 +1128,7 @@ static void dump_cbmem_raw(unsigned int id)
 
 	while (offset < mapping_size(&lbtable_mapping)) {
 		const struct lb_record *lbr;
-		const struct lb_cbmem_entry *lbe;
+		struct lb_cbmem_entry lbe;
 
 		lbr = (const void *)(table + offset);
 		offset += lbr->size;
@@ -1136,11 +1136,11 @@ static void dump_cbmem_raw(unsigned int id)
 		if (lbr->tag != LB_TAG_CBMEM_ENTRY)
 			continue;
 
-		lbe = (const void *)lbr;
-		if (lbe->id == id) {
-			debug("found id for raw dump %0x", lbe->id);
-			base = lbe->address;
-			size = lbe->entry_size;
+		aligned_memcpy(&lbe, lbr, sizeof(lbe));
+		if (lbe.id == id) {
+			debug("found id for raw dump %0x", lbe.id);
+			base = lbe.address;
+			size = lbe.entry_size;
 			break;
 		}
 	}
@@ -1211,7 +1211,7 @@ static void dump_cbmem_toc(void)
 
 	while (offset < mapping_size(&lbtable_mapping)) {
 		const struct lb_record *lbr;
-		const struct lb_cbmem_entry *lbe;
+		struct lb_cbmem_entry lbe;
 
 		lbr = (const void *)(table + offset);
 		offset += lbr->size;
@@ -1219,8 +1219,8 @@ static void dump_cbmem_toc(void)
 		if (lbr->tag != LB_TAG_CBMEM_ENTRY)
 			continue;
 
-		lbe = (const void *)lbr;
-		cbmem_print_entry(i, lbe->id, lbe->address, lbe->entry_size);
+		aligned_memcpy(&lbe, lbr, sizeof(lbe));
+		cbmem_print_entry(i, lbe.id, lbe.address, lbe.entry_size);
 		i++;
 	}
 }