- Fix the definition of the linuxbios table so all of the compilers
  will generate the struct lb_memory_range the same.
- Add a few pci_ids.
- Small readability cleanups to debug_dev.

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@1818 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
parent a7dd869706
commit ec01aa98d0
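Background note (illustrative, not part of the commit): a plain uint64_t member is aligned differently by different ia32 compilers, so a table written with one compiler's layout of struct lb_memory_range could be read at the wrong offsets by a consumer built with another. Splitting each 64-bit field into explicit lo/hi uint32_t halves pins the layout. The sketch below demonstrates the pack_lb64()/unpack_lb64() helpers that this commit introduces; the main() driver and its test value are made up for illustration.

/* Standalone sketch of the packed 64-bit value used by the LinuxBIOS table.
 * The helpers mirror the ones added in this commit; main() is demo-only.
 */
#include <stdint.h>
#include <stdio.h>

struct lb_uint64 {	/* fixed layout: 8 bytes, 4-byte aligned everywhere */
	uint32_t lo;
	uint32_t hi;
};

static inline uint64_t unpack_lb64(struct lb_uint64 value)
{
	uint64_t result;
	result = value.hi;
	result = (result << 32) + value.lo;
	return result;
}

static inline struct lb_uint64 pack_lb64(uint64_t value)
{
	struct lb_uint64 result;
	result.lo = (value >> 0) & 0xffffffff;
	result.hi = (value >> 32) & 0xffffffff;
	return result;
}

int main(void)
{
	/* Hypothetical test value, purely to show the round trip. */
	struct lb_uint64 packed = pack_lb64(0x123456789abcdef0ULL);
	printf("lo=0x%08x hi=0x%08x round-trip=0x%016llx\n",
	       (unsigned)packed.lo, (unsigned)packed.hi,
	       (unsigned long long)unpack_lb64(packed));
	return 0;
}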
@@ -7,6 +7,22 @@
 #include <device/device.h>
 #include <stdlib.h>
 
+static inline uint64_t unpack_lb64(struct lb_uint64 value)
+{
+	uint64_t result;
+	result = value.hi;
+	result = (result << 32) + value.lo;
+	return result;
+}
+
+static inline struct lb_uint64 pack_lb64(uint64_t value)
+{
+	struct lb_uint64 result;
+	result.lo = (value >> 0) & 0xffffffff;
+	result.hi = (value >> 32) & 0xffffffff;
+	return result;
+}
+
 struct lb_header *lb_table_init(unsigned long addr)
 {
 	struct lb_header *header;
@@ -133,8 +149,8 @@ void lb_memory_range(struct lb_memory *mem,
 {
 	int entries;
 	entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
-	mem->map[entries].start = start;
-	mem->map[entries].size = size;
+	mem->map[entries].start = pack_lb64(start);
+	mem->map[entries].size = pack_lb64(size);
 	mem->map[entries].type = type;
 	mem->size += sizeof(mem->map[0]);
 }
@@ -157,16 +173,16 @@ static void lb_reserve_table_memory(struct lb_header *head)
 	 * setup so that is all we need to do.
 	 */
 	for(i = 0; i < entries; i++ ) {
-		uint64_t map_start = mem->map[i].start;
-		uint64_t map_end = map_start + mem->map[i].size;
+		uint64_t map_start = unpack_lb64(mem->map[i].start);
+		uint64_t map_end = map_start + unpack_lb64(mem->map[i].size);
 		/* Does this area need to be expanded? */
 		if (map_end == start) {
-			mem->map[i].size = end - map_start;
+			mem->map[i].size = pack_lb64(end - map_start);
 		}
 		/* Does this area need to be contracted? */
 		else if (map_start == start) {
-			mem->map[i].start = end;
-			mem->map[i].size = map_end - end;
+			mem->map[i].start = pack_lb64(end);
+			mem->map[i].size = pack_lb64(map_end - end);
 		}
 	}
 }
@@ -196,8 +212,10 @@ static void lb_cleanup_memory_ranges(struct lb_memory *mem)
 
 	/* Sort the lb memory ranges */
 	for(i = 0; i < entries; i++) {
+		uint64_t entry_start = unpack_lb64(mem->map[i].start);
 		for(j = i; j < entries; j++) {
-			if (mem->map[j].start < mem->map[i].start) {
+			uint64_t temp_start = unpack_lb64(mem->map[j].start);
+			if (temp_start < entry_start) {
 				struct lb_memory_range tmp;
 				tmp = mem->map[i];
 				mem->map[i] = mem->map[j];
@@ -212,10 +230,10 @@ static void lb_cleanup_memory_ranges(struct lb_memory *mem)
 		if (mem->map[i].type != mem->map[i + 1].type) {
 			continue;
 		}
-		start = mem->map[i].start;
-		end = start + mem->map[i].size;
-		nstart = mem->map[i + 1].start;
-		nend = nstart + mem->map[i + 1].size;
+		start = unpack_lb64(mem->map[i].start);
+		end = start + unpack_lb64(mem->map[i].size);
+		nstart = unpack_lb64(mem->map[i + 1].start);
+		nend = nstart + unpack_lb64(mem->map[i + 1].size);
 		if ((start <= nstart) && (end > nstart)) {
 			if (start > nstart) {
 				start = nstart;
@@ -224,8 +242,8 @@ static void lb_cleanup_memory_ranges(struct lb_memory *mem)
 				end = nend;
 			}
 			/* Record the new region size */
-			mem->map[i].start = start;
-			mem->map[i].size = end - start;
+			mem->map[i].start = pack_lb64(start);
+			mem->map[i].size = pack_lb64(end - start);
 
 			/* Delete the entry I have merged with */
 			memmove(&mem->map[i + 1], &mem->map[i + 2],
@@ -250,8 +268,8 @@ static void lb_remove_memory_range(struct lb_memory *mem,
 
 	/* Remove a reserved area from the memory map */
 	for(i = 0; i < entries; i++) {
-		uint64_t map_start = mem->map[i].start;
-		uint64_t map_end = map_start + mem->map[i].size;
+		uint64_t map_start = unpack_lb64(mem->map[i].start);
+		uint64_t map_end = map_start + unpack_lb64(mem->map[i].size);
 		if ((start <= map_start) && (end >= map_end)) {
 			/* Remove the completely covered range */
 			memmove(&mem->map[i], &mem->map[i + 1],
@@ -268,21 +286,21 @@ static void lb_remove_memory_range(struct lb_memory *mem,
 			mem->size += sizeof(mem->map[0]);
 			entries += 1;
 			/* Update the first map entry */
-			mem->map[i].size = start - map_start;
+			mem->map[i].size = pack_lb64(start - map_start);
 			/* Update the second map entry */
-			mem->map[i + 1].start = end;
-			mem->map[i + 1].size = map_end - end;
+			mem->map[i + 1].start = pack_lb64(end);
+			mem->map[i + 1].size = pack_lb64(map_end - end);
 			/* Don't bother with this map entry again */
 			i += 1;
 		}
 		else if ((start <= map_start) && (end > map_start)) {
 			/* Shrink the start of the memory range */
-			mem->map[i].start = end;
-			mem->map[i].size = map_end - end;
+			mem->map[i].start = pack_lb64(end);
+			mem->map[i].size = pack_lb64(map_end - end);
 		}
 		else if ((start < map_end) && (start > map_start)) {
 			/* Shrink the end of the memory range */
-			mem->map[i].size = start - map_start;
+			mem->map[i].size = pack_lb64(start - map_start);
 		}
 	}
 }

@@ -6,7 +6,6 @@
 #option CONFIG_PCIBIOS_IRQ=0
 object c_start.S
 object cpu.c
-#object pci_ops.c
 object pci_ops_conf1.c
 object pci_ops_conf2.c
 object pci_ops_auto.c

@@ -7,6 +7,22 @@
 #include <device/device.h>
 #include <stdlib.h>
 
+static inline uint64_t unpack_lb64(struct lb_uint64 value)
+{
+	uint64_t result;
+	result = value.hi;
+	result = (result << 32) + value.lo;
+	return result;
+}
+
+static inline struct lb_uint64 pack_lb64(uint64_t value)
+{
+	struct lb_uint64 result;
+	result.lo = (value >> 0) & 0xffffffff;
+	result.hi = (value >> 32) & 0xffffffff;
+	return result;
+}
+
 struct lb_header *lb_table_init(unsigned long addr)
 {
 	struct lb_header *header;
@@ -129,12 +145,12 @@ void lb_strings(struct lb_header *header)
 }
 
 void lb_memory_range(struct lb_memory *mem,
-	uint32_t type, unsigned long start, unsigned long size)
+	uint32_t type, uint64_t start, uint64_t size)
 {
 	int entries;
 	entries = (mem->size - sizeof(*mem))/sizeof(mem->map[0]);
-	mem->map[entries].start = start;
-	mem->map[entries].size = size;
+	mem->map[entries].start = pack_lb64(start);
+	mem->map[entries].size = pack_lb64(size);
 	mem->map[entries].type = type;
 	mem->size += sizeof(mem->map[0]);
 }
@@ -157,16 +173,16 @@ static void lb_reserve_table_memory(struct lb_header *head)
 	 * setup so that is all we need to do.
 	 */
 	for(i = 0; i < entries; i++ ) {
-		uint64_t map_start = mem->map[i].start;
-		uint64_t map_end = map_start + mem->map[i].size;
+		uint64_t map_start = unpack_lb64(mem->map[i].start);
+		uint64_t map_end = map_start + unpack_lb64(mem->map[i].size);
 		/* Does this area need to be expanded? */
 		if (map_end == start) {
-			mem->map[i].size = end - map_start;
+			mem->map[i].size = pack_lb64(end - map_start);
 		}
 		/* Does this area need to be contracted? */
 		else if (map_start == start) {
-			mem->map[i].start = end;
-			mem->map[i].size = map_end - end;
+			mem->map[i].start = pack_lb64(end);
+			mem->map[i].size = pack_lb64(map_end - end);
 		}
 	}
 }
@@ -196,8 +212,10 @@ static void lb_cleanup_memory_ranges(struct lb_memory *mem)
 
 	/* Sort the lb memory ranges */
 	for(i = 0; i < entries; i++) {
+		uint64_t entry_start = unpack_lb64(mem->map[i].start);
 		for(j = i; j < entries; j++) {
-			if (mem->map[j].start < mem->map[i].start) {
+			uint64_t temp_start = unpack_lb64(mem->map[j].start);
+			if (temp_start < entry_start) {
 				struct lb_memory_range tmp;
 				tmp = mem->map[i];
 				mem->map[i] = mem->map[j];
@@ -212,10 +230,10 @@ static void lb_cleanup_memory_ranges(struct lb_memory *mem)
 		if (mem->map[i].type != mem->map[i + 1].type) {
 			continue;
 		}
-		start = mem->map[i].start;
-		end = start + mem->map[i].size;
-		nstart = mem->map[i + 1].start;
-		nend = nstart + mem->map[i + 1].size;
+		start = unpack_lb64(mem->map[i].start);
+		end = start + unpack_lb64(mem->map[i].size);
+		nstart = unpack_lb64(mem->map[i + 1].start);
+		nend = nstart + unpack_lb64(mem->map[i + 1].size);
 		if ((start <= nstart) && (end > nstart)) {
 			if (start > nstart) {
 				start = nstart;
@@ -224,8 +242,8 @@ static void lb_cleanup_memory_ranges(struct lb_memory *mem)
 				end = nend;
 			}
 			/* Record the new region size */
-			mem->map[i].start = start;
-			mem->map[i].size = end - start;
+			mem->map[i].start = pack_lb64(start);
+			mem->map[i].size = pack_lb64(end - start);
 
 			/* Delete the entry I have merged with */
 			memmove(&mem->map[i + 1], &mem->map[i + 2],
@@ -250,8 +268,8 @@ static void lb_remove_memory_range(struct lb_memory *mem,
 
 	/* Remove a reserved area from the memory map */
 	for(i = 0; i < entries; i++) {
-		uint64_t map_start = mem->map[i].start;
-		uint64_t map_end = map_start + mem->map[i].size;
+		uint64_t map_start = unpack_lb64(mem->map[i].start);
+		uint64_t map_end = map_start + unpack_lb64(mem->map[i].size);
 		if ((start <= map_start) && (end >= map_end)) {
 			/* Remove the completely covered range */
 			memmove(&mem->map[i], &mem->map[i + 1],
@@ -268,21 +286,21 @@ static void lb_remove_memory_range(struct lb_memory *mem,
 			mem->size += sizeof(mem->map[0]);
 			entries += 1;
 			/* Update the first map entry */
-			mem->map[i].size = start - map_start;
+			mem->map[i].size = pack_lb64(start - map_start);
 			/* Update the second map entry */
-			mem->map[i + 1].start = end;
-			mem->map[i + 1].size = map_end - end;
+			mem->map[i + 1].start = pack_lb64(end);
+			mem->map[i + 1].size = pack_lb64(map_end - end);
 			/* Don't bother with this map entry again */
 			i += 1;
 		}
 		else if ((start <= map_start) && (end > map_start)) {
 			/* Shrink the start of the memory range */
-			mem->map[i].start = end;
-			mem->map[i].size = map_end - end;
+			mem->map[i].start = pack_lb64(end);
+			mem->map[i].size = pack_lb64(map_end - end);
 		}
 		else if ((start < map_end) && (start > map_start)) {
 			/* Shrink the end of the memory range */
-			mem->map[i].size = start - map_start;
+			mem->map[i].size = pack_lb64(start - map_start);
 		}
 	}
 }
@@ -306,27 +324,24 @@ struct lb_memory *get_lb_mem(void)
 	return mem_ranges;
 }
 
+static void build_lb_mem_range(void *gp, struct device *dev, struct resource *res)
+{
+	struct lb_memory *mem = gp;
+	lb_memory_range(mem, LB_MEM_RAM, res->base, res->size);
+}
+
 static struct lb_memory *build_lb_mem(struct lb_header *head)
 {
 	struct lb_memory *mem;
-	struct device *dev;
 
 	/* Record where the lb memory ranges will live */
 	mem = lb_memory(head);
 	mem_ranges = mem;
 
 	/* Build the raw table of memory */
-	for(dev = all_devices; dev; dev = dev->next) {
-		struct resource *res, *last;
-		last = &dev->resource[dev->resources];
-		for(res = &dev->resource[0]; res < last; res++) {
-			if (!(res->flags & IORESOURCE_MEM) ||
-				!(res->flags & IORESOURCE_CACHEABLE)) {
-				continue;
-			}
-			lb_memory_range(mem, LB_MEM_RAM, res->base, res->size);
-		}
-	}
+	search_global_resources(
+		IORESOURCE_MEM | IORESOURCE_CACHEABLE, IORESOURCE_MEM | IORESOURCE_CACHEABLE,
+		build_lb_mem_range, mem);
 	lb_cleanup_memory_ranges(mem);
 	return mem;
 }
@@ -345,7 +360,7 @@ unsigned long write_linuxbios_table(
 		struct lb_record *rec_dest, *rec_src;
 		/* Write the option config table... */
 		rec_dest = lb_new_record(head);
-		rec_src = (struct lb_record *)&option_table;
+		rec_src = (struct lb_record *)(void *)&option_table;
 		memcpy(rec_dest, rec_src, rec_src->size);
 	}
 	/* Record where RAM is located */

@@ -9,6 +9,22 @@
 #include <stdlib.h>
 #include <string.h>
 
+static inline uint64_t unpack_lb64(struct lb_uint64 value)
+{
+	uint64_t result;
+	result = value.hi;
+	result = (result << 32) + value.lo;
+	return result;
+}
+
+static inline struct lb_uint64 pack_lb64(uint64_t value)
+{
+	struct lb_uint64 result;
+	result.lo = (value >> 0) & 0xffffffff;
+	result.hi = (value >> 32) & 0xffffffff;
+	return result;
+}
+
 /* Maximum physical address we can use for the linuxBIOS bounce buffer.
  */
 #ifndef MAX_ADDR
@@ -120,14 +136,14 @@ static unsigned long get_bounce_buffer(struct lb_memory *mem)
 		unsigned long tbuffer;
 		if (mem->map[i].type != LB_MEM_RAM)
 			continue;
-		if (mem->map[i].start > MAX_ADDR)
+		if (unpack_lb64(mem->map[i].start) > MAX_ADDR)
 			continue;
-		if (mem->map[i].size < lb_size)
+		if (unpack_lb64(mem->map[i].size) < lb_size)
 			continue;
-		mstart = mem->map[i].start;
+		mstart = unpack_lb64(mem->map[i].start);
 		msize = MAX_ADDR - mstart +1;
-		if (msize > mem->map[i].size)
-			msize = mem->map[i].size;
+		if (msize > unpack_lb64(mem->map[i].size))
+			msize = unpack_lb64(mem->map[i].size);
 		mend = mstart + msize;
 		tbuffer = mend - lb_size;
 		if (tbuffer < buffer)
@@ -222,8 +238,8 @@ static int valid_area(struct lb_memory *mem, unsigned long buffer,
 		uint64_t mstart, mend;
 		uint32_t mtype;
 		mtype = mem->map[i].type;
-		mstart = mem->map[i].start;
-		mend = mstart + mem->map[i].size;
+		mstart = unpack_lb64(mem->map[i].start);
+		mend = mstart + unpack_lb64(mem->map[i].size);
 		if ((mtype == LB_MEM_RAM) && (start < mend) && (end > mstart)) {
 			break;
 		}
@@ -236,8 +252,8 @@ static int valid_area(struct lb_memory *mem, unsigned long buffer,
 			uint64_t mstart, mend;
 			uint32_t mtype;
 			mtype = mem->map[i].type;
-			mstart = mem->map[i].start;
-			mend = mstart + mem->map[i].size;
+			mstart = unpack_lb64(mem->map[i].start);
+			mend = mstart + unpack_lb64(mem->map[i].size);
 			printk_err(" [0x%016lx, 0x%016lx) %s\n",
 				(unsigned long)mstart,
 				(unsigned long)mend,

@@ -19,8 +19,8 @@ static void print_pci_regs(struct device *dev)
 		printk_debug(" %02x",byte);
 	}
 	printk_debug("\n");
 
 }
 
 static void print_mem(void)
 {
 	unsigned int i;
@@ -35,27 +35,29 @@ static void print_mem(void)
 static void print_pci_regs_all(void)
 {
 	struct device *dev;
-	unsigned char i,j,k;
+	unsigned char bus, device, function;
 
-	for(i=0;i<=15;i++) {
-		for(j=0;j<=0x1f;j++) {
-			for (k=0;k<=6;k++){
-				dev = dev_find_slot(i, PCI_DEVFN(j, k));
+	for(bus=0; bus<=256; bus++) {
+		for(device=0; device<=0x1f; device++) {
+			for (function=0; function<=7; function++){
+				unsigned devfn;
+				devfn = PCI_DEVFN(device, function);
+				dev = dev_find_slot(bus, devfn);
 				if(!dev) {
 					continue;
 				}
 				if(!dev->enabled) {
 					continue;
 				}
-				printk_debug("\n%02x:%02x:%02x aka %s",i,j,k, dev_path(dev));
+				printk_debug("\n%02x:%02x:%02x aka %s",
+					bus, device, function, dev_path(dev));
 				print_pci_regs(dev);
 			}
 		}
 	}
 
 }
 
-static void print_msr()
+static void print_cpuid()
 {
 	msr_t msr;
 	unsigned index;
@@ -126,6 +128,8 @@ static void print_smbus_regs_all(struct device *dev)
 static void debug_init(device_t dev)
 {
 	device_t parent;
+	if (!dev->enabled)
+		return;
 	switch(dev->path.u.pnp.device) {
 #if CONFIG_CHIP_NAME
 	case 0:
@@ -146,7 +150,7 @@ static void debug_init(device_t dev)
 		print_mem();
 		break;
 	case 3:
-		print_msr();
+		print_cpuid();
 		break;
 	case 4:
 		print_smbus_regs_all(&dev_root);

@@ -57,14 +57,17 @@ struct lb_record {
 
 #define LB_TAG_MEMORY 0x0001
 
+struct lb_uint64 {
+	uint32_t lo;
+	uint32_t hi;
+};
 struct lb_memory_range {
-	uint64_t start;
-	uint64_t size;
+	struct lb_uint64 start;
+	struct lb_uint64 size;
 	uint32_t type;
 #define LB_MEM_RAM 1 /* Memory anyone can use */
 #define LB_MEM_RESERVED 2 /* Don't use this memory region */
 #define LB_MEM_TABLE 16 /* Ram configuration tables are kept in */
 
 };
 
 struct lb_memory {

@@ -1793,6 +1793,7 @@
 #define PCI_DEVICE_ID_INTEL_6300ESB_USB2 0x25aa
 #define PCI_DEVICE_ID_INTEL_6300ESB_USB3 0x25ad
 #define PCI_DEVICE_ID_INTEL_6300ESB_SATA 0x25a3
+#define PCI_DEVICE_ID_INTEL_6300ESB_PIC1 0x25ac
 #define PCI_DEVICE_ID_INTEL_80310 0x530d
 #define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120
 #define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121
@@ -1815,6 +1816,10 @@
 #define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
 #define PCI_DEVICE_ID_INTEL_82451NX 0x84ca
 #define PCI_DEVICE_ID_INTEL_82454NX 0x84cb
+#define PCI_DEVICE_ID_INTEL_PCIE_PA 0x3595
+#define PCI_DEVICE_ID_INTEL_PCIE_PA1 0x3596
+#define PCI_DEVICE_ID_INTEL_PCIE_PB 0x3597
+#define PCI_DEVICE_ID_INTEL_PCIE_PC 0x3599
 
 #define PCI_DEVICE_ID_INTEL_82801DBM_1E0 0x2448
 #define PCI_DEVICE_ID_INTEL_82801DBM_1F0 0x24cc
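Closing sketch (illustrative, not from the commit): how a table consumer might walk the memory map now that start and size are packed. The struct lb_memory shape below is abbreviated to the fields this diff actually relies on (the tag/size header plus the map[] array), and the two-entry demo table built in main() is fabricated; a real payload would locate the table in memory instead of constructing one.

/* Hypothetical consumer-side walk of the LinuxBIOS memory table.
 * Struct definitions are trimmed to what this diff shows; see
 * linuxbios_tables.h for the full versions.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct lb_uint64 { uint32_t lo; uint32_t hi; };

struct lb_memory_range {
	struct lb_uint64 start;
	struct lb_uint64 size;
	uint32_t type;
#define LB_MEM_RAM      1
#define LB_MEM_RESERVED 2
#define LB_MEM_TABLE   16
};

struct lb_memory {
	uint32_t tag;                    /* LB_TAG_MEMORY */
	uint32_t size;                   /* total record size in bytes */
	struct lb_memory_range map[];
};

static inline uint64_t unpack_lb64(struct lb_uint64 value)
{
	return ((uint64_t)value.hi << 32) | value.lo;
}

static void print_lb_mem(const struct lb_memory *mem)
{
	/* Same entry-count computation as lb_memory_range() in the diff. */
	int entries = (mem->size - sizeof(*mem)) / sizeof(mem->map[0]);
	for (int i = 0; i < entries; i++) {
		uint64_t start = unpack_lb64(mem->map[i].start);
		uint64_t size  = unpack_lb64(mem->map[i].size);
		printf("[0x%016llx, 0x%016llx) type %u\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + size),
		       (unsigned)mem->map[i].type);
	}
}

int main(void)
{
	/* Fabricated two-entry table, for demonstration only. */
	struct lb_memory *mem =
		malloc(sizeof(*mem) + 2 * sizeof(struct lb_memory_range));
	mem->tag  = 0x0001;
	mem->size = sizeof(*mem) + 2 * sizeof(struct lb_memory_range);
	mem->map[0].start = (struct lb_uint64){ .lo = 0x00000000, .hi = 0 };
	mem->map[0].size  = (struct lb_uint64){ .lo = 0x0009fc00, .hi = 0 };
	mem->map[0].type  = LB_MEM_RAM;
	mem->map[1].start = (struct lb_uint64){ .lo = 0x00100000, .hi = 0 };
	mem->map[1].size  = (struct lb_uint64){ .lo = 0x3fe00000, .hi = 0 };
	mem->map[1].type  = LB_MEM_RAM;
	print_lb_mem(mem);
	free(mem);
	return 0;
}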