diff --git a/src/soc/intel/xeon_sp/cpx/Makefile.inc b/src/soc/intel/xeon_sp/cpx/Makefile.inc
index 6c0d6c05e5..54004a5be7 100644
--- a/src/soc/intel/xeon_sp/cpx/Makefile.inc
+++ b/src/soc/intel/xeon_sp/cpx/Makefile.inc
@@ -8,7 +8,7 @@
 subdirs-y += ../../../../cpu/x86/tsc
 subdirs-y += ../../../../cpu/intel/microcode

 romstage-y += romstage.c
-ramstage-y += chip.c acpi.c cpu.c
+ramstage-y += chip.c acpi.c cpu.c soc_util.c

 CPPFLAGS_common += -I$(src)/soc/intel/xeon_sp/cpx/include -I$(src)/soc/intel/xeon_sp/cpx
diff --git a/src/soc/intel/xeon_sp/cpx/chip.c b/src/soc/intel/xeon_sp/cpx/chip.c
index c19c0cf93c..19ebe4786d 100644
--- a/src/soc/intel/xeon_sp/cpx/chip.c
+++ b/src/soc/intel/xeon_sp/cpx/chip.c
@@ -1,18 +1,492 @@
 /* SPDX-License-Identifier: GPL-2.0-only */

 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include

 /* C620 IOAPIC has 120 redirection entries */
 #define C620_IOAPIC_REDIR_ENTRIES 120

+struct pci_resource {
+	struct device *dev;
+	struct resource *res;
+	struct pci_resource *next;
+};
+
+struct stack_dev_resource {
+	uint8_t align;
+	struct pci_resource *children;
+	struct stack_dev_resource *next;
+};
+
+typedef enum {
+	RES_TYPE_IO = 0,
+	RES_TYPE_NONPREF_MEM,
+	RES_TYPE_PREF_MEM,
+	MAX_RES_TYPES
+} ResType;
+
+static ResType get_res_type(uint64_t flags)
+{
+	if (flags & IORESOURCE_IO)
+		return RES_TYPE_IO;
+	if (flags & IORESOURCE_MEM) {
+		if (flags & IORESOURCE_PREFETCH) {
+			printk(BIOS_DEBUG, "%s:%d flags: 0x%llx\n", __func__, __LINE__, flags);
+			return RES_TYPE_PREF_MEM;
+		}
+		/* both 64-bit and 32-bit use below 4GB address space */
+		return RES_TYPE_NONPREF_MEM;
+	}
+	printk(BIOS_ERR, "Invalid resource type 0x%llx\n", flags);
+	die("Invalid resource type");
+}
+
+static bool need_assignment(uint64_t flags)
+{
+	if (flags & (IORESOURCE_STORED | IORESOURCE_RESERVE | IORESOURCE_FIXED |
+		IORESOURCE_ASSIGNED))
+		return false;
+	else
+		return true;
+}
+
+static uint64_t get_resource_base(STACK_RES *stack, ResType res_type)
+{
+	if (res_type == RES_TYPE_IO) {
+		assert(stack->PciResourceIoBase <= stack->PciResourceIoLimit);
+		return stack->PciResourceIoBase;
+	}
+	if (res_type == RES_TYPE_NONPREF_MEM) {
+		assert(stack->PciResourceMem32Base <= stack->PciResourceMem32Limit);
+		return stack->PciResourceMem32Base;
+	}
+	assert(stack->PciResourceMem64Base <= stack->PciResourceMem64Limit);
+	return stack->PciResourceMem64Base;
+}
+
+static void set_resource_base(STACK_RES *stack, ResType res_type, uint64_t base)
+{
+	if (res_type == RES_TYPE_IO) {
+		assert(base <= (stack->PciResourceIoLimit + 1));
+		stack->PciResourceIoBase = base;
+	} else if (res_type == RES_TYPE_NONPREF_MEM) {
+		assert(base <= (stack->PciResourceMem32Limit + 1));
+		stack->PciResourceMem32Base = base;
+	} else {
+		assert(base <= (stack->PciResourceMem64Limit + 1));
+		stack->PciResourceMem64Base = base;
+	}
+}
+
+static void assign_stack_resources(struct iiostack_resource *stack_list,
+	struct device *dev, struct resource *bridge);
+
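+/*
+ * Scan the buses attached to the PCI domain. On bus 0 only the statically
+ * declared devices are probed and their bridges scanned; every other
+ * (IIO stack) bus gets a full PCI scan.
+ */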
+static void xeonsp_cpx_pci_domain_scan_bus(struct device *dev)
+{
+	DEV_FUNC_ENTER(dev);
+	struct bus *link = dev->link_list;
+
+	printk(BIOS_SPEW, "%s:%s scanning buses under device %s\n",
+		__FILE__, __func__, dev_path(dev));
+	while (link != NULL) {
+		if (link->secondary == 0) { // scan only PSTACK buses
+			struct device *d;
+			for (d = link->children; d; d = d->sibling)
+				pci_probe_dev(d, link, d->path.pci.devfn);
+			scan_bridges(link);
+		} else {
+			pci_scan_bus(link, PCI_DEVFN(0, 0), 0xff);
+		}
+		link = link->next;
+	}
+	DEV_FUNC_EXIT(dev);
+}
+
+static void xeonsp_pci_dev_iterator(struct bus *bus,
+		void (*dev_iterator)(struct device *, void *),
+		void (*res_iterator)(struct device *, struct resource *, void *),
+		void *data)
+{
+	struct device *curdev;
+	struct resource *res;
+
+	/* Walk through all devices and find which resources they need. */
+	for (curdev = bus->children; curdev; curdev = curdev->sibling) {
+		struct bus *link;
+
+		if (!curdev->enabled)
+			continue;
+
+		if (!curdev->ops || !curdev->ops->read_resources) {
+			if (curdev->path.type != DEVICE_PATH_APIC)
+				printk(BIOS_ERR, "%s missing read_resources\n",
+					dev_path(curdev));
+			continue;
+		}
+
+		if (dev_iterator)
+			dev_iterator(curdev, data);
+
+		if (res_iterator) {
+			for (res = curdev->resource_list; res; res = res->next)
+				res_iterator(curdev, res, data);
+		}
+
+		/* Read in the resources behind the current device's links. */
+		for (link = curdev->link_list; link; link = link->next)
+			xeonsp_pci_dev_iterator(link, dev_iterator, res_iterator, data);
+	}
+}
+
+static void xeonsp_pci_dev_read_resources(struct device *dev, void *data)
+{
+	post_log_path(dev);
+	dev->ops->read_resources(dev);
+}
+
+static void xeonsp_pci_dev_dummy_func(struct device *dev)
+{
+}
+
+static void xeonsp_reset_pci_op(struct device *dev, void *data)
+{
+	if (dev->ops)
+		dev->ops->read_resources = xeonsp_pci_dev_dummy_func;
+}
+
+static STACK_RES *find_stack_for_bus(struct iiostack_resource *info, uint8_t bus)
+{
+	for (int i = 0; i < info->no_of_stacks; ++i) {
+		if (bus >= info->res[i].BusBase && bus <= info->res[i].BusLimit)
+			return &info->res[i];
+	}
+	return NULL;
+}
+
+/* Add a resource to its stack's list, which is kept sorted by decreasing alignment */
+static void add_res_to_stack(struct stack_dev_resource **root,
+	struct device *dev, struct resource *res)
+{
+	struct stack_dev_resource *cur = *root;
+	while (cur) {
+		if (cur->align == res->align || cur->next == NULL) /* equal or last record */
+			break;
+		else if (cur->align > res->align) {
+			if (cur->next->align < res->align) /* need to insert new record here */
+				break;
+			cur = cur->next;
+		} else {
+			break;
+		}
+	}
+
+	struct stack_dev_resource *nr;
+	if (!cur || cur->align != res->align) { /* need to add new record */
+		nr = malloc(sizeof(struct stack_dev_resource));
+		if (nr == 0)
+			die("%s: out of memory.\n", __func__);
+		memset(nr, 0, sizeof(struct stack_dev_resource));
+		nr->align = res->align;
+		if (!cur) {
+			*root = nr; /* head node */
+		} else if (cur->align > nr->align) {
+			if (cur->next == NULL) {
+				cur->next = nr;
+			} else {
+				nr->next = cur->next;
+				cur->next = nr;
+			}
+		} else { /* insert in the beginning */
+			nr->next = cur;
+			*root = nr;
+		}
+	} else {
+		nr = cur;
+	}
+
+	assert(nr != NULL && nr->align == res->align);
+
+	struct pci_resource *npr = malloc(sizeof(struct pci_resource));
+	if (npr == NULL)
+		die("%s: out of memory.\n", __func__);
+	npr->res = res;
+	npr->dev = dev;
+	npr->next = NULL;
+
+	if (nr->children == NULL) {
+		nr->children = npr;
+	} else {
+		struct pci_resource *pr = nr->children;
+		while (pr->next != NULL)
+			pr = pr->next;
+		pr->next = npr;
+	}
+}
+
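+/*
+ * Place the resources collected for one stack and resource type. The groups
+ * are walked in decreasing alignment order; each resource is placed at the
+ * next suitably aligned address inside the stack window and the stack's
+ * running base is advanced past it. If a bridge resource is given, its
+ * window is grown to cover the allocations (or set to a default window when
+ * nothing was allocated behind it).
+ */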
+static void reserve_dev_resources(STACK_RES *stack, ResType res_type,
+	struct stack_dev_resource *res_root, struct resource *bridge)
+{
+	uint8_t align;
+	uint64_t orig_base, base;
+
+	orig_base = get_resource_base(stack, res_type);
+
+	align = 0;
+	base = orig_base;
+	int first = 1;
+	while (res_root) { /* loop through all devices grouped by alignment requirements */
+		struct pci_resource *pr = res_root->children;
+		while (pr) {
+			if (first) {
+				if (bridge) { /* takes highest alignment */
+					if (bridge->align < pr->res->align)
+						bridge->align = pr->res->align;
+					orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
+				} else {
+					orig_base = ALIGN_UP(orig_base, 1 << pr->res->align);
+				}
+				base = orig_base;
+
+				if (bridge)
+					bridge->base = base;
+				pr->res->base = base;
+				first = 0;
+			} else {
+				pr->res->base = ALIGN_UP(base, 1 << pr->res->align);
+			}
+			pr->res->limit = pr->res->base + pr->res->size - 1;
+			base = pr->res->limit + 1;
+			pr->res->flags |= (IORESOURCE_ASSIGNED);
+			pr = pr->next;
+		}
+		res_root = res_root->next;
+	}
+
+	if (bridge) {
+		/* This bridge has no resources behind it; give it a default window */
+		if (first) {
+			orig_base = ALIGN_UP(orig_base, 1 << bridge->align);
+			bridge->base = orig_base;
+			base = orig_base + (1ULL << bridge->gran);
+		}
+
+		bridge->size = ALIGN_UP(base, 1 << bridge->align) - bridge->base;
+
+		bridge->limit = bridge->base + bridge->size - 1;
+		bridge->flags |= (IORESOURCE_ASSIGNED);
+		base = bridge->limit + 1;
+	}
+
+	set_resource_base(stack, res_type, base);
+}
+
+static void reclaim_resource_mem(struct stack_dev_resource *res_root)
+{
+	while (res_root) { /* loop through all devices grouped by alignment requirements */
+		/* free pci_resource */
+		struct pci_resource *pr = res_root->children;
+		while (pr) {
+			struct pci_resource *dpr = pr;
+			pr = pr->next;
+			free(dpr);
+		}
+
+		/* free stack_dev_resource */
+		struct stack_dev_resource *ddr = res_root;
+		res_root = res_root->next;
+		free(ddr);
+	}
+}
+
+static void assign_bridge_resources(struct iiostack_resource *stack_list,
+	struct device *dev, struct resource *bridge)
+{
+	struct resource *res;
+	if (!dev->enabled)
+		return;
+
+	for (res = dev->resource_list; res; res = res->next) {
+		if (!(res->flags & IORESOURCE_BRIDGE) ||
+			(bridge && (get_res_type(bridge->flags) != get_res_type(res->flags))))
+			continue;
+
+		assign_stack_resources(stack_list, dev, res);
+
+		if (!bridge)
+			continue;
+
+		/* on the first update, overload IORESOURCE_ASSIGNED */
+		if (!(bridge->flags & IORESOURCE_ASSIGNED)) {
+			bridge->base = res->base;
+			bridge->limit = res->limit;
+			bridge->flags |= (IORESOURCE_ASSIGNED);
+		} else {
+			/* update bridge range from child bridge range */
+			if (res->base < bridge->base)
+				bridge->base = res->base;
+			if (res->limit > bridge->limit)
+				bridge->limit = res->limit;
+		}
+		bridge->size = (bridge->limit - bridge->base + 1);
+	}
+}
+
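+/*
+ * Assign resources to the devices on the buses behind this device's links,
+ * using the windows of the IIO stack that owns each bus. Bridge resources
+ * are handled first (recursing into the buses behind them), then the
+ * remaining device resources of each type (IO, non-prefetchable and
+ * prefetchable memory) are grouped by alignment and placed in the stack
+ * window.
+ */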
+static void assign_stack_resources(struct iiostack_resource *stack_list,
+	struct device *dev, struct resource *bridge)
+{
+	struct bus *bus;
+
+	/* Walk each bus behind the current device's links */
+	for (bus = dev->link_list; bus; bus = bus->next) {
+		struct device *curdev;
+		STACK_RES *stack;
+
+		/* get IIO stack for this bus */
+		stack = find_stack_for_bus(stack_list, bus->secondary);
+		assert(stack != NULL);
+
+		/* Assign resources to bridge */
+		for (curdev = bus->children; curdev; curdev = curdev->sibling)
+			assign_bridge_resources(stack_list, curdev, bridge);
+
+		/* Pick non-bridged resources for resource allocation for each resource type */
+		ResType res_types[MAX_RES_TYPES] = {
+			RES_TYPE_IO,
+			RES_TYPE_NONPREF_MEM,
+			RES_TYPE_PREF_MEM
+		};
+
+		uint8_t no_res_types = MAX_RES_TYPES;
+
+		/* if it is a bridge, only process the matching bridge resource type */
+		if (bridge) {
+			res_types[0] = get_res_type(bridge->flags);
+			no_res_types = 1;
+		}
+
+		printk(BIOS_DEBUG, "%s:%d no_res_types: %d\n", __func__, __LINE__,
+			no_res_types);
+
+		/* Process each resource type */
+		for (int rt = 0; rt < no_res_types; ++rt) {
+			struct stack_dev_resource *res_root = NULL;
+			printk(BIOS_DEBUG, "%s:%d rt: %d\n", __func__, __LINE__, rt);
+			for (curdev = bus->children; curdev; curdev = curdev->sibling) {
+				struct resource *res;
+				printk(BIOS_DEBUG, "%s:%d dev: %s\n",
+					__func__, __LINE__, dev_path(curdev));
+				if (!curdev->enabled)
+					continue;
+
+				for (res = curdev->resource_list; res; res = res->next) {
+					printk(BIOS_DEBUG, "%s:%d dev: %s, flags: 0x%lx\n",
+						__func__, __LINE__,
+						dev_path(curdev), res->flags);
+					if (res->size == 0 ||
+						get_res_type(res->flags) != res_types[rt] ||
+						(res->flags & IORESOURCE_BRIDGE) ||
+						!need_assignment(res->flags))
+						continue;
+					else
+						add_res_to_stack(&res_root, curdev, res);
+				}
+			}
+
+			/* Allocate resources and update bridge range */
+			if (res_root || (bridge && !(bridge->flags & IORESOURCE_ASSIGNED))) {
+				reserve_dev_resources(stack, res_types[rt], res_root, bridge);
+				reclaim_resource_mem(res_root);
+			}
+		}
+	}
+}
+
+static void xeonsp_pci_domain_read_resources(struct device *dev)
+{
+	struct bus *link;
+
+	DEV_FUNC_ENTER(dev);
+
+	pci_domain_read_resources(dev);
+
+	/*
+	 * Walk through all devices in this domain and read their resources.
+	 * Since there is no callback that signals when read_resources has
+	 * completed for all devices, this function initiates read_resources
+	 * for every device and then swaps each device's read_resources
+	 * operation with a dummy function to avoid warnings.
+	 */
+	for (link = dev->link_list; link; link = link->next)
+		xeonsp_pci_dev_iterator(link, xeonsp_pci_dev_read_resources, NULL, NULL);
+
+	for (link = dev->link_list; link; link = link->next)
+		xeonsp_pci_dev_iterator(link, xeonsp_reset_pci_op, NULL, NULL);
+
+	struct iiostack_resource stack_info = {0};
+	uint8_t pci64bit_alloc_flag = get_iiostack_info(&stack_info);
+	if (!pci64bit_alloc_flag) {
+		/*
+		 * Split the 32 bit address space between the prefetchable and
+		 * non-prefetchable windows
+		 */
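+		/*
+		 * Illustrative example (values not taken from the FSP HOB):
+		 * a stack with a Mem32 window of 0x90000000-0xcfffffff ends up
+		 * with Mem32 0x90000000-0xafffffff and Mem64
+		 * 0xb0000000-0xcfffffff.
+		 */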
+		for (int s = 0; s < stack_info.no_of_stacks; ++s) {
+			STACK_RES *res = &stack_info.res[s];
+			uint64_t length = (res->PciResourceMem32Limit -
+				res->PciResourceMem32Base + 1)/2;
+			res->PciResourceMem64Limit = res->PciResourceMem32Limit;
+			res->PciResourceMem32Limit = (res->PciResourceMem32Base + length - 1);
+			res->PciResourceMem64Base = res->PciResourceMem32Limit + 1;
+		}
+	}
+
+	/* assign resources */
+	assign_stack_resources(&stack_info, dev, NULL);
+
+	DEV_FUNC_EXIT(dev);
+}
+
+static void reset_resource_to_unassigned(struct device *dev, struct resource *res, void *data)
+{
+	if ((res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) &&
+		!(res->flags & (IORESOURCE_FIXED | IORESOURCE_RESERVE))) {
+		res->flags &= ~IORESOURCE_ASSIGNED;
+	}
+}
+
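+/*
+ * Domain set_resources: clear the existing resource assignments, redo the
+ * resource reading/allocation so that everything lands inside the FSP IIO
+ * stack windows, and finally let the common code store the resources on
+ * each link.
+ */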
+static void xeonsp_cpx_pci_domain_set_resources(struct device *dev)
+{
+	DEV_FUNC_ENTER(dev);
+
+	print_resource_tree(dev, BIOS_SPEW, "Before xeonsp pci domain set resource");
+
+	/* reset bus 0 dev resource assignment - need to change them to FSP IIOStack window */
+	xeonsp_pci_dev_iterator(dev->link_list, NULL, reset_resource_to_unassigned, NULL);
+
+	/* update dev resources based on IIOStack IO/Mem32/Mem64 windows */
+	xeonsp_pci_domain_read_resources(dev);
+
+	struct bus *link = dev->link_list;
+	while (link != NULL) {
+		assign_resources(link);
+		link = link->next;
+	}
+
+	print_resource_tree(dev, BIOS_SPEW, "After xeonsp pci domain set resource");
+
+	DEV_FUNC_EXIT(dev);
+}
+
 void platform_fsp_silicon_init_params_cb(FSPS_UPD *silupd)
 {
 	/* not implemented yet */
@@ -20,8 +494,8 @@ void platform_fsp_silicon_init_params_cb(FSPS_UPD *silupd)

 static struct device_operations pci_domain_ops = {
 	.read_resources = &pci_domain_read_resources,
-	.set_resources = &pci_domain_set_resources,
-	.scan_bus = &pci_domain_scan_bus,
+	.set_resources = &xeonsp_cpx_pci_domain_set_resources,
+	.scan_bus = &xeonsp_cpx_pci_domain_scan_bus,
 };

 static struct device_operations cpu_bus_ops = {
@@ -30,14 +504,43 @@ static struct device_operations cpu_bus_ops = {
 	.init = cpx_init_cpus,
 };

-static void chip_enable_dev(struct device *dev)
+/* Attach the IIO stack bus numbers, as dummy buses, to the PCI DOMAIN 0000 device */
+static void attach_iio_stacks(struct device *dev)
 {
-	/* Set the operations if it is a special bus type */
-	if (dev->path.type == DEVICE_PATH_DOMAIN) {
-		dev->ops = &pci_domain_ops;
-	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
-		dev->ops = &cpu_bus_ops;
+	struct bus *iiostack_bus;
+	struct iiostack_resource stack_info = {0};
+
+	DEV_FUNC_ENTER(dev);
+
+	get_iiostack_info(&stack_info);
+	for (int s = 0; s < stack_info.no_of_stacks; ++s) {
+		/* only non-zero bus numbers need to be enumerated */
+		if (stack_info.res[s].BusBase == 0)
+			continue;
+
+		iiostack_bus = malloc(sizeof(struct bus));
+		if (iiostack_bus == NULL)
+			die("%s: out of memory.\n", __func__);
+		memset(iiostack_bus, 0, sizeof(*iiostack_bus));
+		memcpy(iiostack_bus, dev->bus, sizeof(*iiostack_bus));
+		iiostack_bus->secondary = stack_info.res[s].BusBase;
+		iiostack_bus->subordinate = stack_info.res[s].BusBase;
+		iiostack_bus->dev = NULL;
+		iiostack_bus->children = NULL;
+		iiostack_bus->next = NULL;
+		iiostack_bus->link_num = 1;
+
+		if (dev->link_list == NULL) {
+			dev->link_list = iiostack_bus;
+		} else {
+			struct bus *nlink = dev->link_list;
+			while (nlink->next != NULL)
+				nlink = nlink->next;
+			nlink->next = iiostack_bus;
+		}
 	}
+
+	DEV_FUNC_EXIT(dev);
 }

 static void pch_enable_ioapic(const struct device *dev)
@@ -65,6 +568,17 @@ struct pci_operations soc_pci_ops = {
 	.set_subsystem = pci_dev_set_subsystem,
 };

+static void chip_enable_dev(struct device *dev)
+{
+	/* Set the operations if it is a special bus type */
+	if (dev->path.type == DEVICE_PATH_DOMAIN) {
+		dev->ops = &pci_domain_ops;
+		attach_iio_stacks(dev);
+	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
+		dev->ops = &cpu_bus_ops;
+	}
+}
+
 static void chip_final(void *data)
 {
 	p2sb_hide();
@@ -83,5 +597,5 @@ struct chip_operations soc_intel_xeon_sp_cpx_ops = {
 	CHIP_NAME("Intel Cooperlake-SP")
 	.enable_dev = chip_enable_dev,
 	.init = chip_init,
-	.final = chip_final
+	.final = chip_final,
 };
diff --git a/src/soc/intel/xeon_sp/cpx/chip.h b/src/soc/intel/xeon_sp/cpx/chip.h
index 87dd2b30b5..6bf7272572 100644
--- a/src/soc/intel/xeon_sp/cpx/chip.h
+++ b/src/soc/intel/xeon_sp/cpx/chip.h
@@ -3,13 +3,65 @@
 #ifndef _SOC_CHIP_H_
 #define _SOC_CHIP_H_

-#include
 #include
+#include
+#include

 struct soc_intel_xeon_sp_cpx_config {
 	/* Common struct containing soc config data required by common code */
 	struct soc_intel_common_config common_soc_config;

+	/**
+	 * Interrupt Routing configuration
+	 * If bit7 is 1, the interrupt is disabled.
+	 */
+	uint8_t pirqa_routing;
+	uint8_t pirqb_routing;
+	uint8_t pirqc_routing;
+	uint8_t pirqd_routing;
+	uint8_t pirqe_routing;
+	uint8_t pirqf_routing;
+	uint8_t pirqg_routing;
+	uint8_t pirqh_routing;
+
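+	/*
+	 * Example only (values are board specific), set from the mainboard
+	 * devicetree.cb:
+	 *   register "pirqa_routing" = "0x0b"
+	 *   register "pirqb_routing" = "0x0a"
+	 */
+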
+	/**
+	 * Device Interrupt Routing configuration
+	 * Interrupt Pin x Route.
+	 * 0h = PIRQA#
+	 * 1h = PIRQB#
+	 * 2h = PIRQC#
+	 * 3h = PIRQD#
+	 * 4h = PIRQE#
+	 * 5h = PIRQF#
+	 * 6h = PIRQG#
+	 * 7h = PIRQH#
+	 */
+	uint16_t ir00_routing;
+	uint16_t ir01_routing;
+	uint16_t ir02_routing;
+	uint16_t ir03_routing;
+	uint16_t ir04_routing;
+
+	/**
+	 * Device Interrupt Polarity Control
+	 * ipc0 - IRQ-00-31 - 1: Active low to IOAPIC, 0: Active high to IOAPIC
+	 * ipc1 - IRQ-32-63 - 1: Active low to IOAPIC, 0: Active high to IOAPIC
+	 * ipc2 - IRQ-64-95 - 1: Active low to IOAPIC, 0: Active high to IOAPIC
+	 * ipc3 - IRQ-96-119 - 1: Active low to IOAPIC, 0: Active high to IOAPIC
+	 */
+	uint32_t ipc0;
+	uint32_t ipc1;
+	uint32_t ipc2;
+	uint32_t ipc3;
+
+	uint64_t turbo_ratio_limit;
+	uint64_t turbo_ratio_limit_cores;
+
+	uint32_t pstate_req_ratio;
+
+	uint32_t coherency_support;
+	uint32_t ats_support;
+
 	/* Generic IO decode ranges */
 	uint32_t gen1_dec;
 	uint32_t gen2_dec;
diff --git a/src/soc/intel/xeon_sp/cpx/include/soc/irq.h b/src/soc/intel/xeon_sp/cpx/include/soc/irq.h
index 71e058417e..efd50577b6 100644
--- a/src/soc/intel/xeon_sp/cpx/include/soc/irq.h
+++ b/src/soc/intel/xeon_sp/cpx/include/soc/irq.h
@@ -1,3 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0-only */

-/* nothing here, please come back later */
+#ifndef _SOC_IRQ_H_
+#define _SOC_IRQ_H_
+
+#define PCH_IRQ10 10
+#define PCH_IRQ11 11
+
+#endif /* _SOC_IRQ_H_ */
diff --git a/src/soc/intel/xeon_sp/cpx/include/soc/soc_util.h b/src/soc/intel/xeon_sp/cpx/include/soc/soc_util.h
new file mode 100644
index 0000000000..d8b038c4d5
--- /dev/null
+++ b/src/soc/intel/xeon_sp/cpx/include/soc/soc_util.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _SOC_UTIL_H_
+#define _SOC_UTIL_H_
+
+#include
+#include
+#include
+#include
+
+#define DEV_FUNC_ENTER(dev) \
+	printk(BIOS_SPEW, "%s:%s:%d: ENTER (dev: %s)\n", \
+		__FILE__, __func__, __LINE__, dev_path(dev))
+
+#define DEV_FUNC_EXIT(dev) \
+	printk(BIOS_SPEW, "%s:%s:%d: EXIT (dev: %s)\n", __FILE__, \
+		__func__, __LINE__, dev_path(dev))
+
+struct iiostack_resource {
+	uint8_t no_of_stacks;
+	STACK_RES res[MAX_SOCKET * MAX_LOGIC_IIO_STACK];
+};
+
+uint8_t get_iiostack_info(struct iiostack_resource *info);
+#endif /* _SOC_UTIL_H_ */
diff --git a/src/soc/intel/xeon_sp/cpx/soc_util.c b/src/soc/intel/xeon_sp/cpx/soc_util.c
new file mode 100644
index 0000000000..8548615239
--- /dev/null
+++ b/src/soc/intel/xeon_sp/cpx/soc_util.c
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include
+#include
+#include
+#include
+
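+/*
+ * Copy each IIO stack's resource windows (bus range, IO, Mem32 and Mem64)
+ * from the FSP IIO_UDS HOB into the caller's iiostack_resource. Returns the
+ * HOB's Pci64BitResourceAllocation flag, which chip.c uses to decide whether
+ * the 32 bit window still has to be split into Mem32/Mem64 windows.
+ */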
+uint8_t get_iiostack_info(struct iiostack_resource *info)
+{
+	size_t hob_size;
+	const uint8_t fsp_hob_iio_universal_data_guid[16] = FSP_HOB_IIO_UNIVERSAL_DATA_GUID;
+	const IIO_UDS *hob;
+
+	hob = fsp_find_extension_hob_by_guid(
+		fsp_hob_iio_universal_data_guid, &hob_size);
+	assert(hob != NULL && hob_size != 0);
+
+	// copy IIO stack info from the FSP HOB
+	info->no_of_stacks = 0;
+	for (int s = 0; s < hob->PlatformData.numofIIO; ++s) {
+		for (int x = 0; x < MAX_IIO_STACK; ++x) {
+			const STACK_RES *ri = &hob->PlatformData.IIO_resource[s].StackRes[x];
+			// TODO: do we have a situation with only bus 0 and one stack?
+			if (ri->BusBase >= ri->BusLimit)
+				continue;
+			assert(info->no_of_stacks < (CONFIG_MAX_SOCKET * MAX_IIO_STACK));
+			memcpy(&info->res[info->no_of_stacks++], ri, sizeof(STACK_RES));
+		}
+	}
+
+	return hob->PlatformData.Pci64BitResourceAllocation;
+}
diff --git a/src/soc/intel/xeon_sp/include/soc/util.h b/src/soc/intel/xeon_sp/include/soc/util.h
index 24baca6eb7..159efeba2c 100644
--- a/src/soc/intel/xeon_sp/include/soc/util.h
+++ b/src/soc/intel/xeon_sp/include/soc/util.h
@@ -4,6 +4,7 @@
 #define _XEON_SP_SOC_UTIL_H_

 #include
+#include

 void get_cpubusnos(uint32_t *bus0, uint32_t *bus1, uint32_t *bus2, uint32_t *bus3);
 void unlock_pam_regions(void);