def set_shm_regions(launch_item_values, scenario_info):
    """
    Collect the ivshmem region candidates for every post-launched VM.

    Fills launch_item_values with 'uos:id=<n>,shm_regions,shm_region' keys.

    Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to ``except Exception``.

    :param launch_item_values: dict to receive per-UOS shm region lists
    :param scenario_info: scenario xml file used to query IVSHMEM settings
    """
    try:
        raw_shmem_regions = common.get_hv_item_tag(scenario_info, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
        vm_types = common.get_leaf_tag_map(scenario_info, "vm_type")
        shm_enabled = common.get_hv_item_tag(scenario_info, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    except Exception:
        # scenario file has no usable IVSHMEM configuration: nothing to collect
        return

    sos_vm_id = 0
    for vm_id, vm_type in vm_types.items():
        if vm_type in ['SOS_VM']:
            sos_vm_id = vm_id
        elif vm_type in ['POST_STD_VM', 'POST_RT_VM', 'KATA_VM']:
            uos_id = vm_id - sos_vm_id
            shm_region_key = 'uos:id={},shm_regions,shm_region'.format(uos_id)
            launch_item_values[shm_region_key] = ['']
            if shm_enabled == 'y':
                for shmem_region in raw_shmem_regions:
                    # skip empty/unset region entries
                    if shmem_region is None or shmem_region.strip() == '':
                        continue
                    try:
                        shm_splited = shmem_region.split(',')
                        name = shm_splited[0].strip()
                        size = shm_splited[1].strip()
                        vm_id_list = [x.strip() for x in shm_splited[2].split(':')]
                        if str(vm_id) in vm_id_list:
                            launch_item_values[shm_region_key].append(','.join([name, size]))
                    except Exception as e:
                        # malformed region string: report it and keep processing the rest
                        print(e)
def get_args(self):
    """
    Collect launch-config arguments from the launch and scenario xml files.

    Fix: the shm_regions filter previously compared the *bound method*
    ``x.strip`` against '' (always True, so nothing was filtered); it now
    calls ``x.strip()`` so whitespace-only entries are dropped as intended.
    """
    self.args["uos_type"] = common.get_leaf_tag_map(self.launch_info, "uos_type")
    self.args["rtos_type"] = common.get_leaf_tag_map(self.launch_info, "rtos_type")
    self.args["mem_size"] = common.get_leaf_tag_map(self.launch_info, "mem_size")
    self.args["gvt_args"] = common.get_leaf_tag_map(self.launch_info, "gvt_args")
    self.args["vbootloader"] = common.get_leaf_tag_map(self.launch_info, "vbootloader")
    self.args["vuart0"] = common.get_leaf_tag_map(self.launch_info, "vuart0")
    self.args["cpu_sharing"] = common.get_hv_item_tag(self.scenario_info, "FEATURES", "SCHEDULER")
    self.args["pm_channel"] = common.get_leaf_tag_map(self.launch_info, "poweroff_channel")
    self.args["cpu_affinity"] = common.get_leaf_tag_map(self.launch_info, "cpu_affinity", "pcpu_id")
    self.args["shm_enabled"] = common.get_hv_item_tag(self.scenario_info, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    self.args["shm_regions"] = common.get_leaf_tag_map(self.launch_info, "shm_regions", "shm_region")
    for vmid, shm_regions in self.args["shm_regions"].items():
        if self.args["shm_enabled"] == 'y':
            # keep only non-empty region strings (note the strip() *call*)
            self.args["shm_regions"][vmid] = [
                x for x in shm_regions if (x is not None and x.strip() != '')
            ]
        else:
            # ivshmem disabled: no regions apply
            self.args["shm_regions"][vmid] = []
    self.args["xhci"] = common.get_leaf_tag_map(self.launch_info, "usb_xhci")
def get_info(self):
    """Read the NPK/memory/console log levels from the DEBUG_OPTIONS section."""
    section = "DEBUG_OPTIONS"
    self.npk = common.get_hv_item_tag(self.hv_file, section, "NPK_LOGLEVEL")
    self.mem = common.get_hv_item_tag(self.hv_file, section, "MEM_LOGLEVEL")
    self.console = common.get_hv_item_tag(self.hv_file, section, "CONSOLE_LOGLEVEL")
def pci_dev_num_per_vm_gen(config):
    """
    Emit one VM<i>_CONFIG_PCI_DEV_NUM macro per pre/post-launched VM.

    :param config: file pointer of the generated header being written
    """
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_dev_num = scenario_cfg_lib.get_pci_num(scenario_cfg_lib.get_pci_devs(pci_items))
    ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    shmem_num = scenario_cfg_lib.get_shmem_num(scenario_cfg_lib.get_shmem_regions(ivshmem_region))

    for vm_i, vm_type in common.VM_TYPES.items():
        load_type = scenario_cfg_lib.VM_DB[vm_type]['load_type']
        if load_type == "POST_LAUNCHED_VM":
            # post-launched VMs only carry ivshmem devices here
            if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, shmem_num[vm_i]), file=config)
        elif load_type == "PRE_LAUNCHED_VM":
            extra = 0
            if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                extra = shmem_num[vm_i]
            print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, pci_dev_num[vm_i] + extra), file=config)
    print("", file=config)
def get_info(self):
    """Load the DEBUG_OPTIONS log settings, then descend into the loglevel sub-config."""
    section = "DEBUG_OPTIONS"
    self.release = common.get_hv_item_tag(self.hv_file, section, "RELEASE")
    self.dest = common.get_hv_item_tag(self.hv_file, section, "LOG_DESTINATION")
    self.buf_size = common.get_hv_item_tag(self.hv_file, section, "LOG_BUF_SIZE")
    # the nested loglevel object fetches its own items
    self.level.get_info()
def get_info(self):
    """Read MEMORY sizing and IVSHMEM settings from the hv config file."""
    fetch = common.get_hv_item_tag
    self.stack_size = fetch(self.hv_file, "MEMORY", "STACK_SIZE")
    self.hv_ram_start = fetch(self.hv_file, "MEMORY", "HV_RAM_START")
    self.ivshmem_enable = fetch(self.hv_file, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    self.ivshmem_region = fetch(self.hv_file, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
def get_info(self):
    """Read all MEMORY sizing items plus the IVSHMEM settings from the hv config file."""
    fetch = common.get_hv_item_tag
    self.stack_size = fetch(self.hv_file, "MEMORY", "STACK_SIZE")
    self.low_ram_size = fetch(self.hv_file, "MEMORY", "LOW_RAM_SIZE")
    self.hv_ram_size = fetch(self.hv_file, "MEMORY", "HV_RAM_SIZE")
    self.hv_ram_start = fetch(self.hv_file, "MEMORY", "HV_RAM_START")
    self.platform_ram_size = fetch(self.hv_file, "MEMORY", "PLATFORM_RAM_SIZE")
    self.sos_ram_size = fetch(self.hv_file, "MEMORY", "SOS_RAM_SIZE")
    self.uos_ram_size = fetch(self.hv_file, "MEMORY", "UOS_RAM_SIZE")
    self.ivshmem_enable = fetch(self.hv_file, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    self.ivshmem_region = fetch(self.hv_file, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
def generate_file(config):
    """
    Get PCI device and generate pci_devices.h

    Fix: the blank-region guard used ``raw_shmem_region.strip != ''`` which
    compares the bound method object to a string (always True); it now calls
    ``strip()`` so whitespace-only entries are actually skipped.

    :param config: it is a file pointer of pci information for writing to
    """
    # write the license into pci
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)

    # add bios and base board info
    board_cfg_lib.handle_bios_info(config)

    # write the header into pci
    print("{0}".format(PCI_HEADER), file=config)

    board_cfg_lib.parser_pci()

    # emit each unique BDF once, grouped and counted per device sub-name
    compared_bdf = []
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue
            print("", file=config)
            write_pbdf(i_cnt, bdf, bar_attr, config)
            i_cnt += 1

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    if ivshmem_enabled == 'y':
        shmem_cnt = 0
        for raw_shmem_region in raw_shmem_regions:
            # FIX: was `raw_shmem_region.strip != ''` (method vs str, always True)
            if raw_shmem_region and raw_shmem_region.strip() != '':
                name = raw_shmem_region.split(',')[0].strip()
                print("", file=config)
                print("#define IVSHMEM_SHM_REGION_%-21d" % shmem_cnt, end="", file=config)
                print('"{}"'.format(name), file=config)
                shmem_cnt += 1

    # write the end to the pci devices
    print("{0}".format(PCI_END_HEADER), file=config)
def vm_cpu_affinity_check(config_file, id_cpus_per_vm_dic, item):
    """
    Check cpu number of per vm.

    Fix: the pre/post-launch cpu collectors used ``if not None`` in the
    comprehensions — ``not None`` is a constant True, so ``None`` entries were
    never filtered out; they now test ``x is not None``.

    :param config_file: scenario/launch xml holding cpu_affinity settings
    :param id_cpus_per_vm_dic: {vm_id: [pcpu ids]} parsed from the xml
    :param item: vm pcpu_id item in xml
    :return: error information dict (empty when the check passes)
    """
    err_dic = {}
    use_cpus = []
    cpu_sharing_enabled = True
    cpu_sharing = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "SCHEDULER")
    if cpu_sharing == "SCHED_NOOP":
        cpu_sharing_enabled = False

    # NOTE: fetched but unused by the checks below; kept for parity with the
    # original best-effort read of the config file
    cpu_affinity = common.get_leaf_tag_map(config_file, "cpu_affinity", "pcpu_id")

    # a pcpu may only appear in more than one VM when CPU sharing is enabled
    for vm_i in id_cpus_per_vm_dic.keys():
        for cpu in id_cpus_per_vm_dic[vm_i]:
            if cpu in use_cpus and not cpu_sharing_enabled:
                key = "vm:id={},{}".format(vm_i, item)
                err_dic[key] = "The same pcpu was configurated in <pcpu_id>/<cpu_affinity>, but CPU sharing is disabled by 'SCHED_NOOP'. Please re-configurate them!"
                return err_dic
            else:
                use_cpus.append(cpu)

    pre_launch_cpus = []
    post_launch_cpus = []
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_i not in id_cpus_per_vm_dic.keys():
            continue
        elif VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            # FIX: was `if not None` (always True); drop the None placeholders
            cpus = [x for x in id_cpus_per_vm_dic[vm_i] if x is not None]
            pre_launch_cpus.extend(cpus)
        elif VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
            cpus = [x for x in id_cpus_per_vm_dic[vm_i] if x is not None]
            post_launch_cpus.extend(cpus)

        # duplicate cpus assigned to the same VM check
        cpus_vm_i = id_cpus_per_vm_dic[vm_i]
        for cpu_id in cpus_vm_i:
            if cpus_vm_i.count(cpu_id) >= 2:
                key = "vm:id={},{}".format(vm_i, item)
                err_dic[key] = "VM should not use the same pcpu id:{}".format(cpu_id)
                return err_dic

    if pre_launch_cpus:
        for pcpu in pre_launch_cpus:
            if pre_launch_cpus.count(pcpu) >= 2:
                key = "Pre launched VM cpu_affinity"
                err_dic[key] = "Pre_launched_vm vm should not have the same cpus assignment"
            # pre-launched VMs own their pcpus exclusively
            if pcpu in post_launch_cpus:
                key = "Pre launched vm and Post launchded VM cpu_affinity"
                err_dic[key] = "Pre launched_vm and Post launched vm should not have the same cpus assignment"
    return err_dic
def populate_mba_delay_mask(rdt_res, common_clos_max, config):
    """
    Populate the mba delay mask and msr index for memory resource

    :param rdt_res: it is a string representing the RDT resource
    :param common_clos_max: Least common clos supported by all RDT resource
    :param config: it is a file pointer of board information for writing to
    :return: error dictionary (empty on success)
    """
    err_dic = {}
    mba_delay_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "MBA_DELAY")
    mba_max_delay_settings_len = len(mba_delay_list)
    # the scenario must provide either no delays at all or exactly one per clos entry
    if mba_max_delay_settings_len != 0 and \
        mba_max_delay_settings_len != common_clos_max:
        err_dic["board config: generate board.c failed"] = "Number of \
MBA_DELAY values in scenaio xml should equal to MAX_PLATFORM_CLOS_NUM"
        return err_dic

    for idx in range(common_clos_max):
        print("\t{", file=config)
        if idx < mba_max_delay_settings_len:
            print("\t\t.mba_delay = {0}U,".format(mba_delay_list[idx]), file=config)
        else:
            # pad unset entries with a zero delay
            print("\t\t.mba_delay = 0U,", file=config)
        print("\t\t.msr_index = MSR_IA32_{0}_MASK_BASE + {1},".format(
            rdt_res, idx), file=config)
        print("\t},", file=config)

    return err_dic
def get_info(self):
    """
    Get all items which belong to this class
    :return: None
    """
    leaf = common.get_leaf_tag_map
    self.kern_name = leaf(self.scenario_info, "os_config", "name")
    self.kern_type = leaf(self.scenario_info, "os_config", "kern_type")
    self.kern_mod = leaf(self.scenario_info, "os_config", "kern_mod")
    self.kern_args = leaf(self.scenario_info, "os_config", "bootargs")
    # the serial console lives under DEBUG_OPTIONS, not os_config
    self.kern_console = common.get_hv_item_tag(self.scenario_info, "DEBUG_OPTIONS", "SERIAL_CONSOLE")
    self.kern_load_addr = leaf(self.scenario_info, "os_config", "kern_load_addr")
    self.kern_entry_addr = leaf(self.scenario_info, "os_config", "kern_entry_addr")
    self.kern_root_dev = leaf(self.scenario_info, "os_config", "rootfs")
    self.ramdisk_mod = leaf(self.scenario_info, "os_config", "ramdisk_mod")
    self.kern_args_append = leaf(self.scenario_info, "boot_private", "bootargs")
def vcpu_clos_check(cpus_per_vm, clos_per_vm, prime_item, item):
    """
    Validate each VM's vcpu_clos list: it must match the pcpu list length and,
    when CDP is enabled, every clos value must be set and below the platform
    clos limit. Errors are reported through the module-level ERR_LIST.

    Fixes: removed the duplicated assignment ``cdp_enabled = cdp_enabled = ...``
    and the redundant ``not clos_val or clos_val == None`` (None is falsy, so
    ``not clos_val`` alone is equivalent).

    :param cpus_per_vm: {vm_id: [pcpu ids]}
    :param clos_per_vm: {vm_id: [vcpu_clos values]}
    :param prime_item: parent xml item name used in the error key
    :param item: xml item name used in the error key
    """
    common_clos_max = 0
    cdp_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CDP_ENABLED")
    (rdt_resources, rdt_res_clos_max, _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
    if len(rdt_resources) != 0 and len(rdt_res_clos_max) != 0:
        common_clos_max = min(rdt_res_clos_max)
        if cdp_enabled == 'y':
            # CDP splits each clos into a code/data pair, halving the usable count
            common_clos_max //= 2

    for vm_i, vcpus in cpus_per_vm.items():
        clos_per_vm_len = 0
        if vm_i in clos_per_vm:
            clos_per_vm_len = len(clos_per_vm[vm_i])
        if clos_per_vm_len != len(vcpus):
            key = "vm:id={},{},{}".format(vm_i, prime_item, item)
            ERR_LIST[key] = "'vcpu_clos' number should be equal 'pcpu_id' number for VM{}".format(vm_i)
            return

        if cdp_enabled == 'y' and common_clos_max != 0:
            for clos_val in clos_per_vm[vm_i]:
                if not clos_val:
                    key = "vm:id={},{},{}".format(vm_i, prime_item, item)
                    ERR_LIST[key] = "'vcpu_clos' should be not None"
                    return
                if int(clos_val) >= common_clos_max:
                    key = "vm:id={},{},{}".format(vm_i, prime_item, item)
                    ERR_LIST[key] = "CDP_ENABLED=y, the clos value should not be greater than {} for VM{}".format(common_clos_max - 1, vm_i)
                    return
def get_args(self):
    """
    Collect launch-config arguments from the launch and scenario xml files.

    Fix: the shm_regions filter previously compared the *bound method*
    ``x.strip`` against '' (always True, so nothing was filtered); it now
    calls ``x.strip()`` so whitespace-only entries are dropped as intended.
    """
    self.args["user_vm_type"] = common.get_leaf_tag_map(self.launch_info, "user_vm_type")
    self.args["rtos_type"] = common.get_leaf_tag_map(self.launch_info, "rtos_type")
    self.args["mem_size"] = common.get_leaf_tag_map(self.launch_info, "mem_size")
    self.args["vbootloader"] = common.get_leaf_tag_map(self.launch_info, "vbootloader")
    self.args["vuart0"] = common.get_leaf_tag_map(self.launch_info, "vuart0")
    self.args["cpu_sharing"] = common.get_hv_item_tag(self.scenario_info, "FEATURES", "SCHEDULER")
    self.args["pm_channel"] = common.get_leaf_tag_map(self.launch_info, "poweroff_channel")
    self.args["cpu_affinity"] = common.get_leaf_tag_map(self.launch_info, "cpu_affinity", "pcpu_id")

    # get default cpu_affinity from scenario file
    scenario_cpu_aff = common.get_leaf_tag_map(self.scenario_info, "cpu_affinity", "pcpu_id")
    for vm_id, cpu_ids in self.args["cpu_affinity"].items():
        cpu_ids = [x for x in cpu_ids if x is not None]
        if cpu_ids:
            continue
        # launch file did not pin any cpus: fall back to the scenario defaults
        self.args["cpu_affinity"][vm_id] = scenario_cpu_aff[vm_id]

    self.args["shm_enabled"] = common.get_hv_item_tag(self.scenario_info, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    self.args["shm_regions"] = common.get_leaf_tag_map(self.launch_info, "shm_regions", "shm_region")
    for vmid, shm_regions in self.args["shm_regions"].items():
        if self.args["shm_enabled"] == 'y':
            # keep only non-empty region strings (note the strip() *call*)
            self.args["shm_regions"][vmid] = [
                x for x in shm_regions if (x is not None and x.strip() != '')
            ]
        else:
            # ivshmem disabled: no regions apply
            self.args["shm_regions"][vmid] = []

    self.args["xhci"] = common.get_leaf_tag_map(self.launch_info, "usb_xhci")
    self.args["communication_vuarts"] = common.get_leaf_tag_map(self.launch_info, "communication_vuarts", "communication_vuart")
    self.args["console_vuart"] = common.get_leaf_tag_map(self.launch_info, "console_vuart")
    self.args["enable_ptm"] = common.get_leaf_tag_map(self.launch_info, "enable_ptm")
    self.args["allow_trigger_s5"] = common.get_leaf_tag_map(self.launch_info, "allow_trigger_s5")
def get_info(self):
    """Read every CAPACITIES limit from the hv config file."""
    fetch = common.get_hv_item_tag
    self.max_emu_mmio_regions = fetch(self.hv_file, "CAPACITIES", "MAX_EMULATED_MMIO")
    self.max_pt_irq_entries = fetch(self.hv_file, "CAPACITIES", "MAX_PT_IRQ_ENTRIES")
    self.max_ioapic_num = fetch(self.hv_file, "CAPACITIES", "MAX_IOAPIC_NUM")
    self.max_ioapic_lines = fetch(self.hv_file, "CAPACITIES", "MAX_IOAPIC_LINES")
    self.max_ir_entries = fetch(self.hv_file, "CAPACITIES", "MAX_IR_ENTRIES")
    self.iommu_bus_num = fetch(self.hv_file, "CAPACITIES", "IOMMU_BUS_NUM")
    self.max_pci_dev_num = fetch(self.hv_file, "CAPACITIES", "MAX_PCI_DEV_NUM")
    self.max_msix_table_num = fetch(self.hv_file, "CAPACITIES", "MAX_MSIX_TABLE_NUM")
def write_shmem_regions(config):
    """
    Generate the IVSHMEM region name/size macros into the config header.

    Fix: the size conversion in the macro loop used a bare ``except:``;
    narrowed to ``except ValueError`` so only a malformed number is padded
    with 0 and real errors are not masked.

    :param config: file pointer of the board config header being written
    """
    raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_regions = []
    shmem_dev_num = 0
    # parse "name,size,vmid[:vmid...]" entries; malformed entries are skipped,
    # and one ivshmem device is counted per listed vm id
    for raw_shm in raw_shmem_regions:
        if raw_shm is None or raw_shm.strip() == '':
            continue
        raw_shm_splited = raw_shm.split(',')
        if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
                and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
            shmem_regions.append((raw_shm_splited[0].strip(), raw_shm_splited[1].strip(), raw_shm_splited[2].strip().split(':')))
            shmem_dev_num += len(raw_shm_splited[2].strip().split(':'))

    # region-name macros
    if len(shmem_regions) > 0:
        shmem_cnt = 0
        print("", file=config)
        for shmem_region in shmem_regions:
            print("#define IVSHMEM_SHM_REGION_%d\t" % shmem_cnt, end="", file=config)
            print('"{}"'.format(shmem_region[0]), file=config)
            shmem_cnt += 1

    print("", file=config)
    print("/*", file=config)
    print(" * The IVSHMEM_SHM_SIZE is the sum of all memory regions.", file=config)
    print(" * The size range of each memory region is [2MB, 512MB] and is a power of 2.", file=config)
    print(" */", file=config)
    total_shm_size = 0
    if len(shmem_regions) > 0:
        for shmem_region in shmem_regions:
            int_size = 0
            size = shmem_region[1]
            try:
                # size field is in MB; convert to bytes
                int_size = int(size) * 0x100000
            except Exception as e:
                print('the format of shm size error: ', str(e))
            total_shm_size += int_size

    print("#define IVSHMEM_SHM_SIZE\t{}UL".format(hex(total_shm_size)), file=config)
    print("#define IVSHMEM_DEV_NUM\t\t{}UL".format(shmem_dev_num), file=config)
    print("", file=config)
    print("/* All user defined memory regions */", file=config)
    if len(shmem_regions) == 0:
        print("#define IVSHMEM_SHM_REGIONS", file=config)
    else:
        print("#define IVSHMEM_SHM_REGIONS \\", file=config)
        shmem_cnt = 0
        for shmem in shmem_regions:
            print("\t{ \\", file=config)
            print('\t\t.name = IVSHMEM_SHM_REGION_{}, \\'.format(shmem_cnt), file=config)
            try:
                int_size = int(shmem[1]) * 0x100000
            except ValueError:
                # FIX: was a bare "except:"; a non-numeric size falls back to 0
                int_size = 0
            print('\t\t.size = {}UL,\t\t/* {}M */ \\'.format(hex(int_size), shmem[1]), file=config)
            # the last entry ends the macro, so it must not carry a continuation
            if shmem_cnt < len(shmem_regions) - 1:
                print("\t}, \\", file=config)
            else:
                print("\t},", file=config)
            shmem_cnt += 1
    print("", file=config)
def is_rdt_enabled():
    """
    Returns True if RDT enabled else False
    """
    flag = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "RDT_ENABLED")
    # enabled only when the board supports RDT *and* the scenario turns it on
    if is_rdt_supported() and flag == 'y':
        return True
    return False
def pci_dev_num_per_vm_gen(config):
    """
    Emit one VM<i>_CONFIG_PCI_DEV_NUM macro per VM, counting passthrough
    devices, ivshmem devices and pci vuarts (plus a vhostbridge for
    pre-launched VMs that have any device at all).

    :param config: file pointer of the generated header being written
    """
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pt_pci_num = scenario_cfg_lib.get_pt_pci_num(scenario_cfg_lib.get_pt_pci_devs(pci_items))
    ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    shmem_num = scenario_cfg_lib.get_shmem_num(scenario_cfg_lib.get_shmem_regions(ivshmem_region))
    pci_vuarts_num = scenario_cfg_lib.get_pci_vuart_num(common.get_vuart_info(common.SCENARIO_INFO_FILE))

    for vm_i, vm_type in common.VM_TYPES.items():
        load_type = scenario_cfg_lib.VM_DB[vm_type]['load_type']
        if load_type == "SOS_VM":
            # the service VM never gets a macro here
            continue
        num = 0
        shmem_num_i = 0
        if shmem_enabled == 'y' and vm_i in shmem_num.keys():
            shmem_num_i = shmem_num[vm_i]
        if load_type == "POST_LAUNCHED_VM":
            num = shmem_num_i + pci_vuarts_num[vm_i]
        elif load_type == "PRE_LAUNCHED_VM":
            num = pt_pci_num[vm_i] + shmem_num_i + pci_vuarts_num[vm_i]
            if pt_pci_num[vm_i] > 0 or shmem_num_i > 0 or pci_vuarts_num[vm_i] > 0:
                # if there is a passthrough device, ivshmem or vuart, a vhostbridge is needed
                num += 1
        if num > 0:
            print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, num), file=config)
    print("", file=config)
def get_info(self):
    """Read all MEMORY sizing items from the hv config file."""
    fetch = common.get_hv_item_tag
    self.stack_size = fetch(self.hv_file, "MEMORY", "STACK_SIZE")
    self.low_ram_size = fetch(self.hv_file, "MEMORY", "LOW_RAM_SIZE")
    self.hv_ram_size = fetch(self.hv_file, "MEMORY", "HV_RAM_SIZE")
    self.hv_ram_start = fetch(self.hv_file, "MEMORY", "HV_RAM_START")
    self.platform_ram_size = fetch(self.hv_file, "MEMORY", "PLATFORM_RAM_SIZE")
    self.sos_ram_size = fetch(self.hv_file, "MEMORY", "SOS_RAM_SIZE")
    self.uos_ram_size = fetch(self.hv_file, "MEMORY", "UOS_RAM_SIZE")
def is_cdp_enabled():
    """
    Returns True if platform supports RDT/CDP else False
    """
    rdt_on = is_rdt_enabled()
    cdp_flag = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CDP_ENABLED")
    # CDP requires RDT to be enabled first
    if rdt_on and cdp_flag == 'y':
        return True
    return False
def set_ivshmem(self, ivshmem_regions):
    """
    set ivshmem regions for VMs.
    :param ivshmem_regions: raw ivshmem region strings from the scenario file
    :return: None
    """
    self.raw_shmem_regions = ivshmem_regions
    self.shmem_enabled = common.get_hv_item_tag(
        self.scenario_info, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    parsed = scenario_cfg_lib.get_shmem_regions(ivshmem_regions)
    self.shmem_regions = parsed
    self.shmem_num = scenario_cfg_lib.get_shmem_num(parsed)
def get_pci_dev_num_per_vm():
    """
    Build {vm_id: pci device count} from passthrough devices, ivshmem
    devices and pci vuarts, keyed by each VM's load order.
    """
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pt_pci_num = get_pt_pci_num(get_pt_pci_devs(pci_items))
    ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    shmem_num = get_shmem_num(get_shmem_regions(ivshmem_region))
    vuarts_num = get_pci_vuart_num(common.get_vuart_info(common.SCENARIO_INFO_FILE))

    pci_dev_num_per_vm = {}
    for vm_i, load_order in common.LOAD_ORDER.items():
        shmem_num_i = 0
        if shmem_enabled == 'y' and vm_i in shmem_num.keys():
            shmem_num_i = shmem_num[vm_i]
        if load_order == "POST_LAUNCHED_VM":
            pci_dev_num_per_vm[vm_i] = shmem_num_i + vuarts_num[vm_i]
        elif load_order == "PRE_LAUNCHED_VM":
            # pre-launched VMs additionally carry their passthrough devices
            pci_dev_num_per_vm[vm_i] = pt_pci_num[vm_i] + shmem_num_i + vuarts_num[vm_i]
        elif load_order == "SERVICE_VM":
            pci_dev_num_per_vm[vm_i] = shmem_num_i + vuarts_num[vm_i]
    return pci_dev_num_per_vm
def get_info(self):
    """Read every FEATURES toggle from the hv config file."""
    fetch = common.get_hv_item_tag
    self.multiboot2 = fetch(self.hv_file, "FEATURES", "MULTIBOOT2")
    self.scheduler = fetch(self.hv_file, "FEATURES", "SCHEDULER")
    self.reloc = fetch(self.hv_file, "FEATURES", "RELOC")
    self.hyperv_enabled = fetch(self.hv_file, "FEATURES", "HYPERV_ENABLED")
    self.acpi_parse_enabled = fetch(self.hv_file, "FEATURES", "ACPI_PARSE_ENABLED")
    self.l1d_flush_vmentry_enabled = fetch(self.hv_file, "FEATURES", "L1D_VMENTRY_ENABLED")
    self.mce_on_psc_workaround_disabled = fetch(self.hv_file, "FEATURES", "MCE_ON_PSC_DISABLED")
    self.iommu_enforce_snp = fetch(self.hv_file, "FEATURES", "IOMMU_ENFORCE_SNP")
def parse_mem():
    """
    Allocate BAR0/BAR2 addresses for each configured ivshmem region and
    record them in PCI_DEV_BAR_DESC.shm_bar_dic keyed by "<index>_<name>",
    reserving the chosen ranges in the global USED_RAM_RANGE map.
    """
    raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")

    global USED_RAM_RANGE
    # release ranges already reserved for previously computed shm bars
    for shm_name, shm_bar_dic in PCI_DEV_BAR_DESC.shm_bar_dic.items():
        if 0 in shm_bar_dic.keys() and int(shm_bar_dic[0].addr, 16) in USED_RAM_RANGE.keys():
            del USED_RAM_RANGE[int(shm_bar_dic[0].addr, 16)]
        # BAR2 addresses were stored with a +0xC offset; undo it to find the key
        if 2 in shm_bar_dic.keys() and int(shm_bar_dic[2].addr, 16) - 0xC in USED_RAM_RANGE.keys():
            del USED_RAM_RANGE[int(shm_bar_dic[2].addr, 16) - 0xC]

    idx = 0
    for shm in raw_shmem_regions:
        if shm is None or shm.strip() == '':
            continue
        shm_splited = shm.split(',')
        name = shm_splited[0].strip()
        size = shm_splited[1].strip()
        try:
            # size field is a decimal MB count; convert to bytes
            int_size = int(size) * 0x100000
        except:
            int_size = 0

        # BAR0: reserve a small (0x100) window above 2G, 2M-aligned
        ram_range = get_ram_range()
        tmp_bar_dict = {}
        hv_start_offset = 0x80000000
        ret_start_addr = find_avl_memory(ram_range, str(0x200100), hv_start_offset)
        bar_mem_0 = Bar_Mem()
        bar_mem_0.addr = hex(common.round_up(int(ret_start_addr, 16), 0x200000))
        USED_RAM_RANGE[int(bar_mem_0.addr, 16)] = 0x100
        tmp_bar_dict[0] = bar_mem_0

        # BAR2: reserve the shared-memory range above 4G, 2M-aligned, stored with +0xC
        ram_range = get_ram_range()
        hv_start_offset2 = 0x100000000
        ret_start_addr2 = find_avl_memory(ram_range, str(int_size + 0x200000), hv_start_offset2)
        bar_mem_2 = Bar_Mem()
        bar_mem_2.addr = hex(common.round_up(int(ret_start_addr2, 16), 0x200000) + 0xC)
        # NOTE(review): alignment 0x20000 here differs from the 0x200000 used for
        # the address above -- looks like a typo that would reserve a wrong key
        # when the start address is not already 2M-aligned; confirm upstream.
        USED_RAM_RANGE[common.round_up(int(ret_start_addr2, 16), 0x20000)] = int_size
        tmp_bar_dict[2] = bar_mem_2

        PCI_DEV_BAR_DESC.shm_bar_dic[str(idx) + '_' + name] = tmp_bar_dict
        idx += 1
def populate_clos_mask_msr(rdt_res, common_clos_max, config):
    """
    Populate the clos bitmask and msr index for a given RDT resource
    :param rdt_res: it is a string representing the RDT resource
    :param common_clos_max: Least common clos supported by all RDT resource
    :param config: it is a file pointer of board information for writing to
    """
    cat_mask_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CLOS_MASK")
    defined = len(cat_mask_list)
    for idx in range(common_clos_max):
        # entries beyond the configured list fall back to the all-ones 0xff mask
        mask = cat_mask_list[idx] if idx < defined else "0xff"
        print("\t{", file=config)
        print("\t\t.clos_mask = {0}U,".format(mask), file=config)
        print("\t\t.msr_index = MSR_IA32_{0}_MASK_BASE + {1},".format(rdt_res, idx), file=config)
        print("\t},", file=config)
def parser_hv_console():
    """
    There may be 3 types in the console item
    1. BDF:(00:18.2) seri:/dev/ttyS2
    2. /dev/ttyS2
    3. ttyS2
    """
    err_dic = {}
    ttys = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "DEBUG_OPTIONS", "SERIAL_CONSOLE")
    if not ttys:
        # no serial console configured
        return (err_dic, '')
    if 'BDF' in ttys or '/dev' in ttys:
        # strip everything up to the tty name (third '/'-separated field)
        return (err_dic, ttys.split('/')[2])
    # already a bare tty name
    return (err_dic, ttys)
def get_args(self):
    """Collect launch-config arguments from the launch and scenario xml files."""
    leaf = common.get_leaf_tag_map
    self.args["uos_type"] = leaf(self.launch_info, "uos_type")
    self.args["rtos_type"] = leaf(self.launch_info, "rtos_type")
    self.args["mem_size"] = leaf(self.launch_info, "mem_size")
    self.args["gvt_args"] = leaf(self.launch_info, "gvt_args")
    self.args["vbootloader"] = leaf(self.launch_info, "vbootloader")
    self.args["vuart0"] = leaf(self.launch_info, "vuart0")
    # cpu sharing comes from the scenario file, not the launch file
    self.args["cpu_sharing"] = common.get_hv_item_tag(self.scenario_info, "FEATURES", "SCHEDULER")
    self.args["pm_channel"] = leaf(self.launch_info, "poweroff_channel")
    self.args["cpu_affinity"] = leaf(self.launch_info, "cpu_affinity", "pcpu_id")
    self.args["xhci"] = leaf(self.launch_info, "usb_xhci")
def generate_file(config):
    """
    Start to generate board.c

    :param config: it is a file pointer of board information for writing to
    :return: error dictionary (empty on success)
    """
    board_cfg_lib.get_valid_irq(common.BOARD_INFO_FILE)

    # get the vuart0/vuart1 which user chosed from scenario.xml of board_private section
    (err_dic, ttys_n) = board_cfg_lib.parser_hv_console()
    if err_dic:
        return err_dic

    # parse sos_bootargs/rootfs/console
    (err_dic, sos_cmdlines, sos_rootfs, vuart0_dic, vuart1_dic) = parse_boot_info()
    if err_dic:
        return err_dic

    if vuart0_dic:
        # parse to get poart/base of vuart0/vuart1
        vuart0_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart0_dic.keys())[0]]
        vuart0_irq = vuart0_dic[list(vuart0_dic.keys())[0]]

    vuart1_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart1_dic.keys())[0]]
    vuart1_irq = vuart1_dic[list(vuart1_dic.keys())[0]]

    # parse the setting ttys vuatx dic: {vmid:base/irq}
    vuart0_setting = Vuart()
    vuart1_setting = Vuart()
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)

    # sos command lines information
    sos_cmdlines = [i for i in sos_cmdlines[0].split() if i != '']

    # get native rootfs list from board_info.xml
    (root_devs, root_dev_num) = board_cfg_lib.get_rootfs(common.BOARD_INFO_FILE)

    # start to generate misc_cfg.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print("{}".format(MISC_CFG_HEADER), file=config)
    print("", file=config)

    # define rootfs with macro
    #for i in range(root_dev_num):
    #    print('#define ROOTFS_{}\t\t"root={} "'.format(i, root_devs[i]), file=config)

    # sos rootfs and console
    if "SOS_VM" in common.VM_TYPES.values():
        print('#define SOS_ROOTFS\t\t"root={} "'.format(sos_rootfs[0]), file=config)
        if ttys_n:
            print('#define SOS_CONSOLE\t\t"console={} "'.format(ttys_n), file=config)
        else:
            print('#define SOS_CONSOLE\t\t" "', file=config)

    # sos com base/irq
    i_type = 0
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_type == "SOS_VM":
            i_type = vm_i
            break

    if "SOS_VM" in common.VM_TYPES.values():
        if vuart0_dic:
            print("#define SOS_COM1_BASE\t\t{}U".format(vuart0_port_base), file=config)
            print("#define SOS_COM1_IRQ\t\t{}U".format(vuart0_irq), file=config)
        else:
            # no vuart0 configured: emit zero placeholders
            print("#define SOS_COM1_BASE\t\t0U", file=config)
            print("#define SOS_COM1_IRQ\t\t0U", file=config)
        if vuart1_setting[i_type]['base'] != "INVALID_COM_BASE":
            print("#define SOS_COM2_BASE\t\t{}U".format(vuart1_port_base), file=config)
            print("#define SOS_COM2_IRQ\t\t{}U".format(vuart1_irq), file=config)

    # sos boot command line
    print("", file=config)
    if "SOS_VM" in common.VM_TYPES.values():
        sos_bootarg_diff(sos_cmdlines, config)
        print("", file=config)

    if board_cfg_lib.is_rdt_supported():
        print("", file=config)
        common_clos_max = board_cfg_lib.get_common_clos_max()
        max_cache_clos_entries = common_clos_max
        if board_cfg_lib.is_cdp_enabled():
            # CDP needs twice the entries (separate code/data masks per clos)
            max_cache_clos_entries = 2 * common_clos_max
        print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(max_cache_clos_entries), file=config)
        (rdt_resources, rdt_res_clos_max, _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
        cat_mask_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CLOS_MASK")
        mba_delay_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "MBA_DELAY")
        idx = 0
        for mba_delay_mask in mba_delay_list:
            print("#define MBA_MASK_{}\t\t\t{}U".format(idx, mba_delay_mask), file=config)
            idx += 1
        idx = 0
        for cat_mask in cat_mask_list:
            print("#define CLOS_MASK_{}\t\t\t{}U".format(idx, cat_mask), file=config)
            idx += 1
        print("", file=config)

    vm0_pre_launch = False
    common.get_vm_types()
    for vm_idx, vm_type in common.VM_TYPES.items():
        if vm_idx == 0 and scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            vm0_pre_launch = True

    if vm0_pre_launch and board_cfg_lib.is_tpm_passthru():
        print("#define VM0_PASSTHROUGH_TPM", file=config)
        print("#define VM0_TPM_BUFFER_BASE_ADDR 0xFED40000UL", file=config)
        print("#define VM0_TPM_BUFFER_SIZE 0x5000UL", file=config)
        print("", file=config)

    print("{}".format(MISC_CFG_END), file=config)

    return err_dic
def gen_rdt_res(config):
    """
    Get RDT resource (L2, L3, MBA) information

    :param config: it is a file pointer of board information for writing to
    :return: error dictionary (empty on success)
    """
    err_dic = {}
    rdt_res_str = ""
    # presence flags indexed by RDT.L2 / RDT.L3 / RDT.MBA
    res_present = [0, 0, 0]
    (rdt_resources, rdt_res_clos_max, _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
    # NOTE(review): this value is immediately overwritten below and
    # rdt_res_clos_max is never read; looks like leftover code — confirm.
    common_clos_max = board_cfg_lib.get_common_clos_max()
    cat_mask_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CLOS_MASK")
    mba_delay_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "MBA_DELAY")

    # TODO: Since use the MAX_PLATFORM_CLOS_NUM for L2/L3/MBA, so use the minimal number of them
    common_clos_max = min(len(cat_mask_list), len(mba_delay_list))

    if common_clos_max > MSR_IA32_L2_MASK_END - MSR_IA32_L2_MASK_BASE or\
        common_clos_max > MSR_IA32_L3_MASK_END - MSR_IA32_L3_MASK_BASE:
        err_dic["board config: generate board.c failed"] = "CLOS MAX should be less than reserved adress region length of L2/L3 cache"
        return err_dic

    print("\n#ifdef CONFIG_RDT_ENABLED", file=config)
    if len(rdt_resources) == 0 or common_clos_max == 0:
        # no RDT resource or no clos configured: emit empty placeholder arrays
        print("struct platform_clos_info platform_{0}_clos_array[MAX_PLATFORM_CLOS_NUM];".format("l2"), file=config)
        print("struct platform_clos_info platform_{0}_clos_array[MAX_PLATFORM_CLOS_NUM];".format("l3"), file=config)
        print("struct platform_clos_info platform_{0}_clos_array[MAX_PLATFORM_CLOS_NUM];".format("mba"), file=config)
    else:
        for idx, rdt_res in enumerate(rdt_resources):
            if rdt_res == "L2":
                rdt_res_str = "l2"
                print("struct platform_clos_info platform_{0}_clos_array[{1}] = {{".format(rdt_res_str, "MAX_PLATFORM_CLOS_NUM"), file=config)
                populate_clos_mask_msr(rdt_res, cat_mask_list, config)
                print("};\n", file=config)
                res_present[RDT.L2.value] = 1
            elif rdt_res == "L3":
                rdt_res_str = "l3"
                print("struct platform_clos_info platform_{0}_clos_array[{1}] = {{".format(rdt_res_str, "MAX_PLATFORM_CLOS_NUM"), file=config)
                populate_clos_mask_msr(rdt_res, cat_mask_list, config)
                print("};\n", file=config)
                res_present[RDT.L3.value] = 1
            elif rdt_res == "MBA":
                rdt_res_str = "mba"
                print("struct platform_clos_info platform_{0}_clos_array[{1}] = {{".format(rdt_res_str, "MAX_PLATFORM_CLOS_NUM"), file=config)
                err_dic = populate_mba_delay_mask(rdt_res, mba_delay_list, config)
                print("};\n", file=config)
                res_present[RDT.MBA.value] = 1
            else:
                err_dic['board config: generate board.c failed'] = "The input of {} was corrupted!".format(common.BOARD_INFO_FILE)
                return err_dic

        # emit empty placeholders for resources the board does not expose
        if res_present[RDT.L2.value] == 0:
            print("struct platform_clos_info platform_{0}_clos_array[{1}];".format("l2", "MAX_PLATFORM_CLOS_NUM"), file=config)
        if res_present[RDT.L3.value] == 0:
            print("struct platform_clos_info platform_{0}_clos_array[{1}];".format("l3", "MAX_PLATFORM_CLOS_NUM"), file=config)
        if res_present[RDT.MBA.value] == 0:
            print("struct platform_clos_info platform_{0}_clos_array[{1}];".format("mba", "MAX_PLATFORM_CLOS_NUM"), file=config)
    print("#endif", file=config)
    print("", file=config)

    return err_dic
def _parse_shm_size(size):
    """
    Convert an IVSHMEM size string (decimal, or hex like "0x200000") to an int.

    Returns 0 and prints a diagnostic when the string is malformed, keeping
    the tool's original best-effort behavior instead of aborting generation.
    """
    try:
        return int(size) if size.isdecimal() else int(size, 16)
    except ValueError as e:
        print('the format of shm size error: ', str(e))
        return 0


def write_shmem_regions(config):
    """
    Emit the IVSHMEM shared-memory macros and mem_regions[] table.

    Reads IVSHMEM_REGION entries ("name,size,vmid1:vmid2:...") from the
    scenario file and writes IVSHMEM_SHM_SIZE, IVSHMEM_DEV_NUM and the
    struct ivshmem_shm_region array to the generated C file.

    :param config: file pointer of the C file being written
    """
    raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_regions = []
    shmem_dev_num = 0
    for raw_shm in raw_shmem_regions:
        if raw_shm is None or raw_shm.strip() == '':
            continue
        fields = raw_shm.split(',')
        # A region entry must be "name,size,vm_id_list" with non-empty name
        # and size; malformed entries are skipped.  (str.split(':') always
        # yields at least one element, so no extra length check is needed.)
        if len(fields) == 3 and fields[0].strip() != '' and fields[1].strip() != '':
            vm_ids = fields[2].strip().split(':')
            shmem_regions.append((fields[0].strip(), fields[1].strip(), vm_ids))
            # one ivshmem device is instantiated per VM sharing the region
            shmem_dev_num += len(vm_ids)

    print("", file=config)
    print("/*", file=config)
    print(" * The IVSHMEM_SHM_SIZE is the sum of all memory regions.", file=config)
    print(
        " * The size range of each memory region is [2M, 1G) and is a power of 2.",
        file=config)
    print(" */", file=config)

    total_shm_size = sum(_parse_shm_size(region[1]) for region in shmem_regions)

    print("#define IVSHMEM_SHM_SIZE\t{}UL".format(hex(total_shm_size)), file=config)
    print("#define IVSHMEM_DEV_NUM\t\t{}UL".format(shmem_dev_num), file=config)
    print("", file=config)
    print("/* All user defined memory regions */", file=config)
    print("\nstruct ivshmem_shm_region mem_regions[] = {", file=config)
    for shmem_cnt, shmem in enumerate(shmem_regions):
        print("\t{", file=config)
        print('\t\t.name = IVSHMEM_SHM_REGION_{},'.format(shmem_cnt), file=config)
        # size comment is reported in MiB; // floors like the old int(x / 0x100000)
        int_m_size = _parse_shm_size(shmem[1]) // 0x100000
        print('\t\t.size = {}UL,\t\t/* {}M */'.format(shmem[1], int_m_size), file=config)
        print("\t},", file=config)
    print("};", file=config)
    print("", file=config)
def generate_file(config):
    """
    Generate the misc_cfg.h content for the board.

    Emits SOS rootfs/console/COM-port macros, SOS boot args, per-VM CPU
    affinity, RDT CLOS/MBA macros, VM0 TPM passthrough macros and the
    per-VM PCI/boot-arg/intx/ssram sections.
    (The body prints MISC_CFG_HEADER/MISC_CFG_END, i.e. misc_cfg.h content,
    not board.c as the previous docstring said.)

    :param config: file pointer the generated header is written to
    :return: err_dic error dictionary; empty/falsy on success
    """
    board_cfg_lib.get_valid_irq(common.BOARD_INFO_FILE)

    # get the vuart0/vuart1 which the user chose from scenario.xml of board_private section
    (err_dic, ttys_n) = board_cfg_lib.parser_hv_console()
    if err_dic:
        return err_dic

    # parse sos_bootargs/rootfs/console
    (err_dic, sos_cmdlines, sos_rootfs, vuart0_dic, vuart1_dic) = parse_boot_info()
    if err_dic:
        return err_dic

    if vuart0_dic:
        # parse to get port/base of vuart0
        vuart0_port_base = board_cfg_lib.LEGACY_TTYS[list(
            vuart0_dic.keys())[0]]
        vuart0_irq = vuart0_dic[list(vuart0_dic.keys())[0]]

    # NOTE(review): vuart1 base/irq are resolved unconditionally; if
    # parse_boot_info() can return an empty vuart1_dic this raises
    # IndexError — confirm against parse_boot_info().
    vuart1_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart1_dic.keys())[0]]
    vuart1_irq = vuart1_dic[list(vuart1_dic.keys())[0]]

    # per-VM vuart settings from the scenario file: {vmid: base/irq}
    vuart0_setting = Vuart()
    vuart1_setting = Vuart()
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)

    # sos command lines information: split the first boot-args entry into words
    sos_cmdlines = [i for i in sos_cmdlines[0].split() if i != '']

    # add maxcpus parameter into sos cmdlines if there are pre-launched VMs
    pcpu_list = board_cfg_lib.get_processor_info()
    cpu_affinity = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "cpu_affinity", "pcpu_id")
    pre_cpu_list = []
    sos_cpu_num = 0
    for vmid, cpu_list in cpu_affinity.items():
        if vmid in common.VM_TYPES and cpu_list != [None]:
            vm_type = common.VM_TYPES[vmid]
            load_type = ''
            if vm_type in scenario_cfg_lib.VM_DB:
                load_type = scenario_cfg_lib.VM_DB[vm_type]['load_type']
            if load_type == "PRE_LAUNCHED_VM":
                pre_cpu_list += cpu_list
            elif load_type == "SOS_VM":
                sos_cpu_num += len(cpu_list)
    # no explicit SOS affinity: SOS may use every pcpu not claimed by a pre-launched VM
    if sos_cpu_num == 0:
        sos_cpu_num_max = len(list(set(pcpu_list) - set(pre_cpu_list)))
    else:
        sos_cpu_num_max = sos_cpu_num
    if sos_cpu_num_max > 0:
        sos_cmdlines.append('maxcpus=' + str(sos_cpu_num_max))

    # get native rootfs list from board_info.xml
    (root_devs, root_dev_num) = board_cfg_lib.get_rootfs(common.BOARD_INFO_FILE)

    # start to generate misc_cfg.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print("{}".format(MISC_CFG_HEADER), file=config)
    print("", file=config)

    # define rootfs with macro (kept disabled; root_devs/root_dev_num are
    # otherwise unused at the moment)
    #for i in range(root_dev_num):
    #    print('#define ROOTFS_{}\t\t"root={} "'.format(i, root_devs[i]), file=config)

    # sos rootfs and console
    if "SOS_VM" in common.VM_TYPES.values():
        print('#define SOS_ROOTFS\t\t"root={} "'.format(sos_rootfs[0]), file=config)
        if ttys_n:
            print('#define SOS_CONSOLE\t\t"console={} "'.format(ttys_n), file=config)
        else:
            print('#define SOS_CONSOLE\t\t" "', file=config)

    # sos com base/irq: locate the SOS VM's id first
    i_type = 0
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_type == "SOS_VM":
            i_type = vm_i
            break

    if "SOS_VM" in common.VM_TYPES.values():
        if vuart0_dic:
            print("#define SOS_COM1_BASE\t\t{}U".format(vuart0_port_base), file=config)
            print("#define SOS_COM1_IRQ\t\t{}U".format(vuart0_irq), file=config)
        else:
            # no vuart0 configured: emit zeroed COM1 macros
            print("#define SOS_COM1_BASE\t\t0U", file=config)
            print("#define SOS_COM1_IRQ\t\t0U", file=config)

        if vuart1_setting[i_type]['base'] != "INVALID_COM_BASE":
            print("#define SOS_COM2_BASE\t\t{}U".format(vuart1_port_base), file=config)
            print("#define SOS_COM2_IRQ\t\t{}U".format(vuart1_irq), file=config)

    # sos boot command line
    print("", file=config)
    if "SOS_VM" in common.VM_TYPES.values():
        sos_bootarg_diff(sos_cmdlines, config)
    print("", file=config)

    cpu_affinity_per_vm_gen(config)

    common_clos_max = board_cfg_lib.get_common_clos_max()
    max_mba_clos_entries = common_clos_max
    max_cache_clos_entries = common_clos_max

    comments_max_clos = '''
/*
 * The maximum CLOS that is allowed by ACRN hypervisor,
 * its value is set to be least common Max CLOS (CPUID.(EAX=0x10,ECX=ResID):EDX[15:0])
 * among all supported RDT resources in the platform. In other words, it is
 * min(maximum CLOS of L2, L3 and MBA). This is done in order to have consistent
 * CLOS allocations between all the RDT resources.
 */'''

    # NOTE(review): the contents of comments_max_mba_clos and
    # comments_max_cache_clos look swapped relative to their names and to the
    # macros (MAX_MBA_/MAX_CACHE_CLOS_NUM_ENTRIES) they precede in the output
    # below — the generated C comments mislabel the macros; confirm and swap
    # the string texts if so.
    comments_max_mba_clos = '''
/*
 * Max number of Cache Mask entries corresponding to each CLOS.
 * This can vary if CDP is enabled vs disabled, as each CLOS entry
 * will have corresponding cache mask values for Data and Code when
 * CDP is enabled.
 */'''
    comments_max_cache_clos = '''
/* Max number of MBA delay entries corresponding to each CLOS. */'''

    if board_cfg_lib.is_cdp_enabled():
        # CDP doubles the cache mask entries (separate Data and Code masks)
        max_cache_clos_entries_cdp_enable = 2 * common_clos_max
        (res_info, rdt_res_clos_max, clos_max_mask_list) = board_cfg_lib.clos_info_parser(
            common.BOARD_INFO_FILE)
        common_clos_max_cdp_disable = min(rdt_res_clos_max)

        print("#ifdef CONFIG_RDT_ENABLED", file=config)
        print("#ifdef CONFIG_CDP_ENABLED", file=config)
        print(comments_max_clos, file=config)
        print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(common_clos_max), file=config)
        print(comments_max_cache_clos, file=config)
        print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(
            max_cache_clos_entries_cdp_enable), file=config)
        print("#else", file=config)
        print(comments_max_clos, file=config)
        print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(
            common_clos_max_cdp_disable), file=config)
        print(comments_max_cache_clos, file=config)
        print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(
            max_cache_clos_entries), file=config)
        print("#endif", file=config)
        print(comments_max_mba_clos, file=config)
        print("#define MAX_MBA_CLOS_NUM_ENTRIES\t{}U".format(
            max_mba_clos_entries), file=config)
    else:
        print("#ifdef CONFIG_RDT_ENABLED", file=config)
        print(comments_max_clos, file=config)
        print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(common_clos_max), file=config)
        print(comments_max_mba_clos, file=config)
        print("#define MAX_MBA_CLOS_NUM_ENTRIES\t{}U".format(
            max_mba_clos_entries), file=config)
        print(comments_max_cache_clos, file=config)
        print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(
            max_cache_clos_entries), file=config)

    # close the #ifdef now when RDT is unsupported; otherwise the
    # CLOS_MASK/MBA_MASK block below emits the matching #endif
    if not board_cfg_lib.is_rdt_supported():
        print("#endif", file=config)

    print("", file=config)

    if board_cfg_lib.is_rdt_supported():
        (rdt_resources, rdt_res_clos_max, _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)

        cat_mask_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CLOS_MASK")
        mba_delay_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "MBA_DELAY")
        idx = 0
        for mba_delay_mask in mba_delay_list:
            print("#define MBA_MASK_{}\t\t\t{}U".format(idx, mba_delay_mask), file=config)
            idx += 1
        idx = 0
        for cat_mask in cat_mask_list:
            print("#define CLOS_MASK_{}\t\t\t{}U".format(idx, cat_mask), file=config)
            idx += 1
        print("", file=config)

        clos_per_vm_gen(config)
        print("#endif", file=config)
        print("", file=config)

    # VM0 TPM passthrough macros, only when VM0 is pre-launched
    vm0_pre_launch = False
    common.get_vm_types()
    for vm_idx, vm_type in common.VM_TYPES.items():
        if vm_idx == 0 and scenario_cfg_lib.VM_DB[vm_type][
                'load_type'] == "PRE_LAUNCHED_VM":
            vm0_pre_launch = True

    if vm0_pre_launch and board_cfg_lib.is_tpm_passthru():
        tpm2_passthru_enabled = common.get_leaf_tag_map_bool(
            common.SCENARIO_INFO_FILE, "mmio_resources", "TPM2")
        if 0 in tpm2_passthru_enabled and tpm2_passthru_enabled[0]:
            print("#define VM0_PASSTHROUGH_TPM", file=config)
            print("#define VM0_TPM_BUFFER_BASE_ADDR 0xFED40000UL", file=config)
            # translate the fixed TPM CRB HPA window into VM0's GPA space
            gpa = common.hpa2gpa(0, 0xFED40000, 0x5000)
            print(
                "#define VM0_TPM_BUFFER_BASE_ADDR_GPA 0x{:X}UL".format(gpa),
                file=config)
            print("#define VM0_TPM_BUFFER_SIZE 0x5000UL", file=config)
            print("", file=config)

    # remaining per-VM sections
    pci_dev_num_per_vm_gen(config)

    boot_args_per_vm_gen(config)

    pt_intx_num_vm0_gen(config)

    swsram_base_gpa_gen(config)

    print("{}".format(MISC_CFG_END), file=config)

    return err_dic