def get_info(self):
    """
    Collect the vuart items of this scenario into instance attributes.

    Fills self.v0_vuart / self.v1_vuart with the id-0 / id-1 vuart
    settings and self.pci_vuarts with the PCI vuart settings, all parsed
    from self.scenario_info.

    :return: None
    """
    scenario = self.scenario_info
    self.v0_vuart = common.get_vuart_info_id(scenario, 0)
    self.v1_vuart = common.get_vuart_info_id(scenario, 1)
    self.pci_vuarts = common.get_vuart_info(scenario)
def check_console_vuart(launch_console_vuart, vuart0, scenario_info):
    """
    Validate the launch-config console_vuart settings against the scenario.

    Records a message in ERR_LIST and stops at the first conflict found:
    either console_vuart and vuart0 are both enabled for a uos, or the
    console_vuart is enabled at launch but disabled in the scenario.
    """
    vuarts = common.get_vuart_info(scenario_info)

    for uos_id, enabled in launch_console_vuart.items():
        key = 'uos:id={},console_vuart'.format(uos_id)
        if enabled != "Enable":
            continue
        # console_vuart and legacy vuart0 are mutually exclusive per uos.
        if vuart0[uos_id] == "Enable":
            ERR_LIST[key] = "vuart0 and console_vuart of uos {} should not be enabled " \
                "at the same time".format(uos_id)
            return
        # NOTE(review): membership is tested with int(uos_id) but indexing uses
        # uos_id as-is - presumably uos_id is already an int here; verify.
        if int(uos_id) in vuarts and 0 in vuarts[uos_id] \
                and vuarts[uos_id][0]['base'] == "INVALID_PCI_BASE":
            ERR_LIST[key] = "console_vuart of uos {} should be enabled in scenario setting".format(uos_id)
            return
def pci_dev_num_per_vm_gen(config):
    """
    Emit a "#define VMx_CONFIG_PCI_DEV_NUM" macro for each VM owning PCI devices.

    The per-VM count sums its ivshmem devices and PCI vuarts; pre-launched
    VMs additionally count their passthrough devices plus one virtual
    hostbridge when any virtual/passthrough device is present.

    :param config: file-like object the macros are written to
    :return: None
    """
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_devs = scenario_cfg_lib.get_pt_pci_devs(pci_items)
    pt_pci_num = scenario_cfg_lib.get_pt_pci_num(pci_devs)
    ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
    shmem_num = scenario_cfg_lib.get_shmem_num(shmem_regions)
    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
    pci_vuarts_num = scenario_cfg_lib.get_pci_vuart_num(vuarts)

    for vm_i, vm_type in common.VM_TYPES.items():
        load_type = scenario_cfg_lib.VM_DB[vm_type]['load_type']
        if load_type == "SOS_VM":
            # No macro is generated for the service VM.
            continue
        # ivshmem devices assigned to this VM, if the feature is enabled.
        shmem_cnt = shmem_num[vm_i] if shmem_enabled == 'y' and vm_i in shmem_num else 0
        dev_cnt = 0
        if load_type == "POST_LAUNCHED_VM":
            dev_cnt = shmem_cnt + pci_vuarts_num[vm_i]
        elif load_type == "PRE_LAUNCHED_VM":
            dev_cnt = pt_pci_num[vm_i] + shmem_cnt + pci_vuarts_num[vm_i]
            # A passthrough device, ivshmem region or pci vuart also needs a vhostbridge.
            if pt_pci_num[vm_i] > 0 or shmem_cnt > 0 or pci_vuarts_num[vm_i] > 0:
                dev_cnt += 1
        if dev_cnt > 0:
            print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, dev_cnt), file=config)
    print("", file=config)
def check_communication_vuart(launch_communication_vuarts, scenario_info):
    """
    Validate the launch-config communication vuarts against the scenario.

    Records a message in ERR_LIST and stops at the first problem found:
    a communication vuart requested at launch but not configured in the
    scenario, or communication_vuart 1 configured together with legacy
    vuart 1 for the same uos.

    :param launch_communication_vuarts: map of uos id to its list of vuart ids
    :param scenario_info: scenario file to validate against
    :return: None
    """
    vuarts = common.get_vuart_info(scenario_info)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)

    for uos_id, vuart_list in launch_communication_vuarts.items():
        vuart_key = 'uos:id={},communication_vuarts,communication_vuart'.format(uos_id)
        for vuart_id in vuart_list:
            if not vuart_id:
                return
            # Fix: guard against a uos with no vuart section at all (previously a
            # KeyError) and report it as a missing scenario configuration; also
            # fix the implicit string concatenation that produced "configuredin".
            if uos_id not in vuarts or int(vuart_id) not in vuarts[uos_id].keys():
                ERR_LIST[vuart_key] = "communication_vuart {} of uos {} should be configured " \
                    "in scenario setting.".format(vuart_id, uos_id)
                return
            # communication_vuart 1 conflicts with an enabled legacy vuart 1.
            if int(vuart_id) == 1 and vuarts[uos_id][1]['base'] != "INVALID_PCI_BASE":
                if uos_id in vuart1_setting.keys() and vuart1_setting[uos_id]['base'] != "INVALID_COM_BASE":
                    ERR_LIST[vuart_key] = "uos {}'s communication_vuart 1 and legacy_vuart 1 should " \
                        "not be configured at the same time.".format(uos_id)
                    return
def get_pci_dev_num_per_vm():
    """
    Build the per-VM count of virtual PCI devices.

    For every VM in the load order the count sums its ivshmem devices and
    PCI vuarts; pre-launched VMs additionally include their passthrough
    devices.

    :return: dict mapping vm id to its PCI device count
    """
    pci_dev_num_per_vm = {}

    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_devs = get_pt_pci_devs(pci_items)
    pt_pci_num = get_pt_pci_num(pci_devs)
    ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    shmem_regions = get_shmem_regions(ivshmem_region)
    shmem_num = get_shmem_num(shmem_regions)
    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
    vuarts_num = get_pci_vuart_num(vuarts)

    for vm_i, load_order in common.LOAD_ORDER.items():
        # ivshmem devices assigned to this VM, if the feature is enabled.
        shmem_cnt = 0
        if shmem_enabled == 'y' and vm_i in shmem_num:
            shmem_cnt = shmem_num[vm_i]
        if load_order == "POST_LAUNCHED_VM":
            pci_dev_num_per_vm[vm_i] = shmem_cnt + vuarts_num[vm_i]
        elif load_order == "PRE_LAUNCHED_VM":
            pci_dev_num_per_vm[vm_i] = pt_pci_num[vm_i] + shmem_cnt + vuarts_num[vm_i]
        elif load_order == "SERVICE_VM":
            pci_dev_num_per_vm[vm_i] = shmem_cnt + vuarts_num[vm_i]

    return pci_dev_num_per_vm
def write_vuart_vbar(mmiolist_per_vm, sos_mmio_range, config):
    """
    Emit the VMx_VUART_y_VBAR macros holding the two BAR bases of each PCI vuart.

    For every pre-launched or service VM vuart that is actually enabled (and
    not shadowed by an enabled legacy vuart 0/1), two MMIO windows are
    allocated - from sos_mmio_range for the service VM, from the 2G-4G hole
    for pre-launched VMs - recorded in mmiolist_per_vm and printed to config.
    """
    # Legacy vuart 0/1 settings: an enabled legacy vuart suppresses the
    # PCI vuart with the same index.
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)

    def allocate_bar(windows, vm_id, size, alignment):
        # Pick a free window, align the base, and record the BAR as used
        # (append + sort) so the next allocation cannot overlap it.
        window = get_free_mmio(windows, mmiolist_per_vm[vm_id], size + alignment)
        base = common.round_up(window.start, alignment)
        bar = MmioWindow(base, base + size - 1)
        mmiolist_per_vm[vm_id].append(bar)
        mmiolist_per_vm[vm_id].sort()
        return bar

    for vm_id, vm_vuarts in vuarts.items():
        load_type = scenario_cfg_lib.VM_DB[common.VM_TYPES[vm_id]]['load_type']
        if load_type == "POST_LAUNCHED_VM":
            # Post-launched VMs get their vbars assigned at runtime.
            continue
        for vuart_id, vuart_attr in vm_vuarts.items():
            if vuart_attr['base'] == "INVALID_PCI_BASE":
                continue
            # Skip pci vuart 0/1 if the matching legacy vuart is enabled.
            if vuart_id == 0 and vm_id in vuart0_setting \
                    and vuart0_setting[vm_id]['base'] != "INVALID_COM_BASE":
                continue
            if vuart_id == 1 and vm_id in vuart1_setting \
                    and vuart1_setting[vm_id]['base'] != "INVALID_COM_BASE":
                continue
            free_bar0 = []
            free_bar1 = []
            # Each vuart device requires two BARs.
            if load_type == "SERVICE_VM":
                free_bar0 = allocate_bar(sos_mmio_range, vm_id,
                                         PCI_VUART_VBAR0_SIZE, PCI_VUART_VBAR0_ALIGNMENT)
                free_bar1 = allocate_bar(sos_mmio_range, vm_id,
                                         PCI_VUART_VBAR1_SIZE, PCI_VUART_VBAR1_ALIGNMENT)
            elif load_type == "PRE_LAUNCHED_VM":
                # Pre-launched VMs allocate out of the 2G-4G PCI hole.
                free_bar0 = allocate_bar([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)],
                                         vm_id, PCI_VUART_VBAR0_SIZE, PCI_VUART_VBAR0_ALIGNMENT)
                free_bar1 = allocate_bar([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)],
                                         vm_id, PCI_VUART_VBAR1_SIZE, PCI_VUART_VBAR1_ALIGNMENT)
            print("#define VM%s" % (str(vm_id) + "_VUART_%-28s") % (str(vuart_id) + "_VBAR"),
                  " .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
            print("{}.vbar_base[1] = {:#x}UL".format(' ' * 54, free_bar1.start), file=config)
            print("", file=config)
def generate_file(vm_info, config):
    """
    Generate pci_dev.c for the VMs in a scenario.

    Emits the acrn_vm_pci_dev_config arrays holding the passthrough devices,
    ivshmem devices and PCI vuarts of every VM that has any PCI device.

    :param vm_info: parsed scenario information (passthrough pci devs, shmem regions, ...)
    :param config: file-like object that pci_dev.c is written to
    :return: None
    """
    board_cfg_lib.parser_pci()
    board_cfg_lib.parse_mem()

    compared_bdf = []
    sos_used_bdf = []

    # Give each physical PCI device an instance-counted name, grouped by sub-name.
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue
            add_instance_to_name(i_cnt, bdf, bar_attr)
            i_cnt += 1

    # Every physical BDF counts as taken in the SOS vBDF space, plus 00:01.0.
    # NOTE(review): 00:01.0 is reserved unconditionally here - confirm what claims it.
    for bdf in compared_bdf:
        bdf_tuple = BusDevFunc.from_str(bdf)
        sos_used_bdf.append(bdf_tuple)
    bdf_tuple = BusDevFunc(bus=0, dev=1, func=0)
    sos_used_bdf.append(bdf_tuple)

    # PCI vuart support is emitted as soon as one VM configures a pci vuart.
    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
    vuarts_num = scenario_cfg_lib.get_vuart_num(vuarts)
    pci_vuart_enabled = False
    for vm_i in common.VM_TYPES:
        if vuarts_num[vm_i] > 0:
            pci_vuart_enabled = True
            break

    # File header and common includes.
    print("{}".format(scenario_cfg_lib.HEADER_LICENSE), file=config)
    print("", file=config)
    print("#include <vm_config.h>", file=config)
    print("#include <pci_devices.h>", file=config)
    print("#include <vpci.h>", file=config)
    print("#include <vbar_base.h>", file=config)
    print("#include <mmu.h>", file=config)
    print("#include <page.h>", file=config)
    if pci_vuart_enabled:
        print("#include <vmcs9900.h>", file=config)

    # Insert header for share memory
    if vm_info.shmem.shmem_enabled == 'y':
        print("#include <ivshmem_cfg.h>", file=config)

    # Insert comments and macros for passthrough devices
    if any((p for _, p in vm_info.cfg_pci.pci_devs.items())):
        print("", file=config)
        print("/*", file=config)
        print(" * TODO: remove PTDEV macro and add DEV_PRIVINFO macro to initialize pbdf for", file=config)
        print(" * passthrough device configuration and shm_name for ivshmem device configuration.", file=config)
        print(" */", file=config)
        print("#define PTDEV(PCI_DEV)\t\tPCI_DEV, PCI_DEV##_VBAR", file=config)
        print("", file=config)
        print("/*", file=config)
        print(" * TODO: add DEV_PCICOMMON macro to initialize emu_type, vbdf and vdev_ops", file=config)
        print(" * to simplify the code.", file=config)
        print(" */", file=config)

    if pci_vuart_enabled:
        print("#define INVALID_PCI_BASE\t0U", file=config)

    for vm_i, vm_type in common.VM_TYPES.items():
        vm_used_bdf = []

        # Skip this vm if there is no any pci device and virtual device
        if not scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i] and \
                scenario_cfg_lib.VM_DB[vm_type]['load_type'] != "SOS_VM":
            continue
        # The SOS always gets a (possibly empty) device table declaration.
        if not scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i] and \
                scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            print("", file=config)
            print("struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM];", file=config)
            continue

        # pci_cnt doubles as the next free device number in the VM's vBDF space.
        pci_cnt = 1
        # Insert device structure and bracket
        print("", file=config)
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            print("struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM] = {", file=config)
        else:
            print("struct acrn_vm_pci_dev_config " +
                  "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM] = {{".format(vm_i, vm_i), file=config)

        # Insert passthrough devices data
        if vm_i in vm_info.cfg_pci.pci_devs.keys():
            pci_bdf_devs_list = vm_info.cfg_pci.pci_devs[vm_i]
            if pci_bdf_devs_list:
                # Insert pci hostbridge for passthrough devices:
                if pci_cnt == 1:
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
                    print("\t\t.vbdf.bits = {.b = 0x00U, .d = 0x00U, .f = 0x00U},", file=config)
                    print("\t\t.vdev_ops = &vhostbridge_ops,", file=config)
                    print("\t},", file=config)
                    bdf_tuple = BusDevFunc.from_str("00:00.0")
                    vm_used_bdf.append(bdf_tuple)
                for pci_bdf_dev in pci_bdf_devs_list:
                    if not pci_bdf_dev:
                        continue
                    # NOTE(review): bus/dev/fun are parsed but never used below - confirm
                    # whether they are dead code or an unfinished validation.
                    bus = int(pci_bdf_dev.split(':')[0], 16)
                    dev = int(pci_bdf_dev.split(':')[1].split('.')[0], 16)
                    fun = int(pci_bdf_dev.split('.')[1], 16)
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[1]), file=config)
                    print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                          .format(pci_cnt), file=config)
                    for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
                        if bdf == pci_bdf_dev:
                            print("\t\tPTDEV({}),".format(
                                board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic[bdf].name_w_i_cnt), file=config)
                        else:
                            continue
                    print("\t},", file=config)
                    bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                    vm_used_bdf.append(bdf_tuple)
                    pci_cnt += 1

        # Insert ivshmem information
        if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
                and len(vm_info.shmem.shmem_regions[vm_i]) > 0:
            raw_shm_list = vm_info.shmem.shmem_regions[vm_i]
            for shm in raw_shm_list:
                shm_splited = shm.split(',')
                print("\t{", file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
                if vm_i in vm_info.cfg_pci.pci_devs.keys():
                    if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                        # The SOS shares the physical BDF space: pick a free slot.
                        free_bdf = find_unused_bdf(sos_used_bdf, "ivshmem")
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02x}U, .f = 0x{:02x}U}}," \
                              .format(free_bdf.dev, free_bdf.func), file=config)
                        print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                        sos_used_bdf.append(free_bdf)
                    else:
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                              .format(pci_cnt), file=config)
                        print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                        bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                        vm_used_bdf.append(bdf_tuple)
                elif vm_i not in vm_info.cfg_pci.pci_devs.keys():
                    if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                              .format(pci_cnt), file=config)
                        bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                        vm_used_bdf.append(bdf_tuple)
                    elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
                        # Post-launched VMs get their vBDF assigned at runtime.
                        print("\t\t.vbdf.value = UNASSIGNED_VBDF,", file=config)
                    print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                # Match the region name against the board's shm bar table; the
                # key is "<index>_<region name>".
                for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
                    index = shm_name[:shm_name.find('_')]
                    shm_name = shm_name[shm_name.find('_') + 1:]
                    if shm_name == shm_splited[0].strip():
                        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                            print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
                            print("\t\tIVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
                            break
                        elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                            print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
                            print("\t\tSOS_IVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
                            break
                        else:
                            print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{}".format(index), file=config)
                            break
                pci_cnt += 1
                print("\t},", file=config)

        # Insert PCI vuart devices.
        if vm_i in vuarts.keys():
            # get legacy vuart information
            vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
            vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
            for vuart_id in vuarts[vm_i].keys():
                if vuarts[vm_i][vuart_id]['base'] == "INVALID_PCI_BASE":
                    continue
                # skip pci vuart 0 for post-launched vm
                if vuart_id == 0 and scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
                    continue
                # Skip pci vuart 0 if the legacy vuart 0 is enabled
                if vuart_id == 0 and vm_i in vuart0_setting and \
                        vuart0_setting[vm_i]['base'] != "INVALID_COM_BASE":
                    continue
                # Skip pci vuart 1 if the legacy vuart 1 is enabled
                if vuart_id == 1 and vm_i in vuart1_setting and \
                        vuart1_setting[vm_i]['base'] != "INVALID_COM_BASE":
                    continue
                print("\t{", file=config)
                print("\t\t.vuart_idx = {:1d},".format(vuart_id), file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
                print("\t\t.vdev_ops = &vmcs9900_ops,", file=config)
                if vuart_id != 0 and scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
                    print("\t\t.vbar_base[0] = INVALID_PCI_BASE,", file=config)
                    print("\t\t.vbdf.value = UNASSIGNED_VBDF,", file=config)
                if scenario_cfg_lib.VM_DB[vm_type]['load_type'] != "POST_LAUNCHED_VM":
                    print("\t\tVM{:1d}_VUART_{:1d}_VBAR,".format(vm_i, vuart_id), file=config)
                    if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                        free_bdf = find_unused_bdf(vm_used_bdf, "vuart")
                        vm_used_bdf.append(free_bdf)
                    elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                        free_bdf = find_unused_bdf(sos_used_bdf, "vuart")
                        sos_used_bdf.append(free_bdf)
                    # NOTE(review): the format string hard-codes .f = 0x00U, so the
                    # free_bdf.func argument is passed but ignored by str.format -
                    # presumably func is always 0 here; confirm.
                    print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02x}U, .f = 0x00U}},"
                          .format(free_bdf.dev, free_bdf.func), file=config)
                if vuart_id != 0:
                    # Communication vuarts carry the peer vm id and vuart id.
                    print("\t\t.t_vuart.vm_id = {},".format(
                        vuarts[vm_i][vuart_id]['target_vm_id']), file=config)
                    print("\t\t.t_vuart.vuart_id = {},".format(
                        vuarts[vm_i][vuart_id]['target_uart_id']), file=config)
                pci_cnt += 1
                print("\t},", file=config)

    # Insert the end bracket of the pci_dev.c file
    print("};", file=config)