def declare_pci_devs(vm_info, config):
    """Emit `extern` declarations for every per-VM PCI device config array.

    :param vm_info: parsed per-VM scenario information (unused here, kept for
        signature parity with the sibling generators)
    :param config: open file object the generated C code is written to
    """
    for idx, kind in common.VM_TYPES.items():
        if kind == "SOS_VM":
            # The SOS array has a fixed name and a fixed (max) capacity.
            print("extern struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM];", file=config)
        elif scenario_cfg_lib.get_pci_dev_num_per_vm()[idx]:
            # Other VMs only get an array when they actually own vPCI devices.
            print("extern struct acrn_vm_pci_dev_config " +
                  "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM];".format(idx, idx), file=config)
    print("", file=config)
def gen_sos_vm(vm_type, vm_i, scenario_items, config):
    """Write the SOS VM entry of the vm_configs array to *config*.

    :param vm_type: VM type name as listed in the scenario file
    :param vm_i: VM index in the scenario
    :param scenario_items: parsed scenario information; key 'vm' holds per-VM data
    :param config: open file object the generated C code is written to
    :return: an error dict from guest-flag validation on failure, otherwise None
    """
    def emit(text):
        # Shorthand: every generated line goes to the same output file.
        print(text, file=config)

    vm_info = scenario_items['vm']
    err_dic, sos_guest_flags = get_guest_flag(vm_info.guest_flags[vm_i])
    if err_dic:
        return err_dic

    emit("\t{{\t/* VM{} */".format(vm_i))
    emit("\t\tCONFIG_SOS_VM,")
    emit('\t\t.name = "{0}",'.format(vm_info.name[vm_i]))
    emit("")
    emit("\t\t/* Allow SOS to reboot the host since " +
         "there is supposed to be the highest severity guest */")
    if sos_guest_flags:
        emit("\t\t.guest_flags = {0},".format(sos_guest_flags))
    clos_output(scenario_items, vm_i, config)
    cpu_affinity_output(vm_info, vm_i, config)

    emit("\t\t.memory = {")
    emit("\t\t\t.start_hpa = {}UL,".format(vm_info.mem_info.mem_start_hpa[vm_i]))
    emit("\t\t\t.size = CONFIG_SOS_RAM_SIZE,")
    emit("\t\t},")

    emit("\t\t.os_config = {")
    emit('\t\t\t.name = "{0}",'.format(vm_info.os_cfg.kern_name[vm_i]))
    emit('\t\t\t.kernel_type = {0},'.format(vm_info.os_cfg.kern_type[vm_i]))
    emit('\t\t\t.kernel_mod_tag = "{0}",'.format(vm_info.os_cfg.kern_mod[vm_i]))
    emit('\t\t\t.bootargs = {0},'.format(vm_info.os_cfg.kern_args[vm_i]))
    # Only emit a ramdisk tag when one is configured (non-blank).
    if vm_info.os_cfg.ramdisk_mod[vm_i].strip():
        emit('\t\t\t.ramdisk_mod_tag = "{0}",'.format(vm_info.os_cfg.ramdisk_mod[vm_i]))
    emit("\t\t},")

    # vUART entries are produced by a shared helper; propagate its error dict.
    err_dic = vuart_output(vm_type, vm_i, vm_info, config)
    if err_dic:
        return err_dic

    sos_dev_num = scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i]
    emit("\t\t.pci_dev_num = {}U,".format(sos_dev_num))
    emit("\t\t.pci_devs = sos_pci_devs,")
    emit("\t},")
def gen_post_launch_vm(vm_type, vm_i, scenario_items, config):
    """Write one post-launched VM entry of the vm_configs array to *config*.

    :param vm_type: VM type name as listed in the scenario file
    :param vm_i: VM index in the scenario
    :param scenario_items: parsed scenario information; key 'vm' holds per-VM data
    :param config: open file object the generated C code is written to
    :return: an error dict from vUART validation on failure, otherwise None
    """
    vm_info = scenario_items['vm']
    launched_type = get_post_vm_type(vm_type, vm_i)

    print("\t{{\t/* VM{} */".format(vm_i), file=config)
    print("\t\t{},".format(launched_type), file=config)
    clos_output(scenario_items, vm_i, config)

    # vPCI fields are emitted only when this VM actually owns vPCI devices.
    if scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i]:
        for line in (
            "\t\t/* The PCI device configuration is only for in-hypervisor vPCI devices. */",
            "\t\t.pci_dev_num = VM{}_CONFIG_PCI_DEV_NUM,".format(vm_i),
            "\t\t.pci_devs = vm{}_pci_devs,".format(vm_i),
        ):
            print(line, file=config)

    cpu_affinity_output(vm_info, vm_i, config)
    is_need_epc(vm_info.epc_section, vm_i, config)

    # vUART entries come from the shared helper; propagate its error dict.
    err_dic = vuart_output(vm_type, vm_i, vm_info, config)
    if err_dic:
        return err_dic

    print("\t},", file=config)
def gen_pre_launch_vm(vm_type, vm_i, scenario_items, config):
    """Write one pre-launched VM entry of the vm_configs array to *config*.

    :param vm_type: VM type name as listed in the scenario file
    :param vm_i: VM index in the scenario
    :param scenario_items: parsed scenario information; key 'vm' holds per-VM data
    :param config: open file object the generated C code is written to
    :return: an error dict from helper validation on failure, otherwise None
    """
    vm_info = scenario_items['vm']

    # guest flags
    (err_dic, guest_flags) = get_guest_flag(vm_info.guest_flags[vm_i])
    if err_dic:
        return err_dic

    pre_vm_type = get_pre_vm_type(vm_type, vm_i)
    print("\t{{\t/* VM{} */".format(vm_i), file=config)
    print("\t\t{},".format(pre_vm_type), file=config)
    print('\t\t.name = "{0}",'.format(vm_info.name[vm_i]), file=config)
    cpu_affinity_output(vm_info, vm_i, config)
    if guest_flags:
        print("\t\t.guest_flags = {0},".format(guest_flags), file=config)
    clos_output(scenario_items, vm_i, config)

    # Memory layout is expressed through per-VM macros generated elsewhere.
    print("\t\t.memory = {", file=config)
    print("\t\t\t.start_hpa = VM{0}_CONFIG_MEM_START_HPA,".format(vm_i), file=config)
    print("\t\t\t.size = VM{0}_CONFIG_MEM_SIZE,".format(vm_i), file=config)
    print("\t\t\t.start_hpa2 = VM{0}_CONFIG_MEM_START_HPA2,".format(vm_i), file=config)
    print("\t\t\t.size_hpa2 = VM{0}_CONFIG_MEM_SIZE_HPA2,".format(vm_i), file=config)
    print("\t\t},", file=config)
    is_need_epc(vm_info.epc_section, vm_i, config)

    print("\t\t.os_config = {", file=config)
    print('\t\t\t.name = "{0}",'.format(vm_info.os_cfg.kern_name[vm_i]), file=config)
    print("\t\t\t.kernel_type = {0},".format(vm_info.os_cfg.kern_type[vm_i]), file=config)
    print('\t\t\t.kernel_mod_tag = "{0}",'.format(vm_info.os_cfg.kern_mod[vm_i]), file=config)
    if vm_info.os_cfg.ramdisk_mod[vm_i].strip():
        print('\t\t\t.ramdisk_mod_tag = "{0}",'.format(vm_info.os_cfg.ramdisk_mod[vm_i]), file=config)
    # BUG FIX: the load-addr guard previously tested kern_entry_addr[vm_i]
    # (copy-paste from the entry-addr guard below), so a VM with a load
    # address but an empty/missing entry address lost its .kernel_load_addr
    # line or raised KeyError. Test the load-addr dict itself.
    if vm_i in vm_info.os_cfg.kern_load_addr and vm_info.os_cfg.kern_load_addr[vm_i]:
        print("\t\t\t.kernel_load_addr = {0},".format(vm_info.os_cfg.kern_load_addr[vm_i]), file=config)
    if vm_i in vm_info.os_cfg.kern_entry_addr and vm_info.os_cfg.kern_entry_addr[vm_i]:
        print("\t\t\t.kernel_entry_addr = {0},".format(vm_info.os_cfg.kern_entry_addr[vm_i]), file=config)
    if vm_i in vm_info.os_cfg.kern_args and vm_info.os_cfg.kern_args[vm_i]:
        print("\t\t\t.bootargs = VM{0}_BOOT_ARGS,".format(vm_i), file=config)
    print("\t\t},", file=config)

    print("\t\t.acpi_config = {", file=config)
    print('\t\t\t.acpi_mod_tag = "ACPI_VM{}",'.format(vm_i), file=config)
    print("\t\t},", file=config)

    # VUART
    err_dic = vuart_output(vm_type, vm_i, vm_info, config)
    if err_dic:
        return err_dic

    if scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i]:
        print("\t\t.pci_dev_num = VM{}_CONFIG_PCI_DEV_NUM,".format(vm_i), file=config)
        print("\t\t.pci_devs = vm{}_pci_devs,".format(vm_i), file=config)

    # VM0-only passthrough fields, each guarded by C preprocessor conditionals
    # so the generated file still compiles when the board leaves them undefined.
    if vm_i == 0:
        print("#ifdef VM0_PASSTHROUGH_TPM", file=config)
        print("\t\t.pt_tpm2 = true,", file=config)
        print("\t\t.mmiodevs[0] = {", file=config)
        print("\t\t\t.user_vm_pa = VM0_TPM_BUFFER_BASE_ADDR_GPA,", file=config)
        print("\t\t\t.host_pa = VM0_TPM_BUFFER_BASE_ADDR,", file=config)
        print("\t\t\t.size = VM0_TPM_BUFFER_SIZE,", file=config)
        print("\t\t},", file=config)
        print("#endif", file=config)
        print("#ifdef P2SB_BAR_ADDR", file=config)
        print("\t\t.pt_p2sb_bar = true,", file=config)
        print("\t\t.mmiodevs[0] = {", file=config)
        print("\t\t\t.user_vm_pa = P2SB_BAR_ADDR_GPA,", file=config)
        print("\t\t\t.host_pa = P2SB_BAR_ADDR,", file=config)
        print("\t\t\t.size = P2SB_BAR_SIZE,", file=config)
        print("\t\t},", file=config)
        print("#endif", file=config)
        print("\t\t.pt_intx_num = VM0_PT_INTX_NUM,", file=config)
        print("\t\t.pt_intx = &vm0_pt_intx[0U],", file=config)
    print("\t},", file=config)
def generate_file(vm_info, config):
    """
    Generate pci_dev.c for Pre-Launched VMs in a scenario.

    :param vm_info: parsed per-VM scenario information
    :param config: open file object the generated C code is written to
    :return: None
    """
    board_cfg_lib.parser_pci()
    board_cfg_lib.parse_mem()

    # compared_bdf collects each physical BDF once per sub-device name so
    # duplicate devices get distinct instance-suffixed names below.
    compared_bdf = []
    # sos_used_bdf tracks virtual BDFs already occupied in the SOS so that
    # find_unused_bdf() can hand out free slots for ivshmem/vuart devices.
    sos_used_bdf = []
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue
            add_instance_to_name(i_cnt, bdf, bar_attr)
            i_cnt += 1
    for bdf in compared_bdf:
        bdf_tuple = BusDevFunc.from_str(bdf)
        sos_used_bdf.append(bdf_tuple)
    # 00:01.0 is reserved as used up front — presumably for the SOS itself;
    # TODO(review): confirm what occupies this slot.
    bdf_tuple = BusDevFunc(bus=0, dev=1, func=0)
    sos_used_bdf.append(bdf_tuple)

    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
    vuarts_num = scenario_cfg_lib.get_vuart_num(vuarts)
    # PCI vUART support pulls in an extra header and macro only when at least
    # one VM has a PCI vUART configured.
    pci_vuart_enabled = False
    for vm_i in common.VM_TYPES:
        if vuarts_num[vm_i] > 0:
            pci_vuart_enabled = True
            break

    # File prologue: license header and fixed includes.
    print("{}".format(scenario_cfg_lib.HEADER_LICENSE), file=config)
    print("", file=config)
    print("#include <vm_config.h>", file=config)
    print("#include <pci_devices.h>", file=config)
    print("#include <vpci.h>", file=config)
    print("#include <vbar_base.h>", file=config)
    print("#include <mmu.h>", file=config)
    print("#include <page.h>", file=config)
    if pci_vuart_enabled:
        print("#include <vmcs9900.h>", file=config)
    # Insert header for share memory
    if vm_info.shmem.shmem_enabled == 'y':
        print("#include <ivshmem_cfg.h>", file=config)

    # Insert comments and macros for passthrough devices
    if any((p for _, p in vm_info.cfg_pci.pci_devs.items())):
        print("", file=config)
        print("/*", file=config)
        print(" * TODO: remove PTDEV macro and add DEV_PRIVINFO macro to initialize pbdf for", file=config)
        print(" * passthrough device configuration and shm_name for ivshmem device configuration.", file=config)
        print(" */", file=config)
        print("#define PTDEV(PCI_DEV)\t\tPCI_DEV, PCI_DEV##_VBAR", file=config)
        print("", file=config)
        print("/*", file=config)
        print(" * TODO: add DEV_PCICOMMON macro to initialize emu_type, vbdf and vdev_ops", file=config)
        print(" * to simplify the code.", file=config)
        print(" */", file=config)
    if pci_vuart_enabled:
        print("#define INVALID_PCI_BASE\t0U", file=config)

    # One acrn_vm_pci_dev_config array per VM that needs one.
    for vm_i, vm_type in common.VM_TYPES.items():
        # vm_used_bdf tracks virtual BDFs already taken inside this VM.
        vm_used_bdf = []

        # Skip this vm if there is no any pci device and virtual device
        if not scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i] and \
           scenario_cfg_lib.VM_DB[vm_type]['load_type'] != "SOS_VM":
            continue
        # SOS always gets its array declared, even when empty.
        if not scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i] and \
           scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            print("", file=config)
            print("struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM];", file=config)
            continue

        # pci_cnt is the next free virtual device number; 0 is the hostbridge.
        pci_cnt = 1
        # Insert device structure and bracket
        print("", file=config)
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            print("struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM] = {", file=config)
        else:
            print("struct acrn_vm_pci_dev_config " +
                  "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM] = {{".format(vm_i, vm_i), file=config)

        # Insert passtrough devices data
        if vm_i in vm_info.cfg_pci.pci_devs.keys():
            pci_bdf_devs_list = vm_info.cfg_pci.pci_devs[vm_i]
            if pci_bdf_devs_list:
                # Insert pci hostbridge for passtrough devices:
                if pci_cnt == 1:
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
                    print("\t\t.vbdf.bits = {.b = 0x00U, .d = 0x00U, .f = 0x00U},", file=config)
                    print("\t\t.vdev_ops = &vhostbridge_ops,", file=config)
                    print("\t},", file=config)
                    bdf_tuple = BusDevFunc.from_str("00:00.0")
                    vm_used_bdf.append(bdf_tuple)
                for pci_bdf_dev in pci_bdf_devs_list:
                    if not pci_bdf_dev:
                        continue
                    # NOTE(review): bus/dev/fun are parsed but never used below;
                    # the virtual BDF is derived from pci_cnt instead.
                    bus = int(pci_bdf_dev.split(':')[0], 16)
                    dev = int(pci_bdf_dev.split(':')[1].split('.')[0], 16)
                    fun = int(pci_bdf_dev.split('.')[1], 16)
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[1]), file=config)
                    print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},".format(pci_cnt), file=config)
                    # Look up the instance-suffixed macro name for this BDF.
                    for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
                        if bdf == pci_bdf_dev:
                            print("\t\tPTDEV({}),".format(
                                board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic[bdf].name_w_i_cnt), file=config)
                        else:
                            continue
                    print("\t},", file=config)
                    bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                    vm_used_bdf.append(bdf_tuple)
                    pci_cnt += 1

        # Insert ivshmem information
        if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
           and len(vm_info.shmem.shmem_regions[vm_i]) > 0:
            raw_shm_list = vm_info.shmem.shmem_regions[vm_i]
            for shm in raw_shm_list:
                shm_splited = shm.split(',')
                print("\t{", file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
                # BDF assignment depends on whether this VM also has
                # passthrough devices (which may already occupy slots).
                if vm_i in vm_info.cfg_pci.pci_devs.keys():
                    if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                        free_bdf = find_unused_bdf(sos_used_bdf, "ivshmem")
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02x}U, .f = 0x{:02x}U}}," \
                              .format(free_bdf.dev, free_bdf.func), file=config)
                        print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                        sos_used_bdf.append(free_bdf)
                    else:
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},".format(pci_cnt), file=config)
                        print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                        bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                        vm_used_bdf.append(bdf_tuple)
                elif vm_i not in vm_info.cfg_pci.pci_devs.keys():
                    if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},".format(pci_cnt), file=config)
                        bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                        vm_used_bdf.append(bdf_tuple)
                    elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
                        # Post-launched VMs get their BDF assigned at runtime.
                        print("\t\t.vbdf.value = UNASSIGNED_VBDF,", file=config)
                    print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                # Match the shared-memory region name against the board's
                # shm BAR table; keys are "<index>_<region name>".
                for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
                    index = shm_name[:shm_name.find('_')]
                    shm_name = shm_name[shm_name.find('_') + 1:]
                    if shm_name == shm_splited[0].strip():
                        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                            print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
                            print("\t\tIVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
                            break
                        elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                            print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".format(index), file=config)
                            print("\t\tSOS_IVSHMEM_DEVICE_{}_VBAR".format(index), file=config)
                            break
                        else:
                            # No trailing comma here (last field for this case).
                            print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{}".format(index), file=config)
                            break
                pci_cnt += 1
                print("\t},", file=config)

        # PCI vUART (vmcs9900) devices for this VM.
        if vm_i in vuarts.keys():
            # get legacy vuart information
            vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
            vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
            for vuart_id in vuarts[vm_i].keys():
                if vuarts[vm_i][vuart_id]['base'] == "INVALID_PCI_BASE":
                    continue
                # skip pci vuart 0 for post-launched vm
                if vuart_id == 0 and scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
                    continue
                # Skip pci vuart 0 if the legacy vuart 0 is enabled
                if vuart_id == 0 and vm_i in vuart0_setting and vuart0_setting[vm_i]['base'] != "INVALID_COM_BASE":
                    continue
                # Skip pci vuart 1 if the legacy vuart 1 is enabled
                if vuart_id == 1 and vm_i in vuart1_setting and vuart1_setting[vm_i]['base'] != "INVALID_COM_BASE":
                    continue
                print("\t{", file=config)
                print("\t\t.vuart_idx = {:1d},".format(vuart_id), file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
                print("\t\t.vdev_ops = &vmcs9900_ops,", file=config)
                if vuart_id != 0 and scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
                    print("\t\t.vbar_base[0] = INVALID_PCI_BASE,", file=config)
                    print("\t\t.vbdf.value = UNASSIGNED_VBDF,", file=config)
                if scenario_cfg_lib.VM_DB[vm_type]['load_type'] != "POST_LAUNCHED_VM":
                    print("\t\tVM{:1d}_VUART_{:1d}_VBAR,".format(vm_i, vuart_id), file=config)
                    if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                        free_bdf = find_unused_bdf(vm_used_bdf, "vuart")
                        vm_used_bdf.append(free_bdf)
                    elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                        free_bdf = find_unused_bdf(sos_used_bdf, "vuart")
                        sos_used_bdf.append(free_bdf)
                    # NOTE(review): the format string has one placeholder but
                    # two arguments — free_bdf.func is silently dropped and .f
                    # is hard-coded to 0; presumably vuart slots always use
                    # function 0, but confirm against find_unused_bdf().
                    print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02x}U, .f = 0x00U}},".format(free_bdf.dev, free_bdf.func), file=config)
                if vuart_id != 0:
                    # Non-console vUARTs are point-to-point: record the peer.
                    print("\t\t.t_vuart.vm_id = {},".format(vuarts[vm_i][vuart_id]['target_vm_id']), file=config)
                    print("\t\t.t_vuart.vuart_id = {},".format(vuarts[vm_i][vuart_id]['target_uart_id']), file=config)
                pci_cnt += 1
                print("\t},", file=config)

        # Insert the end bracket of the pci_dev.c file
        print("};", file=config)