def generate_file(config):
    # get cpu processor list
    cpu_list = board_cfg_lib.get_processor_info()
    max_cpu_num = len(cpu_list)

    # start to generate board_info.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print(BOARD_INFO_DEFINE, file=config)

    # define MAX_PCPU_NUM
    print("#define MAX_PCPU_NUM\t\t\t{}U".format(max_cpu_num), file=config)

    # define MAX_VMSIX_ON_MSI_PDEVS_NUM
    gen_known_caps_pci_head(config)

    # define MAX_HIDDEN_PDEVS_NUM
    if board_cfg_lib.BOARD_NAME in list(board_cfg_lib.KNOWN_HIDDEN_PDEVS_BOARD_DB):
        print("#define MAX_HIDDEN_PDEVS_NUM\t\t{}U".format(
            len(board_cfg_lib.KNOWN_HIDDEN_PDEVS_BOARD_DB[board_cfg_lib.BOARD_NAME])), file=config)
    else:
        print("#define MAX_HIDDEN_PDEVS_NUM\t\t0U", file=config)

    # generate HI_MMIO_START/HI_MMIO_END
    find_hi_mmio_window(config)

    p2sb = common.get_leaf_tag_map_bool(common.SCENARIO_INFO_FILE, "mmio_resources", "p2sb")
    if (common.VM_TYPES.get(0) is not None
            and scenario_cfg_lib.VM_DB[common.VM_TYPES[0]]['load_type'] == "PRE_LAUNCHED_VM"
            and board_cfg_lib.is_p2sb_passthru_possible()
            and p2sb.get(0, False)):
        print("", file=config)
        print("#define P2SB_VGPIO_DM_ENABLED", file=config)

        hpa = board_cfg_lib.find_p2sb_bar_addr()
        print("#define P2SB_BAR_ADDR\t\t\t0x{:X}UL".format(hpa), file=config)

        gpa = common.hpa2gpa(0, hpa, 0x1000000)
        print("#define P2SB_BAR_ADDR_GPA\t\t0x{:X}UL".format(gpa), file=config)

        print("#define P2SB_BAR_SIZE\t\t\t0x1000000UL", file=config)

    if board_cfg_lib.is_matched_board(("ehl-crb-b")):
        print("", file=config)
        print("#define P2SB_BASE_GPIO_PORT_ID\t\t0x69U", file=config)
        print("#define P2SB_MAX_GPIO_COMMUNITIES\t0x6U", file=config)

    print(BOARD_INFO_ENDIF, file=config)
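# --- Illustrative usage sketch (not part of the tool) ------------------------
# generate_file() above only writes to a file-like object, so it can be driven
# against an in-memory buffer once the board/scenario XML parsing has populated
# the board_cfg_lib/common globals it reads. The output path below is an
# assumption taken from the "start to generate board_info.h" comment; the real
# tool decides the destination elsewhere.
import io

def _example_write_board_info_h(path="board_info.h"):
    buf = io.StringIO()
    generate_file(buf)                 # emits the "#define ..." lines shown above
    with open(path, "w") as out:
        out.write(buf.getvalue())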
def gen_pre_launch_vm(vm_type, vm_i, scenario_items, config):
    vm_info = scenario_items['vm']

    # guest flags
    (err_dic, guest_flags) = get_guest_flag(vm_info.guest_flags[vm_i])
    if err_dic:
        return err_dic

    pre_vm_type = get_pre_vm_type(vm_type, vm_i)

    print("\t{{\t/* VM{} */".format(vm_i), file=config)
    print("\t\t{},".format(pre_vm_type), file=config)
    print('\t\t.name = "{0}",'.format(vm_info.name[vm_i]), file=config)

    cpu_affinity_output(vm_info, vm_i, config)
    if guest_flags:
        print("\t\t.guest_flags = {0},".format(guest_flags), file=config)
    clos_output(scenario_items, vm_i, config)

    print("\t\t.memory = {", file=config)
    print("\t\t\t.start_hpa = VM{0}_CONFIG_MEM_START_HPA,".format(vm_i), file=config)
    print("\t\t\t.size = VM{0}_CONFIG_MEM_SIZE,".format(vm_i), file=config)
    print("\t\t\t.start_hpa2 = VM{0}_CONFIG_MEM_START_HPA2,".format(vm_i), file=config)
    print("\t\t\t.size_hpa2 = VM{0}_CONFIG_MEM_SIZE_HPA2,".format(vm_i), file=config)
    print("\t\t},", file=config)

    is_need_epc(vm_info.epc_section, vm_i, config)

    print("\t\t.os_config = {", file=config)
    print('\t\t\t.name = "{0}",'.format(vm_info.os_cfg.kern_name[vm_i]), file=config)
    print("\t\t\t.kernel_type = {0},".format(vm_info.os_cfg.kern_type[vm_i]), file=config)
    print('\t\t\t.kernel_mod_tag = "{0}",'.format(vm_info.os_cfg.kern_mod[vm_i]), file=config)
    if vm_info.os_cfg.ramdisk_mod[vm_i].strip():
        print('\t\t\t.ramdisk_mod_tag = "{0}",'.format(vm_info.os_cfg.ramdisk_mod[vm_i]), file=config)

    if vm_i in vm_info.os_cfg.kern_load_addr.keys() and vm_info.os_cfg.kern_load_addr[vm_i]:
        print("\t\t\t.kernel_load_addr = {0},".format(vm_info.os_cfg.kern_load_addr[vm_i]), file=config)
    if vm_i in vm_info.os_cfg.kern_entry_addr.keys() and vm_info.os_cfg.kern_entry_addr[vm_i]:
        print("\t\t\t.kernel_entry_addr = {0},".format(vm_info.os_cfg.kern_entry_addr[vm_i]), file=config)
    if vm_i in vm_info.os_cfg.kern_args.keys() and vm_info.os_cfg.kern_args[vm_i]:
        print("\t\t\t.bootargs = ", end="", file=config)
        split_cmdline(vm_info.os_cfg.kern_args[vm_i].strip(), config)
    print("\t\t},", file=config)

    # VUART
    err_dic = vuart_output(vm_type, vm_i, vm_info, config)
    if err_dic:
        return err_dic

    if (vm_i in vm_info.cfg_pci.pci_devs.keys() and vm_info.cfg_pci.pci_devs[vm_i]) or \
        (vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
            and vm_info.shmem.shmem_regions[vm_i]):
        print("\t\t.pci_dev_num = VM{}_CONFIG_PCI_DEV_NUM,".format(vm_i), file=config)
        print("\t\t.pci_devs = vm{}_pci_devs,".format(vm_i), file=config)

    if vm_i == 0 and board_cfg_lib.is_tpm_passthru():
        print("#ifdef VM0_PASSTHROUGH_TPM", file=config)
        print("\t\t.pt_tpm2 = true,", file=config)
        print("\t\t.mmiodevs[0] = {", file=config)
        print("\t\t\t.base_gpa = 0xFED40000UL,", file=config)
        print("\t\t\t.base_hpa = VM0_TPM_BUFFER_BASE_ADDR,", file=config)
        print("\t\t\t.size = VM0_TPM_BUFFER_SIZE,", file=config)
        print("\t\t},", file=config)
        print("#endif", file=config)

    if (vm_i == 0 and vm_info.mmio_resource_info.p2sb.get(vm_i) is not None
            and vm_info.mmio_resource_info.p2sb[vm_i]):
        print("#ifdef P2SB_BAR_ADDR", file=config)
        print("\t\t.pt_p2sb_bar = true,", file=config)
        print("\t\t.mmiodevs[0] = {", file=config)
        gpa = common.hpa2gpa(0, board_cfg_lib.find_p2sb_bar_addr(), 0x1000000)
        print("\t\t\t.base_gpa = 0x{:X}UL,".format(gpa), file=config)
        print("\t\t\t.base_hpa = P2SB_BAR_ADDR,", file=config)
        print("\t\t\t.size = 0x1000000UL,", file=config)
        print("\t\t},", file=config)
        print("#endif", file=config)

    if (vm_i == 0 and board_cfg_lib.is_matched_board(("ehl-crb-b"))
            and vm_info.pt_intx_info.phys_gsi.get(vm_i) is not None
            and len(vm_info.pt_intx_info.phys_gsi[vm_i]) > 0):
        print("\t\t.pt_intx_num = {}U,".format(len(vm_info.pt_intx_info.phys_gsi[vm_i])), file=config)
        print("\t\t.pt_intx = &vm0_pt_intx[0U],", file=config)

    print("\t},", file=config)
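# --- Hypothetical call site (assumed, not taken verbatim from the tool) ------
# gen_pre_launch_vm() is written to be called once per pre-launched VM while
# vm_configurations.c is being emitted; scenario_items and common.VM_TYPES are
# assumed to have been populated by the scenario parsing done elsewhere.
def _example_emit_pre_launched_vms(scenario_items, config):
    for vm_i, vm_type in common.VM_TYPES.items():
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            err_dic = gen_pre_launch_vm(vm_type, vm_i, scenario_items, config)
            if err_dic:
                # stop on the first validation error, mirroring the early returns above
                return err_dic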
def generate_file(config):
    matching_mmios, non_matching_mmios = get_mmio_windows_with_key(['PCI Bus 0000:00'])
    matching_mmios = removed_nested(matching_mmios, non_matching_mmios)
    non_matching_mmios = [
        w for w in non_matching_mmios
        if any((w.overlaps(w2) for w2 in matching_mmios))
    ]
    non_matching_mmios = merged_windows(non_matching_mmios)

    # list of all vmsix supported devices in bdf format
    bdf_list = board_cfg_lib.get_known_caps_pci_devs().get('VMSIX', [])

    # list of all PRE_LAUNCHED_VMs' vmsix supported passthrough devices in bdf format
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_devs = scenario_cfg_lib.get_pt_pci_devs(pci_items)
    pci_devs_per_vm = get_devs_per_vm_with_key(pci_devs, bdf_list)

    # list of the Service VM's vmsix supported devices, excluding other PRE_LAUNCHED_VMs', in bdf format
    sos_bdf_list = [
        d for d in bdf_list
        if all((d not in pci_devs_per_vm[i] for i in pci_devs_per_vm))
    ]
    for vm_i in pci_devs_per_vm:
        vm_type = common.VM_TYPES[vm_i]
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
            pci_devs_per_vm[vm_i] = sos_bdf_list

    mmiolist_per_vm = {}
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_i not in mmiolist_per_vm.keys():
            mmiolist_per_vm[vm_i] = []
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
            mmiolist_per_vm[vm_i] = non_matching_mmios
        else:
            if vm_i in pci_devs.keys():
                match, _ = get_mmio_windows_with_key(pci_devs[vm_i])
                mmiolist_per_vm[vm_i] = match
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                if vm_i not in mmiolist_per_vm.keys():
                    mmiolist_per_vm[vm_i] = []
                # TSN reserved region
                mmiolist_per_vm[vm_i].append(MmioWindow(start=0xffff0000, end=0xffffffff))
                # for the pre-launched VM, if the TPM is passthrough, this address is used
                if vm_i == 0 and board_cfg_lib.is_tpm_passthru():
                    mmiolist_per_vm[vm_i].append(MmioWindow(start=0xfed40000, end=0xfed40000 + 0x5000 - 1))
                # for the pre-launched VM on ehl-crb-b, if the p2sb is passthrough, this address is used
                if board_cfg_lib.is_matched_board(('ehl-crb-b')):
                    p2sb_start = board_cfg_lib.find_p2sb_bar_addr()
                    mmiolist_per_vm[vm_i].append(MmioWindow(start=p2sb_start, end=p2sb_start + 0x1000000 - 1))
                mmiolist_per_vm[vm_i].sort()

    # start to generate vbar_base.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print(VBAR_INFO_DEFINE, file=config)

    common.get_vm_types()
    pre_vm = False
    sos_vm = False
    for vm_type in common.VM_TYPES.values():
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            pre_vm = True
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
            sos_vm = True
    if not pre_vm and not sos_vm:
        print(VBAR_INFO_ENDIF, file=config)
        return

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    if ivshmem_enabled == 'y':
        write_ivshmem_vbar(mmiolist_per_vm, matching_mmios, config)

    # Get passthrough devices vbar bases
    compared_bdf = []
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue
            write_vbar(i_cnt, bdf, board_cfg_lib.PCI_DEV_BAR_DESC.pci_bar_dic, bar_attr, \
                pci_devs_per_vm, mmiolist_per_vm, matching_mmios, config)
            i_cnt += 1

    write_vuart_vbar(mmiolist_per_vm, matching_mmios, config)
    print(VBAR_INFO_ENDIF, file=config)
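# --- Minimal stand-in for the MmioWindow type used above (assumption) --------
# The real MmioWindow comes from the config-tool libraries; this sketch only
# shows the interface generate_file() relies on: an inclusive [start, end]
# address pair that sorts naturally and can report overlaps.
from collections import namedtuple

class MmioWindowSketch(namedtuple("MmioWindowSketch", ["start", "end"])):
    def overlaps(self, other):
        # two closed intervals overlap unless one ends before the other begins
        return not (self.end < other.start or other.end < self.start)

# e.g. the reserved TPM buffer window overlaps the CRB register page:
# MmioWindowSketch(0xfed40000, 0xfed44fff).overlaps(MmioWindowSketch(0xfed40000, 0xfed400ff)) -> True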
def generate_file(config):
    matching_mmios, non_matching_mmios = get_mmio_windows_with_key(['PCI Bus 0000:00'])
    matching_mmios = removed_nested(matching_mmios, non_matching_mmios)
    non_matching_mmios = [
        w for w in non_matching_mmios
        if any((w.overlaps(w2) for w2 in matching_mmios))
    ]
    non_matching_mmios = merged_windows(non_matching_mmios)

    # list of all vmsix supported devices in bdf format
    bdf_list = board_cfg_lib.get_known_caps_pci_devs().get('VMSIX', [])

    # list of all PRE_LAUNCHED_VMs' vmsix supported passthrough devices in bdf format
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_devs = scenario_cfg_lib.get_pci_devs(pci_items)
    pci_devs_per_vm = get_devs_per_vm_with_key(pci_devs, bdf_list)

    # list of the SOS VM's vmsix supported devices, excluding other PRE_LAUNCHED_VMs', in bdf format
    sos_bdf_list = [
        d for d in bdf_list
        if all((d not in pci_devs_per_vm[i] for i in pci_devs_per_vm))
    ]
    for vm_i in pci_devs_per_vm:
        vm_type = common.VM_TYPES[vm_i]
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            pci_devs_per_vm[vm_i] = sos_bdf_list

    mmiolist_per_vm = {}
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_i not in mmiolist_per_vm.keys():
            mmiolist_per_vm[vm_i] = []
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            mmiolist_per_vm[vm_i] = non_matching_mmios
        else:
            if vm_i in pci_devs.keys():
                match, _ = get_mmio_windows_with_key(pci_devs[vm_i])
                mmiolist_per_vm[vm_i] = match
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                if vm_i not in mmiolist_per_vm.keys():
                    mmiolist_per_vm[vm_i] = []
                # TSN reserved region
                mmiolist_per_vm[vm_i].append(MmioWindow(start=0xffff0000, end=0xffffffff))
                # for the pre-launched VM, if the TPM is passthrough, this address is used
                if vm_i == 0 and board_cfg_lib.is_tpm_passthru():
                    mmiolist_per_vm[vm_i].append(MmioWindow(start=0xfed40000, end=0xfed40000 + 0x5000 - 1))
                # for the pre-launched VM on ehl-crb-b, if the p2sb is passthrough, this address is used
                if board_cfg_lib.is_matched_board(('ehl-crb-b')):
                    p2sb_start = board_cfg_lib.find_p2sb_bar_addr()
                    mmiolist_per_vm[vm_i].append(MmioWindow(start=p2sb_start, end=p2sb_start + 0x1000000 - 1))
                mmiolist_per_vm[vm_i].sort()

    # start to generate vbar_base.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print(VBAR_INFO_DEFINE, file=config)

    common.get_vm_types()
    pre_vm = False
    sos_vm = False
    for vm_type in common.VM_TYPES.values():
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            pre_vm = True
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            sos_vm = True
    if not pre_vm and not sos_vm:
        print(VBAR_INFO_ENDIF, file=config)
        return

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    if ivshmem_enabled == 'y':
        for vm_id, vm_type in common.VM_TYPES.items():
            free_bar = []
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                board_cfg_lib.parse_mem()
                for shm_name, bar_attr_dic in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
                    index = shm_name[:shm_name.find('_')]
                    i_cnt = 0
                    for bar_i, bar_attr in bar_attr_dic.items():
                        i_cnt += 1
                        if bar_i == 2:
                            raw_shmem_regions = common.get_hv_item_tag(
                                common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
                            for shm in raw_shmem_regions:
                                if shm is None or shm.strip() == '':
                                    continue
                                shm_splited = shm.split(',')
                                name = shm_splited[0].strip()
                                size = shm_splited[1].strip()
                                try:
                                    int_size = int(size) * 0x100000
                                except:
                                    int_size = 0
                            bar_2 = int(bar_attr.addr, 16)
                            mmiolist_per_vm[vm_id].append(MmioWindow(start=bar_2, end=bar_2 + int_size - 1))
                            mmiolist_per_vm[vm_id].sort()
                        if bar_i == 0:
                            bar_0 = MmioWindow(start=int(bar_attr.addr, 16),
                                               end=int(bar_attr.addr, 16) + 0x100 - 1)
                            mmiolist_per_vm[vm_id].append(bar_0)
                            mmiolist_per_vm[vm_id].sort()
                            if len(bar_attr_dic.keys()) == 1:
                                print("#define IVSHMEM_DEVICE_%-23s" % (str(index) + "_VBAR"),
                                      "       .vbar_base[{}] = {}UL".format(bar_i, bar_attr.addr), file=config)
                            else:
                                print("#define IVSHMEM_DEVICE_%-23s" % (str(index) + "_VBAR"),
                                      "       .vbar_base[{}] = {}UL, \\".format(bar_i, bar_attr.addr), file=config)
                                # vbar[1] for shared memory is fixed to 4K
                                free_bar = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)], \
                                    mmiolist_per_vm[vm_id], BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                                free_bar_start_addr = common.round_up(free_bar.start, BAR1_SHEMEM_ALIGNMENT)
                                free_bar_end_addr = free_bar_start_addr + BAR1_SHEMEM_SIZE - 1
                                free_bar = MmioWindow(free_bar_start_addr, free_bar_end_addr)
                                mmiolist_per_vm[vm_id].append(free_bar)
                                mmiolist_per_vm[vm_id].sort()
                                print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar.start), file=config)
                        elif i_cnt == len(bar_attr_dic.keys()):
                            print("{}.vbar_base[{}] = {}UL".format(' ' * 54, bar_i, bar_attr.addr), file=config)
                        else:
                            print("{}.vbar_base[{}] = {}UL, \\".format(' ' * 54, bar_i, bar_attr.addr), file=config)
                    print("", file=config)
            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                ivshmem_region = common.get_hv_item_tag(
                    common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
                shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
                if vm_id not in shmem_regions.keys():
                    continue
                idx = 0
                for shm in ivshmem_region:
                    if shm is None or shm.strip() == '':
                        continue
                    shm_splited = shm.split(',')
                    name = shm_splited[0].strip()
                    size = shm_splited[1].strip()
                    try:
                        int_size = int(size) * 0x100000
                    except:
                        int_size = 0
                    # vbar[0] for shared memory is 0x100 bytes
                    free_bar0 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
                    free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
                    free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
                    free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar0)
                    mmiolist_per_vm[vm_id].sort()
                    # vbar[1] for shared memory is 4K
                    free_bar1 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                    free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
                    free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
                    free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar1)
                    mmiolist_per_vm[vm_id].sort()
                    # vbar[2] for shared memory is the specified size in MB
                    free_bar2 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR2_SHEMEM_ALIGNMENT + int_size)
                    free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
                    free_bar2_end_addr = free_bar2_start_addr + int_size - 1
                    free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar2)
                    mmiolist_per_vm[vm_id].sort()
                    print("#define SOS_IVSHMEM_DEVICE_%-19s" % (str(idx) + "_VBAR"),
                          "       .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
                    print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
                    print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
                    print("", file=config)
                    idx += 1

    # Get passthrough devices vbar bases
    compared_bdf = []
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue
            write_vbar(i_cnt, bdf, board_cfg_lib.PCI_DEV_BAR_DESC.pci_bar_dic, bar_attr, \
                pci_devs_per_vm, mmiolist_per_vm, matching_mmios, config)
            i_cnt += 1

    write_vuart_vbar(mmiolist_per_vm, matching_mmios, config)
    print(VBAR_INFO_ENDIF, file=config)
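# --- Sketch of the free-window search the ivshmem/vuart vBAR code relies on --
# get_free_mmio() is used above to pick an unused guest MMIO range for a vBAR.
# A minimal first-fit version over the same MmioWindow interface could look
# like the function below; this is an assumption about its behaviour, not the
# tool's actual implementation.
def _find_free_window_sketch(candidates, occupied, size):
    for cand in candidates:
        cursor = cand.start
        for used in sorted(occupied):
            if used.end < cand.start or used.start > cand.end:
                continue                        # does not touch this candidate window
            if used.start - cursor >= size:     # the gap before 'used' is big enough
                return MmioWindow(cursor, cursor + size - 1)
            cursor = max(cursor, used.end + 1)  # skip past the occupied range
        if cand.end - cursor + 1 >= size:       # room left at the tail of the candidate
            return MmioWindow(cursor, cursor + size - 1)
    raise ValueError("no free MMIO window of the requested size")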