def pci_dev_num_per_vm_gen(config):
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_devs = scenario_cfg_lib.get_pci_devs(pci_items)
    pci_dev_num = scenario_cfg_lib.get_pci_num(pci_devs)
    ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
    shmem_num = scenario_cfg_lib.get_shmem_num(shmem_regions)

    for vm_i, vm_type in common.VM_TYPES.items():
        if "POST_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']:
            if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, shmem_num[vm_i]), file=config)
        elif "PRE_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']:
            shmem_num_i = 0
            if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                shmem_num_i = shmem_num[vm_i]
            print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, pci_dev_num[vm_i] + shmem_num_i), file=config)
    print("", file=config)
def set_ivshmem(self, ivshmem_regions):
    """
    Set ivshmem regions for VMs.

    :param ivshmem_regions: raw ivshmem region strings from the scenario configuration
    :return: None
    """
    self.raw_shmem_regions = ivshmem_regions
    self.shmem_enabled = common.get_hv_item_tag(self.scenario_info, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    self.shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_regions)
    self.shmem_num = scenario_cfg_lib.get_shmem_num(self.shmem_regions)
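# For context, a minimal sketch of the region strings this setter consumes,
# assuming the "name, size-in-MB, VM IDs" convention parsed by the generators
# below; the region name and VM IDs here are made-up illustrative values.
ivshmem_regions = [
    "hv:/shm_region_0, 2, 0:2",   # assumed: a 2 MB region shared between VM0 and VM2
]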
def pci_dev_num_per_vm_gen(config):
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_devs = scenario_cfg_lib.get_pt_pci_devs(pci_items)
    pt_pci_num = scenario_cfg_lib.get_pt_pci_num(pci_devs)
    ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
    shmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
    shmem_num = scenario_cfg_lib.get_shmem_num(shmem_regions)
    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
    pci_vuarts_num = scenario_cfg_lib.get_pci_vuart_num(vuarts)

    for vm_i, vm_type in common.VM_TYPES.items():
        num = 0
        if "POST_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']:
            shmem_num_i = 0
            pci_vuart_num = pci_vuarts_num[vm_i]
            if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                shmem_num_i = shmem_num[vm_i]
            num = shmem_num_i + pci_vuart_num
        elif "PRE_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']:
            shmem_num_i = 0
            if shmem_enabled == 'y' and vm_i in shmem_num.keys():
                shmem_num_i = shmem_num[vm_i]
            num = pt_pci_num[vm_i] + shmem_num_i + pci_vuarts_num[vm_i]
            if pt_pci_num[vm_i] > 0 or shmem_num_i > 0 or pci_vuarts_num[vm_i] > 0:
                # if there is a passthrough device, an ivshmem device or a PCI vuart, a vhostbridge is needed
                num += 1
        elif "SOS_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']:
            continue
        if num > 0:
            print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, num), file=config)
    print("", file=config)
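# For orientation, a hedged driver sketch: it assumes the ACRN config-tool
# modules (common, scenario_cfg_lib) are importable and a scenario XML has
# already been loaded (common.SCENARIO_INFO_FILE set, common.VM_TYPES
# populated); the device counts in the trailing comment are illustrative only.
import io

buf = io.StringIO()
pci_dev_num_per_vm_gen(buf)
# For a scenario whose pre-launched VM0 owns two passthrough devices, one
# ivshmem region and no PCI vuarts, the emitted fragment would read roughly:
#
#   #define VM0_CONFIG_PCI_DEV_NUM  4U
#
# (2 passthrough + 1 ivshmem + 1 vhostbridge, per the counting logic above)
print(buf.getvalue(), end="")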
def write_ivshmem_vbar(mmiolist_per_vm, sos_mmio_range, config):
    for vm_id, vm_type in common.VM_TYPES.items():
        ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                                "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
        shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
        if vm_id not in shmem_regions:
            continue
        shmems = shmem_regions.get(vm_id)
        idx = 0
        for shm in shmems:
            if shm is None or shm.strip() == '':
                continue
            shm_splited = shm.split(',')
            size = shm_splited[1].strip()
            try:
                int_size = int(size) * 0x100000
            except:
                int_size = 0
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
                # vbar[0] for shared memory is 4K
                free_bar0 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id],
                                          BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
                free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
                free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
                free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar0)
                mmiolist_per_vm[vm_id].sort()
                # vbar[1] for shared memory is 4K
                free_bar1 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id],
                                          BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
                free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
                free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar1)
                mmiolist_per_vm[vm_id].sort()
                # vbar[2] for shared memory is specified size in MB
                free_bar2 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id],
                                          BAR2_SHEMEM_ALIGNMENT + int_size)
                free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
                free_bar2_end_addr = free_bar2_start_addr + int_size - 1
                free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar2)
                mmiolist_per_vm[vm_id].sort()
                print("#define SOS_IVSHMEM_DEVICE_%-19s" % (str(idx) + "_VBAR"),
                      " .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
                print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
                print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
                print("", file=config)
                idx += 1
            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                mmioRange = [MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G - 1)]
                # vbar[0] for shared memory is 4K
                free_bar0 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id],
                                          BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
                free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
                free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
                free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar0)
                mmiolist_per_vm[vm_id].sort()
                # vbar[1] for shared memory is 4K
                free_bar1 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id],
                                          BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
                free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
                free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar1)
                mmiolist_per_vm[vm_id].sort()
                # vbar[2] for shared memory is specified size in MB
                free_bar2 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id],
                                          BAR2_SHEMEM_ALIGNMENT + int_size)
                free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
                free_bar2_end_addr = free_bar2_start_addr + int_size - 1
                free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar2)
                mmiolist_per_vm[vm_id].sort()
                print("#define IVSHMEM_DEVICE_%-23s" % (str(idx) + "_VBAR"),
                      " .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
                print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
                print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
                print("", file=config)
                idx += 1
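# To make the macro layout concrete, a standalone sketch of the formatting
# expressions used by write_ivshmem_vbar(); the device index and base
# addresses are placeholders, not values computed by get_free_mmio(), and the
# column alignment shown in the trailing comment is approximate.
idx = 0
base0, base1, base2 = 0x80000000, 0x80001000, 0x80200000
print("#define IVSHMEM_DEVICE_%-23s" % (str(idx) + "_VBAR"),
      " .vbar_base[0] = {:#x}UL, \\".format(base0))
print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, base1))
print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, base2))
# Emits a three-line C macro of roughly this shape:
#   #define IVSHMEM_DEVICE_0_VBAR   .vbar_base[0] = 0x80000000UL, \
#                                   .vbar_base[1] = 0x80001000UL, \
#                                   .vbar_base[2] = 0x80200000UL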
def generate_file(config):
    matching_mmios, non_matching_mmios = get_mmio_windows_with_key(['PCI Bus 0000:00'])
    matching_mmios = removed_nested(matching_mmios, non_matching_mmios)
    non_matching_mmios = [
        w for w in non_matching_mmios
        if any((w.overlaps(w2) for w2 in matching_mmios))
    ]
    non_matching_mmios = merged_windows(non_matching_mmios)

    # list of all vmsix supported devices in bdf format
    bdf_list = board_cfg_lib.get_known_caps_pci_devs().get('VMSIX', [])
    # list of all PRE_LAUNCHED_VMs' vmsix supported passthrough devices in bdf format
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_devs = scenario_cfg_lib.get_pci_devs(pci_items)
    pci_devs_per_vm = get_devs_per_vm_with_key(pci_devs, bdf_list)
    # list of SOS vmsix supported devices, excluding the PRE_LAUNCHED_VMs' ones, in bdf format
    sos_bdf_list = [
        d for d in bdf_list
        if all((d not in pci_devs_per_vm[i] for i in pci_devs_per_vm))
    ]
    for vm_i in pci_devs_per_vm:
        vm_type = common.VM_TYPES[vm_i]
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            pci_devs_per_vm[vm_i] = sos_bdf_list

    mmiolist_per_vm = {}
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_i not in mmiolist_per_vm.keys():
            mmiolist_per_vm[vm_i] = []
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            mmiolist_per_vm[vm_i] = non_matching_mmios
        else:
            if vm_i in pci_devs.keys():
                match, _ = get_mmio_windows_with_key(pci_devs[vm_i])
                mmiolist_per_vm[vm_i] = match
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                if vm_i not in mmiolist_per_vm.keys():
                    mmiolist_per_vm[vm_i] = []
                # TSN reserved region
                mmiolist_per_vm[vm_i].append(MmioWindow(start=0xffff0000, end=0xffffffff))
                # For the pre-launched vm, if the TPM is passthrough, this address is used
                if vm_i == 0 and board_cfg_lib.is_tpm_passthru():
                    mmiolist_per_vm[vm_i].append(MmioWindow(start=0xfed40000, end=0xfed40000 + 0x5000 - 1))
                # For the pre-launched vm on ehl-crb-b, if the p2sb is passthrough, this address is used
                if board_cfg_lib.is_matched_board(('ehl-crb-b')):
                    p2sb_start = board_cfg_lib.find_p2sb_bar_addr()
                    mmiolist_per_vm[vm_i].append(MmioWindow(start=p2sb_start, end=p2sb_start + 0x1000000 - 1))
                mmiolist_per_vm[vm_i].sort()

    # start to generate board_info.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print(VBAR_INFO_DEFINE, file=config)

    common.get_vm_types()
    pre_vm = False
    sos_vm = False
    for vm_type in common.VM_TYPES.values():
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            pre_vm = True
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            sos_vm = True
    if not pre_vm and not sos_vm:
        print(VBAR_INFO_ENDIF, file=config)
        return

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    if ivshmem_enabled == 'y':
        for vm_id, vm_type in common.VM_TYPES.items():
            free_bar = []
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                board_cfg_lib.parse_mem()
                for shm_name, bar_attr_dic in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
                    index = shm_name[:shm_name.find('_')]
                    i_cnt = 0
                    for bar_i, bar_attr in bar_attr_dic.items():
                        i_cnt += 1
                        if bar_i == 2:
                            raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                                                       "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
                            for shm in raw_shmem_regions:
                                if shm is None or shm.strip() == '':
                                    continue
                                shm_splited = shm.split(',')
                                name = shm_splited[0].strip()
                                size = shm_splited[1].strip()
                                try:
                                    int_size = int(size) * 0x100000
                                except:
                                    int_size = 0
                            bar_2 = int(bar_attr.addr, 16)
                            mmiolist_per_vm[vm_id].append(MmioWindow(start=bar_2, end=bar_2 + int_size - 1))
                            mmiolist_per_vm[vm_id].sort()
                        if bar_i == 0:
                            bar_0 = MmioWindow(start=int(bar_attr.addr, 16),
                                               end=int(bar_attr.addr, 16) + 0x100 - 1)
                            mmiolist_per_vm[vm_id].append(bar_0)
                            mmiolist_per_vm[vm_id].sort()
                            if len(bar_attr_dic.keys()) == 1:
                                print("#define IVSHMEM_DEVICE_%-23s" % (str(index) + "_VBAR"),
                                      " .vbar_base[{}] = {}UL".format(bar_i, bar_attr.addr), file=config)
                            else:
                                print("#define IVSHMEM_DEVICE_%-23s" % (str(index) + "_VBAR"),
                                      " .vbar_base[{}] = {}UL, \\".format(bar_i, bar_attr.addr), file=config)
                                # vbar[1] for shared memory is fixed to 4K
                                free_bar = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G - 1)],
                                                         mmiolist_per_vm[vm_id],
                                                         BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                                free_bar_start_addr = common.round_up(free_bar.start, BAR1_SHEMEM_ALIGNMENT)
                                free_bar_end_addr = free_bar_start_addr + BAR1_SHEMEM_SIZE - 1
                                free_bar = MmioWindow(free_bar_start_addr, free_bar_end_addr)
                                mmiolist_per_vm[vm_id].append(free_bar)
                                mmiolist_per_vm[vm_id].sort()
                                print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar.start), file=config)
                        elif i_cnt == len(bar_attr_dic.keys()):
                            print("{}.vbar_base[{}] = {}UL".format(' ' * 54, bar_i, bar_attr.addr), file=config)
                        else:
                            print("{}.vbar_base[{}] = {}UL, \\".format(' ' * 54, bar_i, bar_attr.addr), file=config)
                    print("", file=config)
            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                                        "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
                shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
                if vm_id not in shmem_regions.keys():
                    continue
                idx = 0
                for shm in ivshmem_region:
                    if shm is None or shm.strip() == '':
                        continue
                    shm_splited = shm.split(',')
                    name = shm_splited[0].strip()
                    size = shm_splited[1].strip()
                    try:
                        int_size = int(size) * 0x100000
                    except:
                        int_size = 0
                    # vbar[0] for shared memory is 0x100
                    free_bar0 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
                    free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
                    free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
                    free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar0)
                    mmiolist_per_vm[vm_id].sort()
                    # vbar[1] for shared memory is 4K
                    free_bar1 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                    free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
                    free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
                    free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar1)
                    mmiolist_per_vm[vm_id].sort()
                    # vbar[2] for shared memory is the specified size in MB
                    free_bar2 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR2_SHEMEM_ALIGNMENT + int_size)
                    free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
                    free_bar2_end_addr = free_bar2_start_addr + int_size - 1
                    free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar2)
                    mmiolist_per_vm[vm_id].sort()
                    print("#define SOS_IVSHMEM_DEVICE_%-19s" % (str(idx) + "_VBAR"),
                          " .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
                    print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
                    print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
                    print("", file=config)
                    idx += 1

    # Get passthrough devices' vbar bases
    compared_bdf = []
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue
            write_vbar(i_cnt, bdf, board_cfg_lib.PCI_DEV_BAR_DESC.pci_bar_dic, bar_attr,
                       pci_devs_per_vm, mmiolist_per_vm, matching_mmios, config)
            i_cnt += 1

    write_vuart_vbar(mmiolist_per_vm, matching_mmios, config)
    print(VBAR_INFO_ENDIF, file=config)
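# Hedged usage sketch: in the board-config flow a caller opens the target
# header and hands the file object to generate_file(); the output path and
# surrounding setup here are illustrative, not the tool's exact invocation.
with open("vbar_base.h", "w") as config:   # hypothetical output file
    generate_file(config)
# The generated header is bracketed by VBAR_INFO_DEFINE / VBAR_INFO_ENDIF and
# contains one *_VBAR macro block per ivshmem device (IVSHMEM_DEVICE_n_VBAR,
# SOS_IVSHMEM_DEVICE_n_VBAR), plus the PCI vuart and passthrough-device vbar
# macros emitted by write_vuart_vbar() and write_vbar().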