def parse_mem():
    raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")

    global USED_RAM_RANGE
    # drop windows reserved by a previous run so re-parsing stays idempotent
    for shm_name, shm_bar_dic in PCI_DEV_BAR_DESC.shm_bar_dic.items():
        if 0 in shm_bar_dic.keys() and int(shm_bar_dic[0].addr, 16) in USED_RAM_RANGE.keys():
            del USED_RAM_RANGE[int(shm_bar_dic[0].addr, 16)]
        if 2 in shm_bar_dic.keys() and int(shm_bar_dic[2].addr, 16) - 0xC in USED_RAM_RANGE.keys():
            del USED_RAM_RANGE[int(shm_bar_dic[2].addr, 16) - 0xC]

    idx = 0
    for shm in raw_shmem_regions:
        if shm is None or shm.strip() == '':
            continue

        shm_splited = shm.split(',')
        name = shm_splited[0].strip()
        size = shm_splited[1].strip()
        try:
            int_size = int(size) * 0x100000
        except ValueError:
            int_size = 0

        # vbar[0] (the 0x100-byte register block) is placed between 2G and 4G
        ram_range = get_ram_range()
        tmp_bar_dict = {}
        hv_start_offset = 0x80000000
        ret_start_addr = find_avl_memory(ram_range, str(0x200100), hv_start_offset)
        bar_mem_0 = Bar_Mem()
        bar_mem_0.addr = hex(common.round_up(int(ret_start_addr, 16), 0x200000))
        USED_RAM_RANGE[int(bar_mem_0.addr, 16)] = 0x100
        tmp_bar_dict[0] = bar_mem_0

        # vbar[2] (the shared memory itself) is placed above 4G; the +0xC low
        # bits tag the BAR as 64-bit prefetchable memory
        ram_range = get_ram_range()
        hv_start_offset2 = 0x100000000
        ret_start_addr2 = find_avl_memory(ram_range, str(int_size + 0x200000), hv_start_offset2)
        bar_mem_2 = Bar_Mem()
        bar_mem_2.addr = hex(common.round_up(int(ret_start_addr2, 16), 0x200000) + 0xC)
        # reserve with the same 2M alignment used to compute the address above
        USED_RAM_RANGE[common.round_up(int(ret_start_addr2, 16), 0x200000)] = int_size
        tmp_bar_dict[2] = bar_mem_2

        PCI_DEV_BAR_DESC.shm_bar_dic[str(idx) + '_' + name] = tmp_bar_dict
        idx += 1
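# A minimal sketch of the Bar_Mem holder that parse_mem() fills, assuming it is
# the plain attribute container used elsewhere in this library (hypothetical
# simplification; the real class may carry more fields).
class Bar_Mem:
    """Holds one allocated vbar; 'addr' is a hex string such as '0x80200000'."""
    def __init__(self):
        self.addr = ''
        self.remapped = False

# After parse_mem(), shm_bar_dic maps '<index>_<region name>' keys to
# {bar_index: Bar_Mem}, e.g. {'0_shm_region_0': {0: <Bar_Mem>, 2: <Bar_Mem>}}.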
def allocate_log_area(board_etree, scenario_etree, allocation_etree):
    tpm2_enabled = common.get_node("//vm[@id = '0']/mmio_resources/TPM2/text()", scenario_etree)
    if tpm2_enabled is None or tpm2_enabled == 'n':
        return

    if common.get_node("//capability[@id='log_area']", board_etree) is not None:
        log_area_min_len_native = int(common.get_node("//log_area_minimum_length/text()", board_etree), 16)
        # place the log area right after the virtual ACPI NVS region, 64K-aligned
        log_area_start_address = common.round_up(VIRT_ACPI_NVS_ADDR, 0x10000) + RESERVED_NVS_AREA
        allocation_vm_node = common.get_node("/acrn-config/vm[@id = '0']", allocation_etree)
        if allocation_vm_node is None:
            allocation_vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id='0')
        common.append_node("./log_area_start_address", hex(log_area_start_address).upper(), allocation_vm_node)
        common.append_node("./log_area_minimum_length", hex(log_area_min_len_native).upper(), allocation_vm_node)
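# allocate_log_area() leans on common.get_node()/append_node() as thin wrappers
# over lxml; a minimal sketch of their assumed behavior (hypothetical
# re-implementation for illustration, not the actual common.py).
from lxml import etree

def get_node(xpath, tree):
    """Return the first XPath match or None (assumed contract of common.get_node)."""
    result = tree.xpath(xpath)
    return result[0] if result else None

def append_node(path, text, anchor, **attrs):
    """Append an element named by the last path segment, with optional attributes."""
    parent = anchor.getroot() if hasattr(anchor, "getroot") else anchor
    node = etree.SubElement(parent, path.rsplit("/", 1)[-1], **attrs)
    node.text = text
    return node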
def remap_bar_addr_to_high(bar_addr, line):
    """Generate vbar address"""
    global HI_MMIO_OFFSET
    size = get_size(line)
    cur_addr = common.round_up(bar_addr, size)
    HI_MMIO_OFFSET = cur_addr + size
    return cur_addr
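# A quick check of the remap arithmetic with hypothetical values: a 16M BAR
# remapped at an unaligned cursor is pushed up to the next size-aligned base
# (PCI BARs are naturally aligned to their size), and the cursor advances past
# the window.
def _round_up(addr, align):  # stand-in for common.round_up (assumed semantics)
    return ((addr + align - 1) // align) * align

assert _round_up(0x4000001000, 0x1000000) == 0x4001000000   # returned vbar base
assert 0x4001000000 + 0x1000000 == 0x4002000000             # new HI_MMIO_OFFSET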
def get_free_addr(windowslist, used, size, alignment):
    if not size:
        raise ValueError(f"allocate size cannot be: {size}")
    if not windowslist:
        raise ValueError(f"No address range is specified: {windowslist}")

    # a window must be aligned at least to its own size
    alignment = max(alignment, size)
    for w in windowslist:
        new_w_start = common.round_up(w.start, alignment)
        window = AddrWindow(start=new_w_start, end=new_w_start + size - 1)
        # push the candidate past every overlapping used range; this single
        # pass assumes 'used' is sorted in ascending order
        for u in used:
            if window.overlaps(u):
                new_u_end = common.round_up(u.end + 1, alignment)
                window = AddrWindow(start=new_u_end, end=new_u_end + size - 1)
        # accept the candidate only if it still intersects the free window
        if window.overlaps(w):
            return window
    raise lib.error.ResourceError(
        f"Not enough address window for a device size: {size}, "
        f"free address windows: {windowslist}, used address windows: {used}")
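# get_free_addr() assumes an AddrWindow type with start/end fields and an
# overlaps() predicate; a minimal sketch of that contract (hypothetical,
# mirroring the MmioWindow namedtuple used by the BAR allocators below).
from collections import namedtuple

class AddrWindow(namedtuple("AddrWindow", ["start", "end"])):
    """Closed interval [start, end]; tuple ordering sorts by start address."""
    def overlaps(self, other):
        # two closed intervals intersect unless one ends before the other begins
        return not (other.end < self.start or other.start > self.end)

# example: a candidate window colliding with a used range must be pushed past it
used = AddrWindow(0x1000, 0x1fff)
candidate = AddrWindow(0x1800, 0x27ff)
assert candidate.overlaps(used)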
def sort(self, nodelist: list):
    # ascending y for nodes in aisles going up
    nodelist_up = sorted(
        [x for x in nodelist if (int(round_up((x[0] - 1) / 2)) % 2) == 0],
        key=lambda k: k[1], reverse=False)
    nodelist_up = sorted(nodelist_up, key=lambda k: int(round_up((k[0] - 1) / 2)), reverse=True)
    # descending y for nodes in aisles going down
    nodelist_down = sorted(
        [x for x in nodelist if (int(round_up((x[0] - 1) / 2)) % 2) != 0],
        key=lambda k: k[1], reverse=True)
    # lists combined and sorted by the aisle they are in, not by raw x-coordinate
    sorted_nodelist = sorted(
        nodelist_up + nodelist_down,
        key=lambda k: int(round_up((k[0] - 1) / 2)),
        reverse=True)
    return sorted_nodelist
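# Unlike the memory helpers in this file, the round_up used by sort() is a
# plain scalar ceiling: ceil((x - 1) / 2) maps a node's x-coordinate to its
# aisle, even aisles are traversed upward and odd aisles downward. A small
# sketch of that assumption, with math.ceil standing in for the undefined
# round_up:
from math import ceil  # assumed stand-in for the round_up used above

nodes = [(1, 5), (2, 1), (4, 2)]      # hypothetical (x, y) nodes
for x, y in nodes:
    aisle = int(ceil((x - 1) / 2))    # x=1 -> aisle 0 (up); x=2 -> aisle 1 (down); x=4 -> aisle 2 (up)
    direction = 'up' if aisle % 2 == 0 else 'down'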
def fn(board_etree, scenario_etree, allocation_etree):
    # this dictionary maps 'address start' to 'mem range'
    ram_range = {}

    post_launched_vm_num = 0
    for id in common.VM_TYPES:
        if common.VM_TYPES[id] in scenario_cfg_lib.VM_DB and \
           scenario_cfg_lib.VM_DB[common.VM_TYPES[id]]["load_type"] == "POST_LAUNCHED_VM":
            post_launched_vm_num += 1
    hv_ram_size = common.HV_BASE_RAM_SIZE + common.POST_LAUNCHED_VM_RAM_SIZE * post_launched_vm_num

    ivshmem_enabled = common.get_node("//IVSHMEM_ENABLED/text()", scenario_etree)
    total_shm_size = 0
    if ivshmem_enabled == 'y':
        raw_shmem_regions = scenario_etree.xpath("//IVSHMEM_REGION/text()")
        for raw_shm in raw_shmem_regions:
            if raw_shm.strip() == '':
                continue
            raw_shm_splited = raw_shm.split(',')
            if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
               and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
                try:
                    size = raw_shm_splited[1].strip()
                    int_size = int(size) * 0x100000
                    total_shm_size += int_size
                except Exception as e:
                    print(e)

    # budget twice the total shared memory size, with a 2M floor
    hv_ram_size += 2 * max(total_shm_size, 0x200000)
    assert(hv_ram_size <= HV_RAM_SIZE_MAX)

    # reserve 16M memory for hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend placing the hv ram start address above 0x10000000 to
    # reduce memory conflicts with the GRUB/SOS kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
        if hv_start_offset <= start_addr < 0x80000000:
            del board_cfg_lib.USED_RAM_RANGE[start_addr]
    ram_range = board_cfg_lib.get_ram_range()
    avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)
    hv_start_addr = int(avl_start_addr, 16) + reserved_ram
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
    board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size
    common.append_node("/acrn-config/hv/MEMORY/HV_RAM_START", hex(hv_start_addr), allocation_etree)
    common.append_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE", hex(hv_ram_size), allocation_etree)
def find_hi_mmio_window(config):
    i_cnt = 0
    mmio_min = 0
    mmio_max = 0
    is_hi_mmio = False

    iomem_lines = board_cfg_lib.get_info(common.BOARD_INFO_FILE, "<IOMEM_INFO>", "</IOMEM_INFO>")

    for line in iomem_lines:
        if "PCI Bus" not in line:
            continue

        line_start_addr = int(line.split('-')[0], 16)
        line_end_addr = int(line.split('-')[1].split()[0], 16)
        # skip windows entirely below 4G
        if line_start_addr < common.SIZE_4G and line_end_addr < common.SIZE_4G:
            continue
        elif line_start_addr < common.SIZE_4G and line_end_addr >= common.SIZE_4G:
            # window straddles the 4G boundary: only the part above 4G counts
            i_cnt += 1
            is_hi_mmio = True
            mmio_min = common.SIZE_4G
            mmio_max = line_end_addr
            continue

        # window lies entirely above 4G
        is_hi_mmio = True
        if i_cnt == 0:
            mmio_min = line_start_addr
            mmio_max = line_end_addr
        if mmio_max < line_end_addr:
            mmio_max = line_end_addr
        i_cnt += 1

    print("", file=config)
    if is_hi_mmio:
        print("#define HI_MMIO_START\t\t\t0x%xUL" % common.round_down(mmio_min, common.SIZE_G), file=config)
        print("#define HI_MMIO_END\t\t\t0x%xUL" % common.round_up(mmio_max, common.SIZE_G), file=config)
    else:
        print("#define HI_MMIO_START\t\t\t~0UL", file=config)
        print("#define HI_MMIO_END\t\t\t0UL", file=config)

    print("#define HI_MMIO_SIZE\t\t\t{}UL".format(hex(board_cfg_lib.HI_MMIO_OFFSET)), file=config)
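# The IOMEM_INFO block holds /proc/iomem-style lines; a hypothetical sample
# showing exactly what the split logic above extracts from one of them.
line = "4000000000-4fffffffff : PCI Bus 0000:00"
assert int(line.split('-')[0], 16) == 0x4000000000                 # line_start_addr
assert int(line.split('-')[1].split()[0], 16) == 0x4fffffffff      # line_end_addr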
def fn(board_etree, scenario_etree, allocation_etree):
    # this dictionary maps 'address start' to 'mem range'
    ram_range = {}

    vm_count = common.count_nodes("//*[local-name() = 'vm']", scenario_etree)
    hv_ram_size = VM_NUM_MAP_TOTAL_HV_RAM_SIZE[vm_count]

    ivshmem_enabled = common.get_text("//IVSHMEM_ENABLED", scenario_etree)
    total_shm_size = 0
    if ivshmem_enabled == 'y':
        raw_shmem_regions = scenario_etree.xpath("//IVSHMEM_REGION/text()")
        for raw_shm in raw_shmem_regions:
            if raw_shm.strip() == '':
                continue
            raw_shm_splited = raw_shm.split(',')
            if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
               and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
                try:
                    size = raw_shm_splited[1].strip()
                    int_size = int(size) * 0x100000
                    total_shm_size += int_size
                except Exception as e:
                    print(e)

    hv_ram_size += total_shm_size
    assert(hv_ram_size <= HV_RAM_SIZE_MAX)

    # reserve 16M memory for hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend placing the hv ram start address above 0x10000000 to
    # reduce memory conflicts with the GRUB/SOS kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
        if hv_start_offset <= start_addr < 0x80000000:
            del board_cfg_lib.USED_RAM_RANGE[start_addr]
    ram_range = board_cfg_lib.get_ram_range()
    avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)
    hv_start_addr = int(avl_start_addr, 16) + reserved_ram
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
    board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size
    common.append_node("/acrn-config/hv/MEMORY/HV_RAM_START", hex(hv_start_addr), allocation_etree)
    common.append_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE", hex(hv_ram_size), allocation_etree)
def get_memory(hv_info, config):
    err_dic = {}
    # this dictionary maps 'address start' to 'mem range'
    ram_range = {}

    if common.VM_COUNT in list(VM_NUM_MAP_TOTAL_HV_RAM_SIZE.keys()):
        hv_ram_size = VM_NUM_MAP_TOTAL_HV_RAM_SIZE[common.VM_COUNT]
    else:
        common.print_red("VM num should not be greater than 8", err=True)
        err_dic["board config: total vm number error"] = "VM num should not be greater than 8"
        return err_dic

    ram_range = get_ram_range()

    # reserve 16M memory for hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend placing the hv ram start address above 0x10000000 to
    # reduce memory conflicts with the GRUB/SOS kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    avl_start_addr = find_avl_memory(ram_range, str(total_size), hv_start_offset)

    hv_start_addr = int(avl_start_addr, 16) + reserved_ram
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)

    if not hv_info.mem.hv_ram_start:
        print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
    else:
        print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)
    if not hv_info.mem.hv_ram_size:
        print("CONFIG_HV_RAM_SIZE={}".format(hex(hv_ram_size)), file=config)
    else:
        print("CONFIG_HV_RAM_SIZE={}".format(hv_info.mem.hv_ram_size), file=config)

    print("CONFIG_PLATFORM_RAM_SIZE={}".format(hv_info.mem.platform_ram_size), file=config)
    print("CONFIG_LOW_RAM_SIZE={}".format(hv_info.mem.low_ram_size), file=config)
    print("CONFIG_SOS_RAM_SIZE={}".format(hv_info.mem.sos_ram_size), file=config)
    print("CONFIG_UOS_RAM_SIZE={}".format(hv_info.mem.uos_ram_size), file=config)
    print("CONFIG_STACK_SIZE={}".format(hv_info.mem.stack_size), file=config)
def generate_file(config):
    """Start to generate board.c

    :param config: it is a file pointer of board information for writing to
    """
    err_dic = {}
    # this dictionary maps 'address start' to 'mem range'
    ram_range = {}

    if common.VM_COUNT in list(VM_NUM_MAP_TOTAL_HV_RAM_SIZE.keys()):
        hv_ram_size = VM_NUM_MAP_TOTAL_HV_RAM_SIZE[common.VM_COUNT]
    else:
        common.print_red("VM num should not be greater than 8", err=True)
        err_dic["board config: total vm number error"] = "VM num should not be greater than 8"
        return err_dic

    ram_range = get_ram_range()

    # reserve 16M memory for hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend placing the hv ram start address above 0x10000000 to
    # reduce memory conflicts with the GRUB/SOS kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    avl_start_addr = find_avl_memory(ram_range, str(total_size), hv_start_offset)

    hv_start_addr = int(avl_start_addr, 16) + reserved_ram
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)

    # add config scenario name
    (err_dic, scenario_name) = common.get_scenario_name()

    print("{}".format(DESC), file=config)
    print("CONFIG_{}=y".format(scenario_name.upper()), file=config)
    print('CONFIG_BOARD="{}"'.format(board_cfg_lib.BOARD_NAME), file=config)

    (serial_type, serial_value) = get_serial_type()
    if serial_type == "portio":
        print("CONFIG_SERIAL_LEGACY=y", file=config)
        print("CONFIG_SERIAL_PIO_BASE={}".format(serial_value), file=config)
    if serial_type == "mmio":
        print("CONFIG_SERIAL_PCI=y", file=config)
        print('CONFIG_SERIAL_PCI_BDF="{}"'.format(serial_value), file=config)

    print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
    print("CONFIG_HV_RAM_SIZE={}".format(hex(hv_ram_size)), file=config)

    cpu_core_num = len(board_cfg_lib.get_processor_info())
    if scenario_name == "sdc" and cpu_core_num > 2:
        print("CONFIG_MAX_KATA_VM_NUM=1", file=config)
    else:
        if cpu_core_num == 2:
            print("# KATA VM is not supported on dual-core systems", file=config)
        print("CONFIG_MAX_KATA_VM_NUM=0", file=config)

    if is_rdt_supported():
        print("CONFIG_RDT_ENABLED=y", file=config)
    else:
        print("CONFIG_RDT_ENABLED=n", file=config)
    print("CONFIG_ENFORCE_VALIDATED_ACPI_INFO=y", file=config)

    return err_dic
def get_memory(hv_info, config):
    err_dic = {}
    # this dictionary maps 'address start' to 'mem range'
    ram_range = {}

    if common.VM_COUNT in list(VM_NUM_MAP_TOTAL_HV_RAM_SIZE.keys()):
        hv_ram_size = VM_NUM_MAP_TOTAL_HV_RAM_SIZE[common.VM_COUNT]
    else:
        common.print_red("VM num should not be greater than 8", err=True)
        err_dic["board config: total vm number error"] = "VM num should not be greater than 8"
        return err_dic

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    total_shm_size = 0
    if ivshmem_enabled == 'y':
        raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
        for raw_shm in raw_shmem_regions:
            if raw_shm is None or raw_shm.strip() == '':
                continue
            raw_shm_splited = raw_shm.split(',')
            if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
               and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
                try:
                    size = raw_shm_splited[1].strip()
                    int_size = int(size) * 0x100000
                    total_shm_size += int_size
                except Exception as e:
                    print(e)

    hv_ram_size += total_shm_size
    if hv_ram_size > HV_RAM_SIZE_MAX:
        common.print_red("requested RAM size should be smaller than {}".format(HV_RAM_SIZE_MAX), err=True)
        err_dic["board config: total vm number error"] = \
            "requested RAM size should be smaller than {}".format(HV_RAM_SIZE_MAX)
        return err_dic

    # reserve 16M memory for hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend placing the hv ram start address above 0x10000000 to
    # reduce memory conflicts with the GRUB/SOS kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
        if hv_start_offset <= start_addr < 0x80000000:
            del board_cfg_lib.USED_RAM_RANGE[start_addr]
    ram_range = board_cfg_lib.get_ram_range()
    avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)

    hv_start_addr = int(avl_start_addr, 16) + reserved_ram
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
    board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size

    if not hv_info.mem.hv_ram_start:
        print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
    else:
        print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)
    if not hv_info.mem.hv_ram_size:
        print("CONFIG_HV_RAM_SIZE={}".format(hex(hv_ram_size)), file=config)
    else:
        print("CONFIG_HV_RAM_SIZE={}".format(hv_info.mem.hv_ram_size), file=config)

    print("CONFIG_PLATFORM_RAM_SIZE={}".format(hv_info.mem.platform_ram_size), file=config)
    print("CONFIG_LOW_RAM_SIZE={}".format(hv_info.mem.low_ram_size), file=config)
    print("CONFIG_SOS_RAM_SIZE={}".format(hv_info.mem.sos_ram_size), file=config)
    print("CONFIG_UOS_RAM_SIZE={}".format(hv_info.mem.uos_ram_size), file=config)
    print("CONFIG_STACK_SIZE={}".format(hv_info.mem.stack_size), file=config)
    print("CONFIG_IVSHMEM_ENABLED={}".format(hv_info.mem.ivshmem_enable), file=config)
def generate_file(config):
    matching_mmios, non_matching_mmios = get_mmio_windows_with_key(['PCI Bus 0000:00'])
    matching_mmios = removed_nested(matching_mmios, non_matching_mmios)
    non_matching_mmios = [
        w for w in non_matching_mmios
        if any((w.overlaps(w2) for w2 in matching_mmios))]
    non_matching_mmios = merged_windows(non_matching_mmios)

    # list of all vmsix supported devices in bdf format
    bdf_list = board_cfg_lib.get_known_caps_pci_devs().get('VMSIX', [])
    # list of all PRE_LAUNCHED_VMs' vmsix supported passthrough devices in bdf format
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
    pci_devs = scenario_cfg_lib.get_pci_devs(pci_items)
    pci_devs_per_vm = get_devs_per_vm_with_key(pci_devs, bdf_list)
    # list of SOS vmsix supported devices, excluding other PRE_LAUNCHED_VMs' devices, in bdf format
    sos_bdf_list = [
        d for d in bdf_list
        if all((d not in pci_devs_per_vm[i] for i in pci_devs_per_vm))]
    for vm_i in pci_devs_per_vm:
        vm_type = common.VM_TYPES[vm_i]
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            pci_devs_per_vm[vm_i] = sos_bdf_list

    mmiolist_per_vm = {}
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_i not in mmiolist_per_vm.keys():
            mmiolist_per_vm[vm_i] = []
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            mmiolist_per_vm[vm_i] = non_matching_mmios
        else:
            if vm_i in pci_devs.keys():
                match, _ = get_mmio_windows_with_key(pci_devs[vm_i])
                mmiolist_per_vm[vm_i] = match
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                if vm_i not in mmiolist_per_vm.keys():
                    mmiolist_per_vm[vm_i] = []
                # TSN reserved region
                mmiolist_per_vm[vm_i].append(MmioWindow(start=0xffff0000, end=0xffffffff))
                # For the pre-launched vm, if the TPM is passthrough, this address is used
                if vm_i == 0 and board_cfg_lib.is_tpm_passthru():
                    mmiolist_per_vm[vm_i].append(MmioWindow(start=0xfed40000, end=0xfed40000 + 0x5000 - 1))
                # For the pre-launched vm on ehl-crb-b, if the p2sb is passthrough, this address is used
                if board_cfg_lib.is_matched_board(('ehl-crb-b')):
                    p2sb_start = board_cfg_lib.find_p2sb_bar_addr()
                    mmiolist_per_vm[vm_i].append(MmioWindow(start=p2sb_start, end=p2sb_start + 0x1000000 - 1))
                mmiolist_per_vm[vm_i].sort()

    # start to generate board_info.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print(VBAR_INFO_DEFINE, file=config)

    common.get_vm_types()
    pre_vm = False
    sos_vm = False
    for vm_type in common.VM_TYPES.values():
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            pre_vm = True
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            sos_vm = True

    if not pre_vm and not sos_vm:
        print(VBAR_INFO_ENDIF, file=config)
        return

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    if ivshmem_enabled == 'y':
        for vm_id, vm_type in common.VM_TYPES.items():
            free_bar = []
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                board_cfg_lib.parse_mem()
                for shm_name, bar_attr_dic in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items():
                    index = shm_name[:shm_name.find('_')]
                    i_cnt = 0
                    for bar_i, bar_attr in bar_attr_dic.items():
                        i_cnt += 1
                        if bar_i == 2:
                            raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                                                       "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
                            for shm in raw_shmem_regions:
                                if shm is None or shm.strip() == '':
                                    continue
                                shm_splited = shm.split(',')
                                name = shm_splited[0].strip()
                                size = shm_splited[1].strip()
                                try:
                                    int_size = int(size) * 0x100000
                                except ValueError:
                                    int_size = 0
                            bar_2 = int(bar_attr.addr, 16)
                            mmiolist_per_vm[vm_id].append(MmioWindow(start=bar_2, end=bar_2 + int_size - 1))
                            mmiolist_per_vm[vm_id].sort()
                        if bar_i == 0:
                            bar_0 = MmioWindow(start=int(bar_attr.addr, 16),
                                               end=int(bar_attr.addr, 16) + 0x100 - 1)
                            mmiolist_per_vm[vm_id].append(bar_0)
                            mmiolist_per_vm[vm_id].sort()
                            if len(bar_attr_dic.keys()) == 1:
                                print("#define IVSHMEM_DEVICE_%-23s" % (str(index) + "_VBAR"),
                                      " .vbar_base[{}] = {}UL".format(bar_i, bar_attr.addr), file=config)
                            else:
                                print("#define IVSHMEM_DEVICE_%-23s" % (str(index) + "_VBAR"),
                                      " .vbar_base[{}] = {}UL, \\".format(bar_i, bar_attr.addr), file=config)
                                # vbar[1] for shared memory is fixed to 4K
                                free_bar = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G - 1)],
                                                         mmiolist_per_vm[vm_id],
                                                         BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                                free_bar_start_addr = common.round_up(free_bar.start, BAR1_SHEMEM_ALIGNMENT)
                                free_bar_end_addr = free_bar_start_addr + BAR1_SHEMEM_SIZE - 1
                                free_bar = MmioWindow(free_bar_start_addr, free_bar_end_addr)
                                mmiolist_per_vm[vm_id].append(free_bar)
                                mmiolist_per_vm[vm_id].sort()
                                print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar.start), file=config)
                        elif i_cnt == len(bar_attr_dic.keys()):
                            print("{}.vbar_base[{}] = {}UL".format(' ' * 54, bar_i, bar_attr.addr), file=config)
                        else:
                            print("{}.vbar_base[{}] = {}UL, \\".format(' ' * 54, bar_i, bar_attr.addr), file=config)
                    print("", file=config)
            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                                        "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
                shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
                if vm_id not in shmem_regions.keys():
                    continue
                idx = 0
                for shm in ivshmem_region:
                    if shm is None or shm.strip() == '':
                        continue
                    shm_splited = shm.split(',')
                    name = shm_splited[0].strip()
                    size = shm_splited[1].strip()
                    try:
                        int_size = int(size) * 0x100000
                    except ValueError:
                        int_size = 0
                    # vbar[0] for shared memory is 0x100 bytes
                    free_bar0 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
                    free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
                    free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
                    free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar0)
                    mmiolist_per_vm[vm_id].sort()
                    # vbar[1] for shared memory is 4K
                    free_bar1 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                    free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
                    free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
                    free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar1)
                    mmiolist_per_vm[vm_id].sort()
                    # vbar[2] for shared memory is the specified size in MB; the
                    # +0xC low bits tag it as a 64-bit prefetchable BAR
                    free_bar2 = get_free_mmio(matching_mmios, mmiolist_per_vm[vm_id],
                                              BAR2_SHEMEM_ALIGNMENT + int_size)
                    free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
                    free_bar2_end_addr = free_bar2_start_addr + int_size - 1
                    free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar2)
                    mmiolist_per_vm[vm_id].sort()
                    print("#define SOS_IVSHMEM_DEVICE_%-19s" % (str(idx) + "_VBAR"),
                          " .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
                    print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
                    print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
                    print("", file=config)
                    idx += 1

    # Get passthrough devices vbar bases
    compared_bdf = []
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue
            write_vbar(i_cnt, bdf, board_cfg_lib.PCI_DEV_BAR_DESC.pci_bar_dic, bar_attr,
                       pci_devs_per_vm, mmiolist_per_vm, matching_mmios, config)
            i_cnt += 1

    write_vuart_vbar(mmiolist_per_vm, matching_mmios, config)
    print(VBAR_INFO_ENDIF, file=config)
def get_memory(hv_info, config):
    err_dic = {}
    # this dictionary maps 'address start' to 'mem range'
    ram_range = {}

    post_launched_vm_num = 0
    for id in common.VM_TYPES:
        if common.VM_TYPES[id] in scenario_cfg_lib.VM_DB and \
           scenario_cfg_lib.VM_DB[common.VM_TYPES[id]]["load_type"] == "POST_LAUNCHED_VM":
            post_launched_vm_num += 1
    hv_ram_size = common.HV_BASE_RAM_SIZE + common.POST_LAUNCHED_VM_RAM_SIZE * post_launched_vm_num

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
    total_shm_size = 0
    if ivshmem_enabled == 'y':
        raw_shmem_regions = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
        for raw_shm in raw_shmem_regions:
            if raw_shm is None or raw_shm.strip() == '':
                continue
            raw_shm_splited = raw_shm.split(',')
            if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
               and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
                try:
                    size = raw_shm_splited[1].strip()
                    int_size = int(size) * 0x100000
                    total_shm_size += int_size
                except Exception as e:
                    print(e)

    # budget twice the total shared memory size, with a 2M floor
    hv_ram_size += 2 * max(total_shm_size, 0x200000)
    if hv_ram_size > HV_RAM_SIZE_MAX:
        common.print_red("requested RAM size should be smaller than {}".format(HV_RAM_SIZE_MAX), err=True)
        err_dic["board config: total vm number error"] = \
            "requested RAM size should be smaller than {}".format(HV_RAM_SIZE_MAX)
        return err_dic

    # reserve 16M memory for hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend placing the hv ram start address above 0x10000000 to
    # reduce memory conflicts with the GRUB/SOS kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
        if hv_start_offset <= start_addr < 0x80000000:
            del board_cfg_lib.USED_RAM_RANGE[start_addr]
    ram_range = board_cfg_lib.get_ram_range()
    avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size), hv_start_offset)

    hv_start_addr = int(avl_start_addr, 16) + reserved_ram
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
    board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size

    if not hv_info.mem.hv_ram_start:
        print("CONFIG_HV_RAM_START={}".format(hex(hv_start_addr)), file=config)
    else:
        print("CONFIG_HV_RAM_START={}".format(hv_info.mem.hv_ram_start), file=config)
    if not hv_info.mem.hv_ram_size:
        print("CONFIG_HV_RAM_SIZE={}".format(hex(hv_ram_size)), file=config)
    else:
        print("CONFIG_HV_RAM_SIZE={}".format(hv_info.mem.hv_ram_size), file=config)

    print("CONFIG_PLATFORM_RAM_SIZE={}".format(hv_info.mem.platform_ram_size), file=config)
    print("CONFIG_LOW_RAM_SIZE={}".format(hv_info.mem.low_ram_size), file=config)
    print("CONFIG_STACK_SIZE={}".format(hv_info.mem.stack_size), file=config)
    print("CONFIG_IVSHMEM_ENABLED={}".format(hv_info.mem.ivshmem_enable), file=config)
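# A worked example of the sizing above, with hypothetical constants: two 2M
# IVSHMEM regions and two post-launched VMs. The 2 * max(...) term doubles the
# shared-memory budget and never adds less than 4M.
HV_BASE_RAM_SIZE = 0x1400000            # hypothetical 20M base budget
POST_LAUNCHED_VM_RAM_SIZE = 0x1000000   # hypothetical 16M per post-launched VM

total_shm_size = 2 * (2 * 0x100000)     # two regions of 2MB each
hv_ram_size = HV_BASE_RAM_SIZE + POST_LAUNCHED_VM_RAM_SIZE * 2
hv_ram_size += 2 * max(total_shm_size, 0x200000)
assert hv_ram_size == 0x1400000 + 0x2000000 + 0x800000  # 0x3c00000, i.e. 60M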
def round_up(addr, mem_align):
    """Keep memory aligned"""
    return common.round_up(addr, mem_align)
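# For reference, common.round_up()/round_down() are assumed to be the usual
# align-to-boundary helpers; a minimal sketch consistent with every call site
# in this file (alignments here are powers of two, but this works for any > 0).
def _round_up(addr, align):
    return ((addr + align - 1) // align) * align

def _round_down(addr, align):
    return (addr // align) * align

assert _round_up(0x10000001, 0x200000) == 0x10200000
assert _round_down(0x10000001, 0x200000) == 0x10000000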
def write_vbar(i_cnt, bdf, pci_bar_dic, bar_attr,
               pci_devs_per_vm, mmiolist_per_vm, sos_mmio_range, config):
    """
    Parse and generate vbar

    :param i_cnt: the number of pci devices that have the same PCI sub class name
    :param bdf: a string that contains the BDF
    :param pci_bar_dic: a dictionary of pci vbars for those BDFs
    :param bar_attr: a class instance that contains the PCI bar attributes
    :param config: a file pointer of pci information for writing to
    """
    align = ' ' * 54
    ptdev_mmio_str = ''

    tmp_sub_name = board_cfg_lib.get_sub_pci_name(i_cnt, bar_attr)
    if bdf in pci_bar_dic.keys():
        bar_list = list(pci_bar_dic[bdf].keys())
        bar_len = len(bar_list)
        bar_num = 0
        bar_val = ""
        free = MmioWindow(0, 0)
        is_vmsix = False
        # If the device is a vmsix device, find a free mmio window up to 4k size
        if board_cfg_lib.is_matched_board(('ehl-crb-b')):
            for vm_i in pci_devs_per_vm:
                if bdf in pci_devs_per_vm[vm_i]:
                    if scenario_cfg_lib.VM_DB[common.VM_TYPES[vm_i]]['load_type'] == "PRE_LAUNCHED_VM":
                        is_vmsix = True
                        bar_len += 1
                        # For a pre-launched VM, the window range is from 2G to 4G
                        free = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G - 1)],
                                             mmiolist_per_vm[vm_i],
                                             VMSIX_VBAR_ALIGNMENT + VMSIX_VBAR_SIZE)
                        free_vbar_start_addr = common.round_up(free.start, VMSIX_VBAR_ALIGNMENT)
                        free_vbar_end_addr = free_vbar_start_addr + VMSIX_VBAR_SIZE - 1
                        free = MmioWindow(free_vbar_start_addr, free_vbar_end_addr)
                        mmiolist_per_vm[vm_i].append(free)
                        mmiolist_per_vm[vm_i].sort()
                        break

        for bar_i in bar_list:
            if not bar_attr.remappable:
                print("/* TODO: add {} 64bit BAR support */".format(tmp_sub_name), file=config)

            bar_num += 1
            bar_val = pci_bar_dic[bdf][bar_i].addr
            if pci_bar_dic[bdf][bar_i].remapped:
                ptdev_mmio_str = 'HI_MMIO_START + '

            if bar_num == bar_len:
                if bar_len == 1:
                    print("#define %-38s" % (tmp_sub_name + "_VBAR"),
                          " .vbar_base[{}] = {}{}UL".format(bar_i, ptdev_mmio_str, bar_val), file=config)
                else:
                    print("{}.vbar_base[{}] = {}{}UL".format(align, bar_i, ptdev_mmio_str, bar_val), file=config)
            elif bar_num == 1:
                print("#define %-38s" % (tmp_sub_name + "_VBAR"),
                      " .vbar_base[{}] = {}{}UL, \\".format(bar_i, ptdev_mmio_str, bar_val), file=config)
            else:
                print("{}.vbar_base[{}] = {}{}UL, \\".format(align, bar_i, ptdev_mmio_str, bar_val), file=config)

        if is_vmsix:
            next_bar_idx = find_next_bar(bar_val, bar_list)
            print("{}.vbar_base[{}] = {}{}UL".format(align, next_bar_idx, ptdev_mmio_str, hex(free.start)),
                  file=config)
        print("", file=config)
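# write_vbar() calls find_next_bar(), which is not shown in this section; a
# hypothetical sketch of the contract it appears to assume (the real helper may
# differ). A 64-bit memory BAR consumes the following index for its upper half,
# so the extra vmsix window lands two slots after it instead of one.
def find_next_bar(bar_val, bar_list):
    idx = int(bar_list[-1])
    # PCI BAR type field: bits [2:1] == 0b10 (raw value & 0x6 == 0x4) marks 64-bit
    if (int(bar_val, 16) & 0x6) == 0x4:
        idx += 2
    else:
        idx += 1
    return idx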
def write_ivshmem_vbar(mmiolist_per_vm, sos_mmio_range, config):
    for vm_id, vm_type in common.VM_TYPES.items():
        ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                                "FEATURES", "IVSHMEM", "IVSHMEM_REGION")
        shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
        if vm_id not in shmem_regions:
            continue
        shmems = shmem_regions.get(vm_id)
        idx = 0
        for shm in shmems:
            if shm is None or shm.strip() == '':
                continue
            shm_splited = shm.split(',')
            size = shm_splited[1].strip()
            try:
                int_size = int(size) * 0x100000
            except ValueError:
                int_size = 0
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
                # vbar[0] for shared memory is 4K
                free_bar0 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id],
                                          BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
                free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
                free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
                free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar0)
                mmiolist_per_vm[vm_id].sort()
                # vbar[1] for shared memory is 4K
                free_bar1 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id],
                                          BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
                free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
                free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar1)
                mmiolist_per_vm[vm_id].sort()
                # vbar[2] for shared memory is the specified size in MB; the +0xC
                # low bits tag it as a 64-bit prefetchable BAR
                free_bar2 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id],
                                          BAR2_SHEMEM_ALIGNMENT + int_size)
                free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
                free_bar2_end_addr = free_bar2_start_addr + int_size - 1
                free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar2)
                mmiolist_per_vm[vm_id].sort()
                print("#define SOS_IVSHMEM_DEVICE_%-19s" % (str(idx) + "_VBAR"),
                      " .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
                print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
                print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
                print("", file=config)
                idx += 1
            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                mmioRange = [MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G - 1)]
                # vbar[0] for shared memory is 4K
                free_bar0 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id],
                                          BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
                free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
                free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
                free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar0)
                mmiolist_per_vm[vm_id].sort()
                # vbar[1] for shared memory is 4K
                free_bar1 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id],
                                          BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
                free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
                free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar1)
                mmiolist_per_vm[vm_id].sort()
                # vbar[2] for shared memory is the specified size in MB
                free_bar2 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id],
                                          BAR2_SHEMEM_ALIGNMENT + int_size)
                free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
                free_bar2_end_addr = free_bar2_start_addr + int_size - 1
                free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar2)
                mmiolist_per_vm[vm_id].sort()
                print("#define IVSHMEM_DEVICE_%-23s" % (str(idx) + "_VBAR"),
                      " .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
                print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
                print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
                print("", file=config)
                idx += 1
def write_vuart_vbar(mmiolist_per_vm, sos_mmio_range, config):
    # get legacy vuart information
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
    # get pci vuart information
    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)

    for vm_id in vuarts.keys():
        vm_type = common.VM_TYPES[vm_id]
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
            continue
        for vuart_id in vuarts[vm_id].keys():
            if vuarts[vm_id][vuart_id]['base'] == "INVALID_PCI_BASE":
                continue
            # Skip pci vuart 0 if the legacy vuart 0 is enabled
            if vuart_id == 0 and vm_id in vuart0_setting \
               and vuart0_setting[vm_id]['base'] != "INVALID_COM_BASE":
                continue
            # Skip pci vuart 1 if the legacy vuart 1 is enabled
            if vuart_id == 1 and vm_id in vuart1_setting \
               and vuart1_setting[vm_id]['base'] != "INVALID_COM_BASE":
                continue

            free_bar0 = []
            free_bar1 = []
            # a vuart device requires two BARs
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
                free_bar0 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id],
                                          PCI_VUART_VBAR0_SIZE + PCI_VUART_VBAR0_ALIGNMENT)
                free_bar0_start_addr = common.round_up(free_bar0.start, PCI_VUART_VBAR0_ALIGNMENT)
                free_bar0_end_addr = free_bar0_start_addr + PCI_VUART_VBAR0_SIZE - 1
                free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar0)
                mmiolist_per_vm[vm_id].sort()
                free_bar1 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id],
                                          PCI_VUART_VBAR1_SIZE + PCI_VUART_VBAR1_ALIGNMENT)
                free_bar1_start_addr = common.round_up(free_bar1.start, PCI_VUART_VBAR1_ALIGNMENT)
                free_bar1_end_addr = free_bar1_start_addr + PCI_VUART_VBAR1_SIZE - 1
                free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar1)
                mmiolist_per_vm[vm_id].sort()
            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
                free_bar0 = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G - 1)],
                                          mmiolist_per_vm[vm_id],
                                          PCI_VUART_VBAR0_SIZE + PCI_VUART_VBAR0_ALIGNMENT)
                free_bar0_start_addr = common.round_up(free_bar0.start, PCI_VUART_VBAR0_ALIGNMENT)
                free_bar0_end_addr = free_bar0_start_addr + PCI_VUART_VBAR0_SIZE - 1
                free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar0)
                mmiolist_per_vm[vm_id].sort()
                free_bar1 = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G - 1)],
                                          mmiolist_per_vm[vm_id],
                                          PCI_VUART_VBAR1_SIZE + PCI_VUART_VBAR1_ALIGNMENT)
                free_bar1_start_addr = common.round_up(free_bar1.start, PCI_VUART_VBAR1_ALIGNMENT)
                free_bar1_end_addr = free_bar1_start_addr + PCI_VUART_VBAR1_SIZE - 1
                free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar1)
                mmiolist_per_vm[vm_id].sort()

            print("#define VM%s" % (str(vm_id) + "_VUART_%-28s") % (str(vuart_id) + "_VBAR"),
                  " .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
            print("{}.vbar_base[1] = {:#x}UL".format(' ' * 54, free_bar1.start), file=config)
            print("", file=config)