Code Example #1
 def get_info(self):
     """
     Get the vuart items that belong to this class
     :return: None
     """
     self.v0_vuart = common.get_vuart_info_id(self.scenario_info, 0)
     self.v1_vuart = common.get_vuart_info_id(self.scenario_info, 1)
Code Example #2
def get_pci_vuart_num(vuarts):

    vuarts_num = {}
    # get legacy vuart information
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
    for vm_i,vuart_list in vuarts.items():
        vuarts_num[vm_i] = 0
        for vuart_id in vuart_list:
            if vuarts[vm_i][vuart_id]['base'] != "INVALID_PCI_BASE":
                vuarts_num[vm_i] += 1

    for vm_i in vuart0_setting:
        load_order = common.LOAD_ORDER[vm_i]
        # Skip post-launched vm's pci base vuart0
        if "POST_LAUNCHED_VM" == load_order and 0 in vuarts[vm_i].keys() \
             and vuarts[vm_i][0]['base'] != "INVALID_PCI_BASE":
            vuarts_num[vm_i] -= 1
            continue
        # Skip pci vuart 0 if the legacy vuart 0 is enabled
        if vuart0_setting[vm_i]['base'] != "INVALID_COM_BASE" and 0 in vuarts[vm_i].keys() \
             and vuarts[vm_i][0]['base'] != "INVALID_PCI_BASE":
            vuarts_num[vm_i] -= 1
    for vm_i in vuart1_setting:
        # Skip pci vuart 1 if the legacy vuart 1 is enabled
        if vuart1_setting[vm_i]['base'] != "INVALID_COM_BASE" and 1 in vuarts[vm_i].keys() \
             and vuarts[vm_i][1]['base'] != "INVALID_PCI_BASE":
            vuarts_num[vm_i] -= 1
    return vuarts_num
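For reference, below is a minimal, self-contained sketch of the data shapes get_pci_vuart_num() appears to consume. The sample dictionaries and the placeholder values are illustrative assumptions; only the "INVALID_PCI_BASE"/"INVALID_COM_BASE" sentinels come from the examples, and in the real tool the data is produced by common.get_vuart_info() and common.get_vuart_info_id().

# Illustrative only: hand-written data in the shape the helper above expects.
vuarts = {
    0: {0: {'base': 'PCI_VUART_BASE'}, 1: {'base': 'INVALID_PCI_BASE'}},
    1: {0: {'base': 'PCI_VUART_BASE'}, 1: {'base': 'PCI_VUART_BASE'}},
}
vuart0_setting = {
    0: {'base': 'CONFIG_COM_BASE'},    # legacy vuart 0 enabled on VM 0
    1: {'base': 'INVALID_COM_BASE'},
}

# Count PCI vuarts per VM, then drop PCI vuart 0 where legacy vuart 0 is
# enabled, mirroring the first two loops of get_pci_vuart_num() above.
vuarts_num = {vm: sum(1 for v in cfg.values() if v['base'] != 'INVALID_PCI_BASE')
              for vm, cfg in vuarts.items()}
for vm, cfg in vuart0_setting.items():
    if cfg['base'] != 'INVALID_COM_BASE' and 0 in vuarts[vm] \
            and vuarts[vm][0]['base'] != 'INVALID_PCI_BASE':
        vuarts_num[vm] -= 1
print(vuarts_num)  # {0: 0, 1: 2}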
Code Example #3
def get_vuart_info_id(config_file, idx):
    """
    Get vuart information by vuart index
    :param config_file: the configuration file from which the script reads the information
    :param idx: vuart index in range [0,1]
    :return: dictionary, keyed by VM id, of the vuart settings
    """
    tmp_tag = common.get_vuart_info_id(config_file, idx)
    return tmp_tag
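Judging from how callers index the result (for example vuart1_setting[vm_id]['base'] in the later examples), get_vuart_info_id() returns a dictionary keyed by VM id. The sketch below only illustrates that shape; the 'irq' field and the concrete values are assumptions, not confirmed by these examples.

# Hypothetical return shape, inferred from the callers on this page.
vuart1_setting = {
    0: {'base': 'SOS_COM2_BASE', 'irq': 3},   # assumed fields/values
    2: {'base': 'INVALID_COM_BASE'},
}
for vm_id, cfg in vuart1_setting.items():
    if cfg['base'] != 'INVALID_COM_BASE':
        print('VM {} has legacy vuart 1 enabled'.format(vm_id))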
Code Example #4
def check_communication_vuart(launch_communication_vuarts, scenario_info):
    vuarts = common.get_vuart_info(scenario_info)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)

    for uos_id, vuart_list in launch_communication_vuarts.items():
        vuart_key = 'uos:id={},communication_vuarts,communication_vuart'.format(uos_id)
        for vuart_id in vuart_list:
            if not vuart_id:
                return
            if int(vuart_id) not in vuarts[uos_id].keys():
                ERR_LIST[vuart_key] = "communication_vuart {} of uos {} should be configured " \
                     "in scenario setting.".format(vuart_id, uos_id)
                return
            if int(vuart_id) == 1 and vuarts[uos_id][1]['base'] != "INVALID_PCI_BASE":
                if uos_id in vuart1_setting.keys() and vuart1_setting[uos_id]['base'] != "INVALID_COM_BASE":
                    ERR_LIST[vuart_key] = "uos {}'s communication_vuart 1 and legacy_vuart 1 should " \
                        "not be configured at the same time.".format(uos_id)
                return
Code Example #5
File: misc_cfg_h.py  Project: sh1970/acrn-hypervisor
def generate_file(config):
    """
    Start to generate misc_cfg.h
    :param config: a file pointer to write the generated configuration to
    """
    board_cfg_lib.get_valid_irq(common.BOARD_INFO_FILE)

    # get the vuart0/vuart1 which the user chose from the board_private section of scenario.xml
    (err_dic, ttys_n) = board_cfg_lib.parser_hv_console()
    if err_dic:
        return err_dic

    # parse sos_bootargs/rootfs/console
    (err_dic, sos_cmdlines, sos_rootfs, vuart0_dic, vuart1_dic) = parse_boot_info()
    if err_dic:
        return err_dic

    if vuart0_dic:
        # parse to get the port base/irq of vuart0/vuart1
        vuart0_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart0_dic.keys())[0]]
        vuart0_irq = vuart0_dic[list(vuart0_dic.keys())[0]]

    vuart1_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart1_dic.keys())[0]]
    vuart1_irq = vuart1_dic[list(vuart1_dic.keys())[0]]

    # parse the ttys vuartx settings dict: {vmid: base/irq}
    vuart0_setting = Vuart()
    vuart1_setting = Vuart()
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)

    # sos command lines information
    sos_cmdlines = [i for i in sos_cmdlines[0].split() if i != '']

    # get native rootfs list from board_info.xml
    (root_devs, root_dev_num) = board_cfg_lib.get_rootfs(common.BOARD_INFO_FILE)

    # start to generate misc_cfg.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print("{}".format(MISC_CFG_HEADER), file=config)
    print("", file=config)

    # define rootfs with macro
    #for i in range(root_dev_num):
    #    print('#define ROOTFS_{}\t\t"root={} "'.format(i, root_devs[i]), file=config)

    # sos rootfs and console
    if "SOS_VM" in common.VM_TYPES.values():
        print('#define SOS_ROOTFS\t\t"root={} "'.format(sos_rootfs[0]), file=config)
        if ttys_n:
            print('#define SOS_CONSOLE\t\t"console={} "'.format(ttys_n), file=config)
        else:
            print('#define SOS_CONSOLE\t\t" "', file=config)

    # sos com base/irq
    i_type = 0
    for vm_i,vm_type in common.VM_TYPES.items():
        if vm_type == "SOS_VM":
            i_type = vm_i
            break

    if "SOS_VM" in common.VM_TYPES.values():
        if vuart0_dic:
            print("#define SOS_COM1_BASE\t\t{}U".format(vuart0_port_base), file=config)
            print("#define SOS_COM1_IRQ\t\t{}U".format(vuart0_irq), file=config)
        else:
            print("#define SOS_COM1_BASE\t\t0U", file=config)
            print("#define SOS_COM1_IRQ\t\t0U", file=config)

        if vuart1_setting[i_type]['base'] != "INVALID_COM_BASE":
            print("#define SOS_COM2_BASE\t\t{}U".format(vuart1_port_base), file=config)
            print("#define SOS_COM2_IRQ\t\t{}U".format(vuart1_irq), file=config)

        # sos boot command line
        print("", file=config)

    if "SOS_VM" in common.VM_TYPES.values():
        sos_bootarg_diff(sos_cmdlines, config)
        print("", file=config)

    if board_cfg_lib.is_rdt_supported():
        print("", file=config)
        common_clos_max = board_cfg_lib.get_common_clos_max()
        max_cache_clos_entries = common_clos_max
        if board_cfg_lib.is_cdp_enabled():
            max_cache_clos_entries = 2 * common_clos_max
        print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(max_cache_clos_entries), file=config)

        (rdt_resources, rdt_res_clos_max, _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
        cat_mask_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "CLOS_MASK")
        mba_delay_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "RDT", "MBA_DELAY")
        idx = 0
        for mba_delay_mask in mba_delay_list:
            print("#define MBA_MASK_{}\t\t\t{}U".format(idx, mba_delay_mask), file=config)
            idx += 1

        idx = 0
        for cat_mask in cat_mask_list:
            print("#define CLOS_MASK_{}\t\t\t{}U".format(idx, cat_mask), file=config)
            idx += 1
        print("", file=config)

    vm0_pre_launch = False
    common.get_vm_types()
    for vm_idx,vm_type in common.VM_TYPES.items():
        if vm_idx == 0 and scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            vm0_pre_launch = True

    if vm0_pre_launch and board_cfg_lib.is_tpm_passthru():
        print("#define VM0_PASSTHROUGH_TPM", file=config)
        print("#define VM0_TPM_BUFFER_BASE_ADDR   0xFED40000UL", file=config)
        print("#define VM0_TPM_BUFFER_SIZE        0x5000UL", file=config)

        print("", file=config)

    print("{}".format(MISC_CFG_END), file=config)

    return err_dic
Code Example #6
def generate_file(config):
    """
    Start to generate misc_cfg.h
    :param config: a file pointer to write the generated configuration to
    """
    board_cfg_lib.get_valid_irq(common.BOARD_INFO_FILE)

    # get the vuart0/vuart1 which the user chose from the board_private section of scenario.xml
    (err_dic, ttys_n) = board_cfg_lib.parser_hv_console()
    if err_dic:
        return err_dic

    # parse sos_bootargs/rootfs/console
    (err_dic, sos_cmdlines, sos_rootfs, vuart0_dic,
     vuart1_dic) = parse_boot_info()
    if err_dic:
        return err_dic

    if vuart0_dic:
        # parse to get the port base/irq of vuart0/vuart1
        vuart0_port_base = board_cfg_lib.LEGACY_TTYS[list(
            vuart0_dic.keys())[0]]
        vuart0_irq = vuart0_dic[list(vuart0_dic.keys())[0]]

    vuart1_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart1_dic.keys())[0]]
    vuart1_irq = vuart1_dic[list(vuart1_dic.keys())[0]]

    # parse the ttys vuartx settings dict: {vmid: base/irq}
    vuart0_setting = Vuart()
    vuart1_setting = Vuart()
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)

    # sos command lines information
    sos_cmdlines = [i for i in sos_cmdlines[0].split() if i != '']

    # add maxcpus parameter into sos cmdlines if there are pre-launched VMs
    pcpu_list = board_cfg_lib.get_processor_info()
    cpu_affinity = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE,
                                           "cpu_affinity", "pcpu_id")
    pre_cpu_list = []
    sos_cpu_num = 0
    for vmid, cpu_list in cpu_affinity.items():
        if vmid in common.VM_TYPES and cpu_list != [None]:
            vm_type = common.VM_TYPES[vmid]
            load_type = ''
            if vm_type in scenario_cfg_lib.VM_DB:
                load_type = scenario_cfg_lib.VM_DB[vm_type]['load_type']
            if load_type == "PRE_LAUNCHED_VM":
                pre_cpu_list += cpu_list
            elif load_type == "SOS_VM":
                sos_cpu_num += len(cpu_list)
    if sos_cpu_num == 0:
        sos_cpu_num_max = len(list(set(pcpu_list) - set(pre_cpu_list)))
    else:
        sos_cpu_num_max = sos_cpu_num
    if sos_cpu_num_max > 0:
        sos_cmdlines.append('maxcpus=' + str(sos_cpu_num_max))

    # get native rootfs list from board_info.xml
    (root_devs,
     root_dev_num) = board_cfg_lib.get_rootfs(common.BOARD_INFO_FILE)

    # start to generate misc_cfg.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print("{}".format(MISC_CFG_HEADER), file=config)
    print("", file=config)

    # define rootfs with macro
    #for i in range(root_dev_num):
    #    print('#define ROOTFS_{}\t\t"root={} "'.format(i, root_devs[i]), file=config)

    # sos rootfs and console
    if "SOS_VM" in common.VM_TYPES.values():
        print('#define SOS_ROOTFS\t\t"root={} "'.format(sos_rootfs[0]),
              file=config)
        if ttys_n:
            print('#define SOS_CONSOLE\t\t"console={} "'.format(ttys_n),
                  file=config)
        else:
            print('#define SOS_CONSOLE\t\t" "', file=config)

    # sos com base/irq
    i_type = 0
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_type == "SOS_VM":
            i_type = vm_i
            break

    if "SOS_VM" in common.VM_TYPES.values():
        if vuart0_dic:
            print("#define SOS_COM1_BASE\t\t{}U".format(vuart0_port_base),
                  file=config)
            print("#define SOS_COM1_IRQ\t\t{}U".format(vuart0_irq),
                  file=config)
        else:
            print("#define SOS_COM1_BASE\t\t0U", file=config)
            print("#define SOS_COM1_IRQ\t\t0U", file=config)

        if vuart1_setting[i_type]['base'] != "INVALID_COM_BASE":
            print("#define SOS_COM2_BASE\t\t{}U".format(vuart1_port_base),
                  file=config)
            print("#define SOS_COM2_IRQ\t\t{}U".format(vuart1_irq),
                  file=config)

        # sos boot command line
        print("", file=config)

    if "SOS_VM" in common.VM_TYPES.values():
        sos_bootarg_diff(sos_cmdlines, config)
        print("", file=config)

    cpu_affinity_per_vm_gen(config)

    common_clos_max = board_cfg_lib.get_common_clos_max()
    max_mba_clos_entries = common_clos_max
    max_cache_clos_entries = common_clos_max

    comments_max_clos = '''
/*
 * The maximum CLOS that is allowed by ACRN hypervisor,
 * its value is set to be least common Max CLOS (CPUID.(EAX=0x10,ECX=ResID):EDX[15:0])
 * among all supported RDT resources in the platform. In other words, it is
 * min(maximum CLOS of L2, L3 and MBA). This is done in order to have consistent
 * CLOS allocations between all the RDT resources.
 */'''

    comments_max_mba_clos = '''
/*
 * Max number of Cache Mask entries corresponding to each CLOS.
 * This can vary if CDP is enabled vs disabled, as each CLOS entry
 * will have corresponding cache mask values for Data and Code when
 * CDP is enabled.
 */'''

    comments_max_cache_clos = '''
/* Max number of MBA delay entries corresponding to each CLOS. */'''

    if board_cfg_lib.is_cdp_enabled():
        max_cache_clos_entries_cdp_enable = 2 * common_clos_max
        (res_info, rdt_res_clos_max,
         clos_max_mask_list) = board_cfg_lib.clos_info_parser(
             common.BOARD_INFO_FILE)
        common_clos_max_cdp_disable = min(rdt_res_clos_max)

        print("#ifdef CONFIG_RDT_ENABLED", file=config)
        print("#ifdef CONFIG_CDP_ENABLED", file=config)
        print(comments_max_clos, file=config)
        print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(common_clos_max),
              file=config)

        print(comments_max_cache_clos, file=config)
        print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(
            max_cache_clos_entries_cdp_enable),
              file=config)

        print("#else", file=config)
        print(comments_max_clos, file=config)
        print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(
            common_clos_max_cdp_disable),
              file=config)

        print(comments_max_cache_clos, file=config)
        print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(
            max_cache_clos_entries),
              file=config)
        print("#endif", file=config)

        print(comments_max_mba_clos, file=config)
        print("#define MAX_MBA_CLOS_NUM_ENTRIES\t{}U".format(
            max_mba_clos_entries),
              file=config)
    else:
        print("#ifdef CONFIG_RDT_ENABLED", file=config)
        print(comments_max_clos, file=config)
        print("#define HV_SUPPORTED_MAX_CLOS\t{}U".format(common_clos_max),
              file=config)

        print(comments_max_mba_clos, file=config)
        print("#define MAX_MBA_CLOS_NUM_ENTRIES\t{}U".format(
            max_mba_clos_entries),
              file=config)

        print(comments_max_cache_clos, file=config)
        print("#define MAX_CACHE_CLOS_NUM_ENTRIES\t{}U".format(
            max_cache_clos_entries),
              file=config)
        if not board_cfg_lib.is_rdt_supported():
            print("#endif", file=config)

    print("", file=config)

    if board_cfg_lib.is_rdt_supported():
        (rdt_resources, rdt_res_clos_max,
         _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
        cat_mask_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                               "FEATURES", "RDT", "CLOS_MASK")
        mba_delay_list = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                                "FEATURES", "RDT", "MBA_DELAY")
        idx = 0
        for mba_delay_mask in mba_delay_list:
            print("#define MBA_MASK_{}\t\t\t{}U".format(idx, mba_delay_mask),
                  file=config)
            idx += 1

        idx = 0
        for cat_mask in cat_mask_list:
            print("#define CLOS_MASK_{}\t\t\t{}U".format(idx, cat_mask),
                  file=config)
            idx += 1
        print("", file=config)

        clos_per_vm_gen(config)
        print("#endif", file=config)
        print("", file=config)

    vm0_pre_launch = False
    common.get_vm_types()
    for vm_idx, vm_type in common.VM_TYPES.items():
        if vm_idx == 0 and scenario_cfg_lib.VM_DB[vm_type][
                'load_type'] == "PRE_LAUNCHED_VM":
            vm0_pre_launch = True

    if vm0_pre_launch and board_cfg_lib.is_tpm_passthru():
        tpm2_passthru_enabled = common.get_leaf_tag_map_bool(
            common.SCENARIO_INFO_FILE, "mmio_resources", "TPM2")
        if 0 in tpm2_passthru_enabled and tpm2_passthru_enabled[0]:
            print("#define VM0_PASSTHROUGH_TPM", file=config)
            print("#define VM0_TPM_BUFFER_BASE_ADDR   0xFED40000UL",
                  file=config)
            gpa = common.hpa2gpa(0, 0xFED40000, 0x5000)
            print(
                "#define VM0_TPM_BUFFER_BASE_ADDR_GPA   0x{:X}UL".format(gpa),
                file=config)
            print("#define VM0_TPM_BUFFER_SIZE        0x5000UL", file=config)
            print("", file=config)

    pci_dev_num_per_vm_gen(config)

    boot_args_per_vm_gen(config)

    pt_intx_num_vm0_gen(config)

    swsram_base_gpa_gen(config)

    print("{}".format(MISC_CFG_END), file=config)

    return err_dic
Code Example #7
def get_vuart1_from_scenario(vmid):
    """Get the vmid's  vuart1 base"""
    vuart1 = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
    return vuart1[vmid]['base']
Code Example #8
def generate_file(config):
    """
    Start to generate misc_cfg.h
    :param config: a file pointer to write the generated configuration to
    """
    board_cfg_lib.get_valid_irq(common.BOARD_INFO_FILE)

    # get cpu processor list
    cpu_list = board_cfg_lib.get_processor_info()
    max_cpu_num = len(cpu_list)

    # get the vuart0/vuart1 which the user chose from the board_private section of scenario.xml
    (err_dic, ttys_n) = board_cfg_lib.parser_hv_console()
    if err_dic:
        return err_dic

    # parse sos_bootargs/rootfs/console
    (err_dic, sos_cmdlines, sos_rootfs, vuart0_dic, vuart1_dic) = parse_boot_info()
    if err_dic:
        return err_dic

    if vuart0_dic:
        # parse to get the port base/irq of vuart0/vuart1
        vuart0_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart0_dic.keys())[0]]
        vuart0_irq = vuart0_dic[list(vuart0_dic.keys())[0]]

    vuart1_port_base = board_cfg_lib.LEGACY_TTYS[list(vuart1_dic.keys())[0]]
    vuart1_irq = vuart1_dic[list(vuart1_dic.keys())[0]]

    # parse the ttys vuartx settings dict: {vmid: base/irq}
    vuart0_setting = Vuart()
    vuart1_setting = Vuart()
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)

    # sos command lines information
    sos_cmdlines = [i for i in sos_cmdlines[0].split() if i != '']

    # get native rootfs list from board_info.xml
    (root_devs, root_dev_num) = board_cfg_lib.get_rootfs(common.BOARD_INFO_FILE)

    # start to generate misc_cfg.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print("{}".format(MISC_CFG_HEADER), file=config)

    # define MAX_PCPU_NUM
    print("#define MAX_PCPU_NUM\t{}U".format(max_cpu_num), file=config)

    # set macro of max clos number
    (_, clos_max, _) = board_cfg_lib.clos_info_parser(common.BOARD_INFO_FILE)
    if len(clos_max) != 0:
        common_clos_max = min(clos_max)
    else:
        common_clos_max = 0

    print("#define MAX_PLATFORM_CLOS_NUM\t{}U".format(common_clos_max), file=config)


    # define rootfs with macro
    for i in range(root_dev_num):
        print('#define ROOTFS_{}\t\t"root={} "'.format(i, root_devs[i]), file=config)

    # sos rootfs and console
    print("", file=config)
    if "SOS_VM" in common.VM_TYPES.values():
        print('#define SOS_ROOTFS\t\t"root={} "'.format(sos_rootfs[0]), file=config)
        if ttys_n:
            print('#define SOS_CONSOLE\t\t"console={} "'.format(ttys_n), file=config)
        else:
            print('#define SOS_CONSOLE\t\t" "', file=config)

    # sos com base/irq
    i_type = 0
    for vm_i,vm_type in common.VM_TYPES.items():
        if vm_type == "SOS_VM":
            i_type = vm_i
            break

    if "SOS_VM" in common.VM_TYPES.values():
        if vuart0_dic:
            print("#define SOS_COM1_BASE\t\t{}U".format(vuart0_port_base), file=config)
            print("#define SOS_COM1_IRQ\t\t{}U".format(vuart0_irq), file=config)
        else:
            print("#define SOS_COM1_BASE\t\t0U", file=config)
            print("#define SOS_COM1_IRQ\t\t0U", file=config)

        if vuart1_setting[i_type]['base'] != "INVALID_COM_BASE":
            print("#define SOS_COM2_BASE\t\t{}U".format(vuart1_port_base), file=config)
            print("#define SOS_COM2_IRQ\t\t{}U".format(vuart1_irq), file=config)

    # sos boot command line
    print("", file=config)
    if "SOS_VM" in common.VM_TYPES.values():
        sos_bootarg_diff(sos_cmdlines, config)

    # set macro for HIDDEN PTDEVS
    print("", file=config)
    if board_cfg_lib.BOARD_NAME in list(board_cfg_lib.KNOWN_HIDDEN_PDEVS_BOARD_DB):
        print("#define MAX_HIDDEN_PDEVS_NUM	{}U".format(len(board_cfg_lib.KNOWN_HIDDEN_PDEVS_BOARD_DB[board_cfg_lib.BOARD_NAME])), file=config)
    else:
        print("#define MAX_HIDDEN_PDEVS_NUM	0U", file=config)

    # generate HI_MMIO_START/HI_MMIO_END
    find_hi_mmio_window(config)

    print("", file=config)

    print("{}".format(MISC_CFG_END), file=config)

    return err_dic
Code Example #9
def write_vuart_vbar(mmiolist_per_vm, sos_mmio_range, config):
    # get legacy vuart information
    vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
    vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
    # get pci vuart information
    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
    for vm_id in vuarts.keys():
        vm_type = common.VM_TYPES[vm_id]
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
            continue
        for vuart_id in vuarts[vm_id].keys():
            if vuarts[vm_id][vuart_id]['base'] == "INVALID_PCI_BASE":
                continue
            # Skip pci vuart 0 if the legacy vuart 0 is enabled
            if vuart_id == 0 and vm_id in vuart0_setting \
                 and vuart0_setting[vm_id]['base'] != "INVALID_COM_BASE":
                continue
            # Skip pci vuart 1 if the legacy vuart 1 is enabled
            if vuart_id == 1 and vm_id in vuart1_setting \
                 and vuart1_setting[vm_id]['base'] != "INVALID_COM_BASE":
                continue
            free_bar0 = []
            free_bar1 = []
            # a vuart device requires 2 BARs
            if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
                free_bar0 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id], \
                             PCI_VUART_VBAR0_SIZE + PCI_VUART_VBAR0_ALIGNMENT)
                free_bar0_start_addr = common.round_up(
                    free_bar0.start, PCI_VUART_VBAR0_ALIGNMENT)
                free_bar0_end_addr = free_bar0_start_addr + PCI_VUART_VBAR0_SIZE - 1
                free_bar0 = MmioWindow(free_bar0_start_addr,
                                       free_bar0_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar0)
                mmiolist_per_vm[vm_id].sort()
                free_bar1 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id], \
                             PCI_VUART_VBAR1_SIZE + PCI_VUART_VBAR1_ALIGNMENT)
                free_bar1_start_addr = common.round_up(
                    free_bar1.start, PCI_VUART_VBAR1_ALIGNMENT)
                free_bar1_end_addr = free_bar1_start_addr + PCI_VUART_VBAR1_SIZE - 1
                free_bar1 = MmioWindow(free_bar1_start_addr,
                                       free_bar1_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar1)
                mmiolist_per_vm[vm_id].sort()
            elif scenario_cfg_lib.VM_DB[vm_type][
                    'load_type'] == "PRE_LAUNCHED_VM":
                free_bar0 = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)], \
                                mmiolist_per_vm[vm_id], PCI_VUART_VBAR0_SIZE + PCI_VUART_VBAR0_ALIGNMENT)
                free_bar0_start_addr = common.round_up(
                    free_bar0.start, PCI_VUART_VBAR0_ALIGNMENT)
                free_bar0_end_addr = free_bar0_start_addr + PCI_VUART_VBAR0_SIZE - 1
                free_bar0 = MmioWindow(free_bar0_start_addr,
                                       free_bar0_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar0)
                mmiolist_per_vm[vm_id].sort()
                free_bar1 = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)], \
                                mmiolist_per_vm[vm_id], PCI_VUART_VBAR1_SIZE + PCI_VUART_VBAR1_ALIGNMENT)
                free_bar1_start_addr = common.round_up(
                    free_bar1.start, PCI_VUART_VBAR1_ALIGNMENT)
                free_bar1_end_addr = free_bar1_start_addr + PCI_VUART_VBAR1_SIZE - 1
                free_bar1 = MmioWindow(free_bar1_start_addr,
                                       free_bar1_end_addr)
                mmiolist_per_vm[vm_id].append(free_bar1)
                mmiolist_per_vm[vm_id].sort()
            print("#define VM%s" % (str(vm_id) + "_VUART_%-28s") %
                  (str(vuart_id) + "_VBAR"),
                  "       .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start),
                  file=config)
            print("{}.vbar_base[1] = {:#x}UL".format(' ' * 54,
                                                     free_bar1.start),
                  file=config)
            print("", file=config)
Code Example #10
def generate_file(vm_info, config):
    """
    Generate pci_dev.c for Pre-Launched VMs in a scenario.
    :param config: a file pointer to write the generated pci_dev.c content to
    :return: None
    """
    board_cfg_lib.parser_pci()
    board_cfg_lib.parse_mem()

    compared_bdf = []
    sos_used_bdf = []

    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items(
        ):
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue

            add_instance_to_name(i_cnt, bdf, bar_attr)

            i_cnt += 1

    for bdf in compared_bdf:
        bdf_tuple = BusDevFunc.from_str(bdf)
        sos_used_bdf.append(bdf_tuple)
    bdf_tuple = BusDevFunc(bus=0, dev=1, func=0)
    sos_used_bdf.append(bdf_tuple)

    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
    vuarts_num = scenario_cfg_lib.get_vuart_num(vuarts)
    pci_vuart_enabled = False
    for vm_i in common.VM_TYPES:
        if vuarts_num[vm_i] > 0:
            pci_vuart_enabled = True
            break

    print("{}".format(scenario_cfg_lib.HEADER_LICENSE), file=config)
    print("", file=config)
    print("#include <vm_config.h>", file=config)
    print("#include <pci_devices.h>", file=config)
    print("#include <vpci.h>", file=config)
    print("#include <vbar_base.h>", file=config)
    print("#include <mmu.h>", file=config)
    print("#include <page.h>", file=config)
    if pci_vuart_enabled:
        print("#include <vmcs9900.h>", file=config)
    # Insert header for shared memory
    if vm_info.shmem.shmem_enabled == 'y':
        print("#include <ivshmem_cfg.h>", file=config)

    # Insert comments and macros for passthrough devices
    if any((p for _, p in vm_info.cfg_pci.pci_devs.items())):
        print("", file=config)
        print("/*", file=config)
        print(
            " * TODO: remove PTDEV macro and add DEV_PRIVINFO macro to initialize pbdf for",
            file=config)
        print(
            " * passthrough device configuration and shm_name for ivshmem device configuration.",
            file=config)
        print(" */", file=config)
        print("#define PTDEV(PCI_DEV)\t\tPCI_DEV, PCI_DEV##_VBAR", file=config)
        print("", file=config)
        print("/*", file=config)
        print(
            " * TODO: add DEV_PCICOMMON macro to initialize emu_type, vbdf and vdev_ops",
            file=config)
        print(" * to simplify the code.", file=config)
        print(" */", file=config)
    if pci_vuart_enabled:
        print("#define INVALID_PCI_BASE\t0U", file=config)

    for vm_i, vm_type in common.VM_TYPES.items():
        vm_used_bdf = []
        # Skip this VM if it has no PCI device and no virtual device
        if not scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i] and \
             scenario_cfg_lib.VM_DB[vm_type]['load_type'] != "SOS_VM":
            continue
        if not scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i] and \
             scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            print("", file=config)
            print("struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM];",
                  file=config)
            continue

        pci_cnt = 1
        # Insert device structure and bracket
        print("", file=config)
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            print("struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM] = {",
                  file=config)
        else:
            print("struct acrn_vm_pci_dev_config " +
                  "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM] = {{".format(
                      vm_i, vm_i),
                  file=config)

        # Insert passthrough device data
        if vm_i in vm_info.cfg_pci.pci_devs.keys():
            pci_bdf_devs_list = vm_info.cfg_pci.pci_devs[vm_i]
            if pci_bdf_devs_list:
                # Insert pci hostbridge for passthrough devices:
                if pci_cnt == 1:
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]),
                          file=config)
                    print(
                        "\t\t.vbdf.bits = {.b = 0x00U, .d = 0x00U, .f = 0x00U},",
                        file=config)
                    print("\t\t.vdev_ops = &vhostbridge_ops,", file=config)
                    print("\t},", file=config)
                    bdf_tuple = BusDevFunc.from_str("00:00.0")
                    vm_used_bdf.append(bdf_tuple)

                for pci_bdf_dev in pci_bdf_devs_list:
                    if not pci_bdf_dev:
                        continue
                    bus = int(pci_bdf_dev.split(':')[0], 16)
                    dev = int(pci_bdf_dev.split(':')[1].split('.')[0], 16)
                    fun = int(pci_bdf_dev.split('.')[1], 16)
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[1]),
                          file=config)
                    print(
                        "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                        .format(pci_cnt),
                        file=config)
                    for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items(
                    ):
                        if bdf == pci_bdf_dev:
                            print("\t\tPTDEV({}),".format(
                                board_cfg_lib.PCI_DEV_BAR_DESC.
                                pci_dev_dic[bdf].name_w_i_cnt),
                                  file=config)
                        else:
                            continue
                    print("\t},", file=config)
                    bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                    vm_used_bdf.append(bdf_tuple)
                    pci_cnt += 1

        # Insert ivshmem information
        if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
             and len(vm_info.shmem.shmem_regions[vm_i]) > 0:
            raw_shm_list = vm_info.shmem.shmem_regions[vm_i]
            for shm in raw_shm_list:
                shm_splited = shm.split(',')
                print("\t{", file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]),
                      file=config)
                if vm_i in vm_info.cfg_pci.pci_devs.keys():
                    if scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "SOS_VM":
                        free_bdf = find_unused_bdf(sos_used_bdf, "ivshmem")
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02x}U, .f = 0x{:02x}U}}," \
                             .format(free_bdf.dev,free_bdf.func), file=config)
                        print("\t\t.vdev_ops = &vpci_ivshmem_ops,",
                              file=config)
                        sos_used_bdf.append(free_bdf)
                    else:
                        print(
                            "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                            .format(pci_cnt),
                            file=config)
                        print("\t\t.vdev_ops = &vpci_ivshmem_ops,",
                              file=config)
                        bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                        vm_used_bdf.append(bdf_tuple)
                elif vm_i not in vm_info.cfg_pci.pci_devs.keys():
                    if scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "PRE_LAUNCHED_VM":
                        print(
                            "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                            .format(pci_cnt),
                            file=config)
                        bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                        vm_used_bdf.append(bdf_tuple)
                    elif scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "POST_LAUNCHED_VM":
                        print("\t\t.vbdf.value = UNASSIGNED_VBDF,",
                              file=config)
                    print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items(
                ):
                    index = shm_name[:shm_name.find('_')]
                    shm_name = shm_name[shm_name.find('_') + 1:]
                    if shm_name == shm_splited[0].strip():
                        if scenario_cfg_lib.VM_DB[vm_type][
                                'load_type'] == "PRE_LAUNCHED_VM":
                            print(
                                "\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},"
                                .format(index),
                                file=config)
                            print("\t\tIVSHMEM_DEVICE_{}_VBAR".format(index),
                                  file=config)
                            break
                        elif scenario_cfg_lib.VM_DB[vm_type][
                                'load_type'] == "SOS_VM":
                            print(
                                "\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},"
                                .format(index),
                                file=config)
                            print(
                                "\t\tSOS_IVSHMEM_DEVICE_{}_VBAR".format(index),
                                file=config)
                            break
                        else:
                            print(
                                "\t\t.shm_region_name = IVSHMEM_SHM_REGION_{}".
                                format(index),
                                file=config)
                            break
                pci_cnt += 1
                print("\t},", file=config)

        if vm_i in vuarts.keys():
            # get legacy vuart information
            vuart0_setting = common.get_vuart_info_id(
                common.SCENARIO_INFO_FILE, 0)
            vuart1_setting = common.get_vuart_info_id(
                common.SCENARIO_INFO_FILE, 1)

            for vuart_id in vuarts[vm_i].keys():
                if vuarts[vm_i][vuart_id]['base'] == "INVALID_PCI_BASE":
                    continue
                # skip pci vuart 0 for post-launched vm
                if vuart_id == 0 and scenario_cfg_lib.VM_DB[vm_type][
                        'load_type'] == "POST_LAUNCHED_VM":
                    continue
                # Skip pci vuart 0 if the legacy vuart 0 is enabled
                if vuart_id == 0 and vm_i in vuart0_setting and vuart0_setting[
                        vm_i]['base'] != "INVALID_COM_BASE":
                    continue
                # Skip pci vuart 1 if the legacy vuart 1 is enabled
                if vuart_id == 1 and vm_i in vuart1_setting and vuart1_setting[
                        vm_i]['base'] != "INVALID_COM_BASE":
                    continue

                print("\t{", file=config)
                print("\t\t.vuart_idx = {:1d},".format(vuart_id), file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]),
                      file=config)
                print("\t\t.vdev_ops = &vmcs9900_ops,", file=config)

                if vuart_id != 0 and scenario_cfg_lib.VM_DB[vm_type][
                        'load_type'] == "POST_LAUNCHED_VM":
                    print("\t\t.vbar_base[0] = INVALID_PCI_BASE,", file=config)
                    print("\t\t.vbdf.value = UNASSIGNED_VBDF,", file=config)

                if scenario_cfg_lib.VM_DB[vm_type][
                        'load_type'] != "POST_LAUNCHED_VM":
                    print("\t\tVM{:1d}_VUART_{:1d}_VBAR,".format(
                        vm_i, vuart_id),
                          file=config)
                    if scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "PRE_LAUNCHED_VM":
                        free_bdf = find_unused_bdf(vm_used_bdf, "vuart")
                        vm_used_bdf.append(free_bdf)
                    elif scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "SOS_VM":
                        free_bdf = find_unused_bdf(sos_used_bdf, "vuart")
                        sos_used_bdf.append(free_bdf)
                    print(
                        "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02x}U, .f = 0x00U}},"
                        .format(free_bdf.dev, free_bdf.func),
                        file=config)

                if vuart_id != 0:
                    print("\t\t.t_vuart.vm_id = {},".format(
                        vuarts[vm_i][vuart_id]['target_vm_id']),
                          file=config)
                    print("\t\t.t_vuart.vuart_id = {},".format(
                        vuarts[vm_i][vuart_id]['target_uart_id']),
                          file=config)
                pci_cnt += 1
                print("\t},", file=config)

        # Insert the end bracket of the pci_dev.c file
        print("};", file=config)