def share_mem_check(shmem_regions, raw_shmem_regions, vm_type_info, prime_item,
                    item, sub_item):

    shmem_names = {}

    MAX_SHMEM_REGION_NUM = 8
    shmem_region_num = 0
    for raw_shmem_region in raw_shmem_regions:
        if raw_shmem_region is not None and raw_shmem_region.strip() != '':
            shmem_region_num += 1
    if shmem_region_num > MAX_SHMEM_REGION_NUM:
        key = "hv,{},{},{},{}".format(prime_item, item, sub_item,
                                      MAX_SHMEM_REGION_NUM)
        ERR_LIST[
            key] = "The number of hv-land shmem regions should not be greater than {}.".format(
                MAX_SHMEM_REGION_NUM)
        return

    for shm_i, shm_list in shmem_regions.items():
        for shm_str in shm_list:
            index = -1
            if shm_i == 'err':
                for i in range(len(raw_shmem_regions)):
                    if raw_shmem_regions[i] == shm_str:
                        index = i
                        break
            if index == -1:
                try:
                    for i in range(len(raw_shmem_regions)):
                        if raw_shmem_regions[i].split(',')[0].strip(
                        ) == shm_str.split(',')[0].strip():
                            index = i
                            break
                except:
                    index = 0
            key = "hv,{},{},{},{}".format(prime_item, item, sub_item, index)

            shm_str_splited = shm_str.split(',')
            if len(shm_str_splited) < 3:
                ERR_LIST[key] = "The name, size, communication VM IDs of the share memory should be separated " \
                                "by comma and not be empty."
                return
            try:
                curr_vm_id = int(shm_i)
            except:
                ERR_LIST[key] = "The shared memory region should be configured in a format like: " \
                                "hv:/shm_region_0, 0x200000, 0:2"
                return
            name = shm_str_splited[0].strip()
            size = shm_str_splited[1].strip()
            vmid_list = shm_str_splited[2].split(':')
            int_vmid_list = []
            for vmid in vmid_list:
                try:
                    int_vmid = int(vmid)
                    int_vmid_list.append(int_vmid)
                except:
                    ERR_LIST[key] = "The communication VM IDs of the shared memory should be decimal and separated by colon."
                    return
            if not int_vmid_list:
                ERR_LIST[key] = "The communication VM IDs of the shared memory should be decimal and separated by colon."
                return
            if curr_vm_id in int_vmid_list or len(
                    set(int_vmid_list)) != len(int_vmid_list):
                ERR_LIST[key] = "The communication VM IDs of the shared memory should not include the current VM ID or contain duplicates."
                return
            for target_vm_id in int_vmid_list:
                if curr_vm_id not in vm_type_info.keys() or target_vm_id not in vm_type_info.keys() \
                        or vm_type_info[curr_vm_id] in ['SOS_VM'] or vm_type_info[target_vm_id] in ['SOS_VM']:
                    ERR_LIST[key] = "Shared memory can only be configured for existing Pre-launched VMs and Post-launched VMs."
                    return

            if name == '' or size == '':
                ERR_LIST[key] = "The name and size of the shared memory region should not be empty."
                return
            name_len = len(name)
            if name_len > 32 or name_len == 0:
                ERR_LIST[key] = "The length of the shared memory name should be in the range [1, 32] bytes."
                return

            int_size = 0
            try:
                if size.isdecimal():
                    int_size = int(size)
                else:
                    int_size = int(size, 16)
            except:
                ERR_LIST[key] = "The size of the shared memory region should be decimal or hexadecimal."
                return
            if int_size < 0x200000 or int_size > 0x20000000:
                ERR_LIST[key] = "The size of the shared memory region should be in the range [2MB, 512MB]."
                return
            if not ((int_size & (int_size - 1) == 0) and int_size != 0):
                ERR_LIST[key] = "The size of the shared memory region should be a power of 2."
                return

            if name in shmem_names.keys():
                shmem_names[name] += 1
            else:
                shmem_names[name] = 1
            if shmem_names[name] > len(vmid_list) + 1:
                ERR_LIST[key] = "The names of shared memory regions should not be duplicated: {}".format(name)
                return

    board_cfg_lib.parse_mem()
    for shm_i, shm_list in shmem_regions.items():
        for shm_str in shm_list:
            shm_str_splited = shm_str.split(',')
            name = shm_str_splited[0].strip()
            index = 0
            try:
                for i in range(len(raw_shmem_regions)):
                    if raw_shmem_regions[i].split(
                            ',')[0].strip() == shm_str.split(',')[0].strip():
                        index = i
                        break
            except:
                index = 0
            key = "hv,{},{},{},{}".format(prime_item, item, sub_item, index)
            if 'IVSHMEM_' + name in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.keys(
            ):
                bar_attr_dic = board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic[
                    'IVSHMEM_' + name]
                if (0 in bar_attr_dic.keys() and int(bar_attr_dic[0].addr, 16) < 0x80000000) \
                    or (2 in bar_attr_dic.keys() and int(bar_attr_dic[2].addr, 16) < 0x100000000):
                    ERR_LIST[key] = "Failed to get the start address of the shared memory; please check its size."
                    return
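

# The size rule enforced above (decimal or hexadecimal text, a power of two, within
# [2MB, 512MB]) can be condensed into the small helper below. This is an illustrative
# sketch only, not part of the original tooling; it assumes the same parsing convention
# as share_mem_check(): bare digits are decimal, anything else is parsed as base 16.
def _is_valid_shmem_size(size_str):
    try:
        value = int(size_str) if size_str.isdecimal() else int(size_str, 16)
    except ValueError:
        return False
    return 0x200000 <= value <= 0x20000000 and (value & (value - 1)) == 0

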
def generate_file(vm_info, config):
    """
    Generate pci_dev.c for Pre-Launched VMs in a scenario.
    :param vm_info: parsed VM information from the scenario configuration
    :param config: file object that the generated code is written to
    :return: None
    """
    board_cfg_lib.parser_pci()
    board_cfg_lib.parse_mem()

    compared_bdf = []

    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items(
        ):
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue

            add_instance_to_name(i_cnt, bdf, bar_attr)

            i_cnt += 1

    idx = 0
    print("{}".format(scenario_cfg_lib.HEADER_LICENSE), file=config)
    print("", file=config)
    print("#include <vm_config.h>", file=config)
    print("#include <pci_devices.h>", file=config)
    print("#include <vpci.h>", file=config)
    print("#include <vbar_base.h>", file=config)
    print("#include <mmu.h>", file=config)
    print("#include <page.h>", file=config)
    if vm_info.shmem.shmem_enabled == 'y':
        print("#include <ivshmem.h>", file=config)
    for vm_i, pci_bdf_devs_list in vm_info.cfg_pci.pci_devs.items():
        if not pci_bdf_devs_list:
            continue
        pci_cnt = 1
        if idx == 0:
            print("", file=config)
            print("/*", file=config)
            print(
                " * TODO: remove PTDEV macro and add DEV_PRIVINFO macro to initialize pbdf for",
                file=config)
            print(
                " * passthrough device configuration and shm_name for ivshmem device configuration.",
                file=config)
            print(" */", file=config)
            print("#define PTDEV(PCI_DEV)\t\tPCI_DEV, PCI_DEV##_VBAR",
                  file=config)
        print("", file=config)
        print("/*", file=config)
        print(
            " * TODO: add DEV_PCICOMMON macro to initialize emu_type, vbdf and vdev_ops",
            file=config)
        print(" * to simplify the code.", file=config)
        print(" */", file=config)
        print("struct acrn_vm_pci_dev_config " +
              "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM] = {{".format(vm_i, vm_i),
              file=config)
        print("\t{", file=config)
        print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]), file=config)
        print("\t\t.vbdf.bits = {.b = 0x00U, .d = 0x00U, .f = 0x00U},",
              file=config)
        print("\t\t.vdev_ops = &vhostbridge_ops,", file=config)
        print("\t},", file=config)

        idx += 1
        for pci_bdf_dev in pci_bdf_devs_list:
            if not pci_bdf_dev:
                continue
            bus = int(pci_bdf_dev.split(':')[0], 16)
            dev = int(pci_bdf_dev.split(':')[1].split('.')[0], 16)
            fun = int(pci_bdf_dev.split('.')[1], 16)
            print("\t{", file=config)
            print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[1]), file=config)
            print(
                "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},"
                .format(pci_cnt),
                file=config)
            for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items(
            ):
                if bdf == pci_bdf_dev:
                    print(
                        "\t\tPTDEV({}),".format(board_cfg_lib.PCI_DEV_BAR_DESC.
                                                pci_dev_dic[bdf].name_w_i_cnt),
                        file=config)
                else:
                    continue
            print("\t},", file=config)
            pci_cnt += 1

        if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
                and len(vm_info.shmem.shmem_regions[vm_i]) > 0:
            raw_shm_list = vm_info.shmem.shmem_regions[vm_i]
            for shm in raw_shm_list:
                shm_splited = shm.split(',')
                print("\t{", file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]),
                      file=config)
                print(
                    "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},"
                    .format(pci_cnt),
                    file=config)
                print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items(
                ):
                    index = shm_name[:shm_name.find('_')]
                    shm_name = shm_name[shm_name.find('_') + 1:]
                    if shm_name == shm_splited[0].strip():
                        print("\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},".
                              format(index),
                              file=config)
                        print("\t\tIVSHMEM_DEVICE_{}_VBAR".format(index),
                              file=config)
                # print("\t\t.vbar_size[0] = 0x100,", file=config)
                # print("\t\t.vbar_size[2] = {},".format(shm_splited[1].strip()), file=config)
                # print('\t\t.shm_name = "{}",'.format(shm_splited[0].strip()), file=config)
                print("\t},", file=config)
                pci_cnt += 1

        print("};", file=config)

    if vm_info.shmem.shmem_enabled == 'y':
        for shm_i, raw_shm_list in vm_info.shmem.shmem_regions.items():
            shm_cnt = 0
            if shm_i not in vm_info.cfg_pci.pci_devs.keys(
            ) and len(raw_shm_list) > 0:
                print("", file=config)
                print("struct acrn_vm_pci_dev_config " +
                      "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM] = {{".format(
                          shm_i, shm_i),
                      file=config)
                for shm in raw_shm_list:
                    shm_splited = shm.split(',')
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]),
                          file=config)
                    if shm_i in common.VM_TYPES.keys(
                    ) and common.VM_TYPES[shm_i] in [
                            'PRE_RT_VM', 'PRE_STD_VM', 'SAFETY_VM'
                    ]:
                        print(
                            "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02d}U, .f = 0x00U}},"
                            .format(shm_cnt),
                            file=config)
                    else:
                        print("\t\t.vbdf.value = UNASSIGNED_VBDF,".format(
                            shm_cnt),
                              file=config)
                    print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                    for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items(
                    ):
                        index = shm_name[:shm_name.find('_')]
                        shm_name = shm_name[shm_name.find('_') + 1:]
                        if shm_name == shm_splited[0].strip():
                            if shm_i in common.VM_TYPES.keys(
                            ) and common.VM_TYPES[shm_i] in [
                                    'PRE_RT_VM', 'PRE_STD_VM', 'SAFETY_VM'
                            ]:
                                print(
                                    "\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},"
                                    .format(index),
                                    file=config)
                                print(
                                    "\t\tIVSHMEM_DEVICE_{}_VBAR".format(index),
                                    file=config)
                                break
                            else:
                                print(
                                    "\t\t.shm_region_name = IVSHMEM_SHM_REGION_{}"
                                    .format(index),
                                    file=config)
                                break
                    shm_cnt += 1
                    print("\t},", file=config)
                print("};", file=config)
# Example #3
def generate_file(config):
    # start to generate vbar_base.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print(VBAR_INFO_DEFINE, file=config)
    common.get_vm_types()
    pre_vm = False
    for vm_type in common.VM_TYPES.values():
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            pre_vm = True

    if not pre_vm:
        print(VBAR_INFO_ENDIF, file=config)
        return

    compared_bdf = []
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items(
        ):
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue

            write_vbar(i_cnt, bdf, board_cfg_lib.PCI_DEV_BAR_DESC.pci_bar_dic,
                       bar_attr, config)

            i_cnt += 1

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                             "FEATURES", "IVSHMEM",
                                             "IVSHMEM_ENABLED")
    if ivshmem_enabled == 'y':
        board_cfg_lib.parse_mem()
        for shm_name, bar_attr_dic in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items(
        ):
            index = shm_name[:shm_name.find('_')]
            i_cnt = 0
            for bar_i, bar_attr in bar_attr_dic.items():
                i_cnt += 1
                if bar_i == 0:
                    if len(bar_attr_dic.keys()) == 1:
                        print("#define IVSHMEM_DEVICE_%-23s" %
                              (str(index) + "_VBAR"),
                              "       .vbar_base[{}] = {}UL".format(
                                  bar_i, bar_attr.addr),
                              file=config)
                    else:
                        print("#define IVSHMEM_DEVICE_%-23s" %
                              (str(index) + "_VBAR"),
                              "       .vbar_base[{}] = {}UL, \\".format(
                                  bar_i, bar_attr.addr),
                              file=config)
                elif i_cnt == len(bar_attr_dic.keys()):
                    print("{}.vbar_base[{}] = {}UL".format(
                        ' ' * 54, bar_i, bar_attr.addr),
                          file=config)
                else:
                    print("{}.vbar_base[{}] = {}UL, \\".format(
                        ' ' * 54, bar_i, bar_attr.addr),
                          file=config)

            print("", file=config)

    print(VBAR_INFO_ENDIF, file=config)
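

# The ivshmem branch above renders the per-device VBAR macros that pci_dev.c consumes.
# For a region whose BAR addresses were filled in by board_cfg_lib.parse_mem(), the
# output is shaped roughly like this (illustrative; the addresses are made up):
#
#     #define IVSHMEM_DEVICE_0_VBAR                  .vbar_base[0] = 0x80000000UL, \
#                                                           .vbar_base[2] = 0x100000000UL
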
# Example #4
def generate_file(vm_info, config):
    """
    Generate pci_dev.c for Pre-Launched VMs in a scenario.
    :param vm_info: parsed VM information from the scenario configuration
    :param config: file object that the generated code is written to
    :return: None
    """
    board_cfg_lib.parser_pci()
    board_cfg_lib.parse_mem()

    compared_bdf = []
    sos_used_bdf = []

    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items(
        ):
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue

            add_instance_to_name(i_cnt, bdf, bar_attr)

            i_cnt += 1

    for bdf in compared_bdf:
        bdf_tuple = BusDevFunc.from_str(bdf)
        sos_used_bdf.append(bdf_tuple)
    bdf_tuple = BusDevFunc(bus=0, dev=1, func=0)
    sos_used_bdf.append(bdf_tuple)

    vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
    vuarts_num = scenario_cfg_lib.get_vuart_num(vuarts)
    pci_vuart_enabled = False
    for vm_i in common.VM_TYPES:
        if vuarts_num[vm_i] > 0:
            pci_vuart_enabled = True
            break

    print("{}".format(scenario_cfg_lib.HEADER_LICENSE), file=config)
    print("", file=config)
    print("#include <vm_config.h>", file=config)
    print("#include <pci_devices.h>", file=config)
    print("#include <vpci.h>", file=config)
    print("#include <vbar_base.h>", file=config)
    print("#include <mmu.h>", file=config)
    print("#include <page.h>", file=config)
    if pci_vuart_enabled:
        print("#include <vmcs9900.h>", file=config)
    # Insert header for shared memory
    if vm_info.shmem.shmem_enabled == 'y':
        print("#include <ivshmem_cfg.h>", file=config)

    # Insert comments and macros for passthrough devices
    if any((p for _, p in vm_info.cfg_pci.pci_devs.items())):
        print("", file=config)
        print("/*", file=config)
        print(
            " * TODO: remove PTDEV macro and add DEV_PRIVINFO macro to initialize pbdf for",
            file=config)
        print(
            " * passthrough device configuration and shm_name for ivshmem device configuration.",
            file=config)
        print(" */", file=config)
        print("#define PTDEV(PCI_DEV)\t\tPCI_DEV, PCI_DEV##_VBAR", file=config)
        print("", file=config)
        print("/*", file=config)
        print(
            " * TODO: add DEV_PCICOMMON macro to initialize emu_type, vbdf and vdev_ops",
            file=config)
        print(" * to simplify the code.", file=config)
        print(" */", file=config)
    if pci_vuart_enabled:
        print("#define INVALID_PCI_BASE\t0U", file=config)

    for vm_i, vm_type in common.VM_TYPES.items():
        vm_used_bdf = []
        # Skip this VM if it has neither PCI devices nor virtual devices
        if not scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i] and \
             scenario_cfg_lib.VM_DB[vm_type]['load_type'] != "SOS_VM":
            continue
        if not scenario_cfg_lib.get_pci_dev_num_per_vm()[vm_i] and \
             scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            print("", file=config)
            print("struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM];",
                  file=config)
            continue

        pci_cnt = 1
        # Insert device structure and bracket
        print("", file=config)
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            print("struct acrn_vm_pci_dev_config " +
                  "sos_pci_devs[CONFIG_MAX_PCI_DEV_NUM] = {",
                  file=config)
        else:
            print("struct acrn_vm_pci_dev_config " +
                  "vm{}_pci_devs[VM{}_CONFIG_PCI_DEV_NUM] = {{".format(
                      vm_i, vm_i),
                  file=config)

        # Insert passthrough device data
        if vm_i in vm_info.cfg_pci.pci_devs.keys():
            pci_bdf_devs_list = vm_info.cfg_pci.pci_devs[vm_i]
            if pci_bdf_devs_list:
                # Insert pci hostbridge for passthrough devices:
                if pci_cnt == 1:
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]),
                          file=config)
                    print(
                        "\t\t.vbdf.bits = {.b = 0x00U, .d = 0x00U, .f = 0x00U},",
                        file=config)
                    print("\t\t.vdev_ops = &vhostbridge_ops,", file=config)
                    print("\t},", file=config)
                    bdf_tuple = BusDevFunc.from_str("00:00.0")
                    vm_used_bdf.append(bdf_tuple)

                for pci_bdf_dev in pci_bdf_devs_list:
                    if not pci_bdf_dev:
                        continue
                    bus = int(pci_bdf_dev.split(':')[0], 16)
                    dev = int(pci_bdf_dev.split(':')[1].split('.')[0], 16)
                    fun = int(pci_bdf_dev.split('.')[1], 16)
                    print("\t{", file=config)
                    print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[1]),
                          file=config)
                    print(
                        "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                        .format(pci_cnt),
                        file=config)
                    for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items(
                    ):
                        if bdf == pci_bdf_dev:
                            print("\t\tPTDEV({}),".format(
                                board_cfg_lib.PCI_DEV_BAR_DESC.
                                pci_dev_dic[bdf].name_w_i_cnt),
                                  file=config)
                        else:
                            continue
                    print("\t},", file=config)
                    bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                    vm_used_bdf.append(bdf_tuple)
                    pci_cnt += 1

        # Insert ivshmem information
        if vm_info.shmem.shmem_enabled == 'y' and vm_i in vm_info.shmem.shmem_regions.keys() \
             and len(vm_info.shmem.shmem_regions[vm_i]) > 0:
            raw_shm_list = vm_info.shmem.shmem_regions[vm_i]
            for shm in raw_shm_list:
                shm_splited = shm.split(',')
                print("\t{", file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]),
                      file=config)
                if vm_i in vm_info.cfg_pci.pci_devs.keys():
                    if scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "SOS_VM":
                        free_bdf = find_unused_bdf(sos_used_bdf, "ivshmem")
                        print("\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02x}U, .f = 0x{:02x}U}}," \
                             .format(free_bdf.dev,free_bdf.func), file=config)
                        print("\t\t.vdev_ops = &vpci_ivshmem_ops,",
                              file=config)
                        sos_used_bdf.append(free_bdf)
                    else:
                        print(
                            "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                            .format(pci_cnt),
                            file=config)
                        print("\t\t.vdev_ops = &vpci_ivshmem_ops,",
                              file=config)
                        bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                        vm_used_bdf.append(bdf_tuple)
                elif vm_i not in vm_info.cfg_pci.pci_devs.keys():
                    if scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "PRE_LAUNCHED_VM":
                        print(
                            "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{0:02x}U, .f = 0x00U}},"
                            .format(pci_cnt),
                            file=config)
                        bdf_tuple = BusDevFunc(0, pci_cnt, 0)
                        vm_used_bdf.append(bdf_tuple)
                    elif scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "POST_LAUNCHED_VM":
                        print("\t\t.vbdf.value = UNASSIGNED_VBDF,",
                              file=config)
                    print("\t\t.vdev_ops = &vpci_ivshmem_ops,", file=config)
                for shm_name, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items(
                ):
                    index = shm_name[:shm_name.find('_')]
                    shm_name = shm_name[shm_name.find('_') + 1:]
                    if shm_name == shm_splited[0].strip():
                        if scenario_cfg_lib.VM_DB[vm_type][
                                'load_type'] == "PRE_LAUNCHED_VM":
                            print(
                                "\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},"
                                .format(index),
                                file=config)
                            print("\t\tIVSHMEM_DEVICE_{}_VBAR".format(index),
                                  file=config)
                            break
                        elif scenario_cfg_lib.VM_DB[vm_type][
                                'load_type'] == "SOS_VM":
                            print(
                                "\t\t.shm_region_name = IVSHMEM_SHM_REGION_{},"
                                .format(index),
                                file=config)
                            print(
                                "\t\tSOS_IVSHMEM_DEVICE_{}_VBAR".format(index),
                                file=config)
                            break
                        else:
                            print(
                                "\t\t.shm_region_name = IVSHMEM_SHM_REGION_{}".
                                format(index),
                                file=config)
                            break
                pci_cnt += 1
                print("\t},", file=config)

        if vm_i in vuarts.keys():
            # get legacy vuart information
            vuart0_setting = common.get_vuart_info_id(
                common.SCENARIO_INFO_FILE, 0)
            vuart1_setting = common.get_vuart_info_id(
                common.SCENARIO_INFO_FILE, 1)

            for vuart_id in vuarts[vm_i].keys():
                if vuarts[vm_i][vuart_id]['base'] == "INVALID_PCI_BASE":
                    continue
                # Skip pci vuart 0 for post-launched VM
                if vuart_id == 0 and scenario_cfg_lib.VM_DB[vm_type][
                        'load_type'] == "POST_LAUNCHED_VM":
                    continue
                # Skip pci vuart 0 if the legacy vuart 0 is enabled
                if vuart_id == 0 and vm_i in vuart0_setting and vuart0_setting[
                        vm_i]['base'] != "INVALID_COM_BASE":
                    continue
                # Skip pci vuart 1 if the legacy vuart 1 is enabled
                if vuart_id == 1 and vm_i in vuart1_setting and vuart1_setting[
                        vm_i]['base'] != "INVALID_COM_BASE":
                    continue

                print("\t{", file=config)
                print("\t\t.vuart_idx = {:1d},".format(vuart_id), file=config)
                print("\t\t.emu_type = {},".format(PCI_DEV_TYPE[0]),
                      file=config)
                print("\t\t.vdev_ops = &vmcs9900_ops,", file=config)

                if vuart_id != 0 and scenario_cfg_lib.VM_DB[vm_type][
                        'load_type'] == "POST_LAUNCHED_VM":
                    print("\t\t.vbar_base[0] = INVALID_PCI_BASE,", file=config)
                    print("\t\t.vbdf.value = UNASSIGNED_VBDF,", file=config)

                if scenario_cfg_lib.VM_DB[vm_type][
                        'load_type'] != "POST_LAUNCHED_VM":
                    print("\t\tVM{:1d}_VUART_{:1d}_VBAR,".format(
                        vm_i, vuart_id),
                          file=config)
                    if scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "PRE_LAUNCHED_VM":
                        free_bdf = find_unused_bdf(vm_used_bdf, "vuart")
                        vm_used_bdf.append(free_bdf)
                    elif scenario_cfg_lib.VM_DB[vm_type][
                            'load_type'] == "SOS_VM":
                        free_bdf = find_unused_bdf(sos_used_bdf, "vuart")
                        sos_used_bdf.append(free_bdf)
                    print(
                        "\t\t.vbdf.bits = {{.b = 0x00U, .d = 0x{:02x}U, .f = 0x00U}},"
                        .format(free_bdf.dev, free_bdf.func),
                        file=config)

                if vuart_id != 0:
                    print("\t\t.t_vuart.vm_id = {},".format(
                        vuarts[vm_i][vuart_id]['target_vm_id']),
                          file=config)
                    print("\t\t.t_vuart.vuart_id = {},".format(
                        vuarts[vm_i][vuart_id]['target_uart_id']),
                          file=config)
                pci_cnt += 1
                print("\t},", file=config)

        # Insert the end bracket of the pci_dev.c file
        print("};", file=config)
# Example #5
def generate_file(config):
    matching_mmios, non_matching_mmios = get_mmio_windows_with_key(
        ['PCI Bus 0000:00'])
    matching_mmios = removed_nested(matching_mmios, non_matching_mmios)
    non_matching_mmios = [
        w for w in non_matching_mmios
        if any((w.overlaps(w2) for w2 in matching_mmios))
    ]
    non_matching_mmios = merged_windows(non_matching_mmios)

    # list of all vmsix supported devices in bdf format
    bdf_list = board_cfg_lib.get_known_caps_pci_devs().get('VMSIX', [])
    # list of all PRE_LAUNCHED_VMs' vmsix supported passthrough devices in bdf format
    pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs",
                                        "pci_dev")
    pci_devs = scenario_cfg_lib.get_pci_devs(pci_items)
    pci_devs_per_vm = get_devs_per_vm_with_key(pci_devs, bdf_list)
    # list of SOS vmsix supported devices, excluding those passed through to PRE_LAUNCHED_VMs, in bdf format
    sos_bdf_list = [
        d for d in bdf_list
        if all((d not in pci_devs_per_vm[i] for i in pci_devs_per_vm))
    ]

    for vm_i in pci_devs_per_vm:
        vm_type = common.VM_TYPES[vm_i]
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            pci_devs_per_vm[vm_i] = sos_bdf_list

    mmiolist_per_vm = {}
    for vm_i, vm_type in common.VM_TYPES.items():
        if vm_i not in mmiolist_per_vm.keys():
            mmiolist_per_vm[vm_i] = []
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            mmiolist_per_vm[vm_i] = non_matching_mmios
        else:
            if vm_i in pci_devs.keys():
                match, _ = get_mmio_windows_with_key(pci_devs[vm_i])
                mmiolist_per_vm[vm_i] = match
            if scenario_cfg_lib.VM_DB[vm_type][
                    'load_type'] == "PRE_LAUNCHED_VM":
                if vm_i not in mmiolist_per_vm.keys():
                    mmiolist_per_vm[vm_i] = []
                # TSN reserved region
                mmiolist_per_vm[vm_i].append(
                    MmioWindow(start=0xffff0000, end=0xffffffff))
                # For the pre-launched VM, if the TPM is passed through, this address is used
                if vm_i == 0 and board_cfg_lib.is_tpm_passthru():
                    mmiolist_per_vm[vm_i].append(
                        MmioWindow(start=0xfed40000,
                                   end=0xfed40000 + 0x5000 - 1))
                # For the pre-launched VM on ehl-crb-b, if the p2sb is passed through, this address is used
                if board_cfg_lib.is_matched_board(('ehl-crb-b')):
                    p2sb_start = board_cfg_lib.find_p2sb_bar_addr()
                    mmiolist_per_vm[vm_i].append(
                        MmioWindow(start=p2sb_start,
                                   end=p2sb_start + 0x1000000 - 1))
                mmiolist_per_vm[vm_i].sort()

    # start to generate vbar_base.h
    print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
    print(VBAR_INFO_DEFINE, file=config)
    common.get_vm_types()
    pre_vm = False
    sos_vm = False
    for vm_type in common.VM_TYPES.values():
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
            pre_vm = True
        if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
            sos_vm = True

    if not pre_vm and not sos_vm:
        print(VBAR_INFO_ENDIF, file=config)
        return

    ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
                                             "FEATURES", "IVSHMEM",
                                             "IVSHMEM_ENABLED")
    if ivshmem_enabled == 'y':
        for vm_id, vm_type in common.VM_TYPES.items():
            free_bar = []
            if scenario_cfg_lib.VM_DB[vm_type][
                    'load_type'] == "PRE_LAUNCHED_VM":
                board_cfg_lib.parse_mem()
                for shm_name, bar_attr_dic in board_cfg_lib.PCI_DEV_BAR_DESC.shm_bar_dic.items(
                ):
                    index = shm_name[:shm_name.find('_')]
                    i_cnt = 0
                    for bar_i, bar_attr in bar_attr_dic.items():
                        i_cnt += 1
                        if bar_i == 2:
                            raw_shmem_regions = common.get_hv_item_tag(
                                common.SCENARIO_INFO_FILE, "FEATURES",
                                "IVSHMEM", "IVSHMEM_REGION")
                            for shm in raw_shmem_regions:
                                if shm is None or shm.strip() == '':
                                    continue
                                shm_splited = shm.split(',')
                                name = shm_splited[0].strip()
                                size = shm_splited[1].strip()

                                try:
                                    int_size = int(size) * 0x100000
                                except:
                                    int_size = 0
                            bar_2 = int(bar_attr.addr, 16)
                            mmiolist_per_vm[vm_id].append(
                                MmioWindow(start=bar_2,
                                           end=bar_2 + int_size - 1))
                            mmiolist_per_vm[vm_id].sort()
                        if bar_i == 0:
                            bar_0 = MmioWindow(start=int(bar_attr.addr, 16),
                                               end=int(bar_attr.addr, 16) +
                                               0x100 - 1)
                            mmiolist_per_vm[vm_id].append(bar_0)
                            mmiolist_per_vm[vm_id].sort()
                            if len(bar_attr_dic.keys()) == 1:
                                print("#define IVSHMEM_DEVICE_%-23s" %
                                      (str(index) + "_VBAR"),
                                      "       .vbar_base[{}] = {}UL".format(
                                          bar_i, bar_attr.addr),
                                      file=config)
                            else:
                                print(
                                    "#define IVSHMEM_DEVICE_%-23s" %
                                    (str(index) + "_VBAR"),
                                    "       .vbar_base[{}] = {}UL, \\".format(
                                        bar_i, bar_attr.addr),
                                    file=config)
                                # vbar[1] for shared memory is fixed to 4K
                                free_bar = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)], \
                                                mmiolist_per_vm[vm_id], BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                                free_bar_start_addr = common.round_up(
                                    free_bar.start, BAR1_SHEMEM_ALIGNMENT)
                                free_bar_end_addr = free_bar_start_addr + BAR1_SHEMEM_SIZE - 1
                                free_bar = MmioWindow(free_bar_start_addr,
                                                      free_bar_end_addr)
                                mmiolist_per_vm[vm_id].append(free_bar)
                                mmiolist_per_vm[vm_id].sort()
                                print("{}.vbar_base[1] = {:#x}UL, \\".format(
                                    ' ' * 54, free_bar.start),
                                      file=config)
                        elif i_cnt == len(bar_attr_dic.keys()):
                            print("{}.vbar_base[{}] = {}UL".format(
                                ' ' * 54, bar_i, bar_attr.addr),
                                  file=config)
                        else:
                            print("{}.vbar_base[{}] = {}UL, \\".format(
                                ' ' * 54, bar_i, bar_attr.addr),
                                  file=config)
                    print("", file=config)
            elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SOS_VM":
                ivshmem_region = common.get_hv_item_tag(
                    common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM",
                    "IVSHMEM_REGION")
                shmem_regions = scenario_cfg_lib.get_shmem_regions(
                    ivshmem_region)
                if vm_id not in shmem_regions.keys():
                    continue
                idx = 0
                for shm in ivshmem_region:
                    if shm is None or shm.strip() == '':
                        continue
                    shm_splited = shm.split(',')
                    name = shm_splited[0].strip()
                    size = shm_splited[1].strip()
                    try:
                        int_size = int(size) * 0x100000
                    except:
                        int_size = 0
                    # vbar[0] for shared memory is 0x100 bytes
                    free_bar0 = get_free_mmio(
                        matching_mmios, mmiolist_per_vm[vm_id],
                        BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
                    free_bar0_start_addr = common.round_up(
                        free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
                    free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
                    free_bar0 = MmioWindow(free_bar0_start_addr,
                                           free_bar0_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar0)
                    mmiolist_per_vm[vm_id].sort()
                    # vbar[1] for shared memory is 4K
                    free_bar1 = get_free_mmio(
                        matching_mmios, mmiolist_per_vm[vm_id],
                        BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
                    free_bar1_start_addr = common.round_up(
                        free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
                    free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
                    free_bar1 = MmioWindow(free_bar1_start_addr,
                                           free_bar1_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar1)
                    mmiolist_per_vm[vm_id].sort()
                    # vbar[2] for shared memory is specified size in MB
                    free_bar2 = get_free_mmio(matching_mmios,
                                              mmiolist_per_vm[vm_id],
                                              BAR2_SHEMEM_ALIGNMENT + int_size)
                    free_bar2_start_addr = common.round_up(
                        free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
                    free_bar2_end_addr = free_bar2_start_addr + int_size - 1
                    free_bar2 = MmioWindow(free_bar2_start_addr,
                                           free_bar2_end_addr)
                    mmiolist_per_vm[vm_id].append(free_bar2)
                    mmiolist_per_vm[vm_id].sort()
                    print("#define SOS_IVSHMEM_DEVICE_%-19s" %
                          (str(idx) + "_VBAR"),
                          "       .vbar_base[0] = {:#x}UL, \\".format(
                              free_bar0.start),
                          file=config)
                    print("{}.vbar_base[1] = {:#x}UL, \\".format(
                        ' ' * 54, free_bar1.start),
                          file=config)
                    print("{}.vbar_base[2] = {:#x}UL".format(
                        ' ' * 54, free_bar2.start),
                          file=config)
                    print("", file=config)
                    idx += 1

    # Get passthrough devices vbar bases
    compared_bdf = []
    for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
        i_cnt = 0
        for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items(
        ):
            if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
                compared_bdf.append(bdf)
            else:
                continue

            write_vbar(i_cnt, bdf, board_cfg_lib.PCI_DEV_BAR_DESC.pci_bar_dic, bar_attr, \
                pci_devs_per_vm, mmiolist_per_vm, matching_mmios, config)

            i_cnt += 1

    write_vuart_vbar(mmiolist_per_vm, matching_mmios, config)
    print(VBAR_INFO_ENDIF, file=config)
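

# Illustrative helper (not part of the original tooling) mirroring the BAR placement
# pattern used above: get_free_mmio() hands back a window large enough for alignment
# plus size, common.round_up() aligns the start, and the BAR occupies the aligned slice.
def _carve_bar(window_start, alignment, size):
    aligned_start = (window_start + alignment - 1) & ~(alignment - 1)
    return aligned_start, aligned_start + size - 1

# e.g. _carve_bar(0x80001234, 0x1000, 0x1000) returns (0x80002000, 0x80002fff)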