Code example #1
File: gpa.py Project: Leo2Chang/acrn-hypervisor
def create_device_node(allocation_etree, vm_id, devdict):
    for dev in devdict:
        dev_name = dev[0]
        bar_region = dev[1].split('bar')[-1]
        bar_base = devdict.get(dev)

        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']",
                                  allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm",
                                         None,
                                         allocation_etree,
                                         id=vm_id)
        dev_node = common.get_node(f"./device[@name = '{dev_name}']", vm_node)
        if dev_node is None:
            dev_node = common.append_node("./device",
                                          None,
                                          vm_node,
                                          name=dev_name)
        if common.get_node(f"./bar[@id='{bar_region}']", dev_node) is None:
            common.append_node(f"./bar",
                               hex(bar_base),
                               dev_node,
                               id=bar_region)
        if IVSHMEM in dev_name and bar_region == '2':
            common.update_text(
                f"./bar[@id = '2']",
                hex(bar_base | PREFETCHABLE_BIT | MEMORY_BAR_LOCATABLE_64BITS),
                dev_node, True)
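All of these snippets lean on two small XML helpers from ACRN's config-tool `common` module. The sketch below is inferred purely from the call sites on this page and is not the project's actual implementation; it is just enough, on top of lxml, to experiment with the snippets here. The real helpers may add validation (for example, uniqueness checks) that this sketch omits.

from lxml import etree

def get_node(xpath_str, element):
    # Return the first XPath match (an element, or a string for text()
    # paths), or None when nothing matches.
    result = element.xpath(xpath_str)
    return result[0] if result else None

def append_node(path, text, element, **attrs):
    # Create any missing intermediate elements along `path` under `element`,
    # append the leaf element, set its text and attributes, and return it.
    if hasattr(element, "getroot"):        # accept an ElementTree as well
        element = element.getroot()
    segments = [seg for seg in path.split("/") if seg not in ("", ".")]
    if segments[0] == element.tag:         # absolute path: skip the root tag
        segments = segments[1:]
    for seg in segments[:-1]:
        child = element.find(seg)
        element = child if child is not None else etree.SubElement(element, seg)
    leaf = etree.SubElement(element, segments[-1])
    for key, value in attrs.items():
        leaf.set(key, str(value))
    leaf.text = None if text is None else str(text)
    return leaf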
Code example #2
File: gpa.py Project: Leo2Chang/acrn-hypervisor
def allocate_ssram_region(board_etree, scenario_etree, allocation_etree):
    # Guest physical address of the SW SRAM allocated to a pre-launched VM
    enabled = common.get_node("//SSRAM_ENABLED/text()", scenario_etree)
    if enabled == "y":
        pre_rt_vms = common.get_node("//vm[vm_type ='PRE_RT_VM']",
                                     scenario_etree)
        if pre_rt_vms is not None:
            vm_id = pre_rt_vms.get("id")
            l3_sw_sram = board_etree.xpath(
                "//cache[@level='3']/capability[@id='Software SRAM']")
            if l3_sw_sram:
                start = min(
                    map(lambda x: int(x.find("start").text, 16), l3_sw_sram))
                end = max(
                    map(lambda x: int(x.find("end").text, 16), l3_sw_sram))

                allocation_vm_node = common.get_node(
                    f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
                if allocation_vm_node is None:
                    allocation_vm_node = common.append_node("/acrn-config/vm",
                                                            None,
                                                            allocation_etree,
                                                            id=vm_id)
                common.append_node("./ssram/start_gpa", hex(start),
                                   allocation_vm_node)
                common.append_node("./ssram/end_gpa", hex(end),
                                   allocation_vm_node)
Code example #3
File: gpa.py Project: Leo2Chang/acrn-hypervisor
def allocate_log_area(board_etree, scenario_etree, allocation_etree):
    tpm2_enabled = common.get_node(
        f"//vm[@id = '0']/mmio_resources/TPM2/text()", scenario_etree)
    if tpm2_enabled is None or tpm2_enabled == 'n':
        return

    if common.get_node("//capability[@id='log_area']",
                       board_etree) is not None:
        # VIRT_ACPI_DATA_ADDR
        log_area_min_len = int(
            common.get_node(f"//log_area_minimum_length/text()", board_etree),
            16)
        log_area_end_address = 0x7FFF0000
        log_area_start_address = log_area_end_address - log_area_min_len
        allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '0']",
                                             allocation_etree)
        if allocation_vm_node is None:
            allocation_vm_node = common.append_node("/acrn-config/vm",
                                                    None,
                                                    allocation_etree,
                                                    id='0')
        common.append_node("./log_area_start_address",
                           hex(log_area_start_address).upper(),
                           allocation_vm_node)
        common.append_node("./log_area_minimum_length",
                           hex(log_area_min_len).upper(), allocation_vm_node)
Code example #4
def allocate_log_area(board_etree, scenario_etree, allocation_etree):
    tpm2_enabled = common.get_node(
        f"//vm[@id = '0']/mmio_resources/TPM2/text()", scenario_etree)
    if tpm2_enabled is None or tpm2_enabled == 'n':
        return

    if common.get_node("//capability[@id='log_area']",
                       board_etree) is not None:
        log_area_min_len_native = int(
            common.get_node(f"//log_area_minimum_length/text()", board_etree),
            16)
        log_area_start_address = common.round_up(VIRT_ACPI_NVS_ADDR,
                                                 0x10000) + RESERVED_NVS_AREA
        allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '0']",
                                             allocation_etree)
        if allocation_vm_node is None:
            allocation_vm_node = common.append_node("/acrn-config/vm",
                                                    None,
                                                    allocation_etree,
                                                    id='0')
        common.append_node("./log_area_start_address",
                           hex(log_area_start_address).upper(),
                           allocation_vm_node)
        common.append_node("./log_area_minimum_length",
                           hex(log_area_min_len_native).upper(),
                           allocation_vm_node)
Code example #5
File: clos.py Project: kingfisherht/acrn-hypervisor
def create_clos_node(scenario_etree, vm_id, index_list):
    allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", scenario_etree)
    if allocation_vm_node is None:
        allocation_vm_node = common.append_node("/acrn-config/vm", None, scenario_etree, id = vm_id)
    if common.get_node("./clos", allocation_vm_node) is None:
        clos_node = common.append_node("./clos", None, allocation_vm_node)
        for index in index_list:
            common.append_node(f"./vcpu_clos", str(index), clos_node)
Code example #6
File: pio.py Project: NanlinXie/acrn-hypervisor-1
def create_vuart_base_node(etree, vm_id, vuart_id, vuart_base):
    vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
    if vm_node is None:
        vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
    vuart_node = common.get_node(f"./legacy_vuart[@id = '{vuart_id}']", vm_node)
    if vuart_node is None:
        vuart_node = common.append_node("./legacy_vuart", None, vm_node, id = vuart_id)
    if common.get_node(f"./base", vuart_node) is None:
        common.append_node(f"./base", vuart_base, vuart_node)
Code example #7
def fn(board_etree, scenario_etree, allocation_etree):
    cpus_for_sos = sos_cpu_affinity(scenario_etree)
    if cpus_for_sos:
        if common.get_node("//vm[vm_type = 'SOS_VM']", scenario_etree) is not None:
            vm_id = common.get_node("//vm[vm_type = 'SOS_VM']/@id", scenario_etree)
            allocation_sos_vm_node = common.get_node(f"/acrn-config/vm[@id='{vm_id}']", allocation_etree)
            if allocation_sos_vm_node is None:
                allocation_sos_vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
            if common.get_node("./vm_type", allocation_sos_vm_node) is None:
                common.append_node("./vm_type", "SOS_VM", allocation_sos_vm_node)
            for pcpu_id in cpus_for_sos:
                common.append_node("./cpu_affinity/pcpu_id", str(pcpu_id), allocation_sos_vm_node)
Code example #8
def create_vuart_node(allocation_etree, vm_id, devdict):
    for dev in devdict:
        vuart_id = dev[0][-1]
        bar_base = devdict.get(dev)

        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
        vuart_node = common.get_node(f"./legacy_vuart[@id = '{vuart_id}']", vm_node)
        if vuart_node is None:
            vuart_node = common.append_node("./legacy_vuart", None, vm_node, id = vuart_id)
        if common.get_node(f"./base", vuart_node) is None:
            common.append_node(f"./base", hex(bar_base), vuart_node)
Code example #9
File: hv_ram.py Project: wenlingz/acrn-hypervisor
def fn(board_etree, scenario_etree, allocation_etree):
    # this dictionary maps 'address start' to 'mem range'
    ram_range = {}

    post_launched_vm_num = 0
    for id in common.VM_TYPES:
        if common.VM_TYPES[id] in scenario_cfg_lib.VM_DB and \
                        scenario_cfg_lib.VM_DB[common.VM_TYPES[id]]["load_type"] == "POST_LAUNCHED_VM":
            post_launched_vm_num += 1
    hv_ram_size = common.HV_BASE_RAM_SIZE + common.POST_LAUNCHED_VM_RAM_SIZE * post_launched_vm_num

    ivshmem_enabled = common.get_node("//IVSHMEM_ENABLED/text()",
                                      scenario_etree)
    total_shm_size = 0
    if ivshmem_enabled == 'y':
        raw_shmem_regions = scenario_etree.xpath("//IVSHMEM_REGION/text()")
        for raw_shm in raw_shmem_regions:
            if raw_shm.strip() == '':
                continue
            raw_shm_splited = raw_shm.split(',')
            if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
                    and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
                try:
                    size = raw_shm_splited[1].strip()
                    int_size = int(size) * 0x100000
                    total_shm_size += int_size
                except Exception as e:
                    print(e)
    hv_ram_size += 2 * max(total_shm_size, 0x200000)
    assert (hv_ram_size <= HV_RAM_SIZE_MAX)

    # Reserve 16M of memory for the hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend placing the hv RAM start address above 0x10000000 to
    # reduce memory conflicts with GRUB and the SOS kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
        if hv_start_offset <= start_addr < 0x80000000:
            del board_cfg_lib.USED_RAM_RANGE[start_addr]
    ram_range = board_cfg_lib.get_ram_range()
    avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size),
                                                   hv_start_offset)
    hv_start_addr = int(avl_start_addr, 16) + reserved_ram
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
    board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size

    common.append_node("/acrn-config/hv/MEMORY/HV_RAM_START",
                       hex(hv_start_addr), allocation_etree)
    common.append_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE", hex(hv_ram_size),
                       allocation_etree)
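A numeric walk-through of the placement math above, under assumed inputs: find_avl_memory returns "0x10000000", and MEM_ALIGN is taken to be 2 MB (an assumption; the constant is defined elsewhere in the config tools):

MEM_ALIGN = 0x200000                       # assumed 2 MB alignment
reserved_ram = 0x1000000                   # 16 MB, as in the snippet
avl_start_addr = "0x10000000"              # assumed find_avl_memory result
hv_start_addr = int(avl_start_addr, 16) + reserved_ram
hv_start_addr = (hv_start_addr + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1)  # round_up
assert hv_start_addr == 0x11000000         # already 2 MB aligned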
Code example #10
def fn(board_etree, scenario_etree, allocation_etree):
    for vm_node in scenario_etree.xpath("//vm"):
        vm_id = vm_node.get('id')
        allocation_vm_node = common.get_node(
            f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if allocation_vm_node is None:
            allocation_vm_node = common.append_node("/acrn-config/vm",
                                                    None,
                                                    allocation_etree,
                                                    id=vm_id)
        for policy in policies:
            if vm_node.xpath(policy.condition):
                common.append_node("./guest_flags/guest_flag",
                                   str(policy.guest_flag), allocation_vm_node)
        common.append_node("./guest_flags/guest_flag", 'GUEST_FLAG_STATIC_VM',
                           allocation_vm_node)
Code example #11
File: hv_ram.py Project: lishua1x/acrn-hypervisor
def fn(board_etree, scenario_etree, allocation_etree):
    # this dictionary maps 'address start' to 'mem range'
    ram_range = {}

    vm_count = common.count_nodes("//*[local-name() = 'vm']", scenario_etree)
    hv_ram_size = VM_NUM_MAP_TOTAL_HV_RAM_SIZE[vm_count]

    ivshmem_enabled = common.get_text("//IVSHMEM_ENABLED", scenario_etree)
    total_shm_size = 0
    if ivshmem_enabled == 'y':
        raw_shmem_regions = scenario_etree.xpath("//IVSHMEM_REGION/text()")
        for raw_shm in raw_shmem_regions:
            if raw_shm.strip() == '':
                continue
            raw_shm_splited = raw_shm.split(',')
            if len(raw_shm_splited) == 3 and raw_shm_splited[0].strip() != '' \
                    and raw_shm_splited[1].strip() != '' and len(raw_shm_splited[2].strip().split(':')) >= 1:
                try:
                    size = raw_shm_splited[1].strip()
                    int_size = int(size) * 0x100000
                    total_shm_size += int_size
                except Exception as e:
                    print(e)
    hv_ram_size += total_shm_size
    assert (hv_ram_size <= HV_RAM_SIZE_MAX)

    # Reserve 16M of memory for the hv sbuf, ramoops, etc.
    reserved_ram = 0x1000000
    # We recommend placing the hv RAM start address above 0x10000000 to
    # reduce memory conflicts with GRUB and the SOS kernel.
    hv_start_offset = 0x10000000
    total_size = reserved_ram + hv_ram_size
    for start_addr in list(board_cfg_lib.USED_RAM_RANGE):
        if hv_start_offset <= start_addr < 0x80000000:
            del board_cfg_lib.USED_RAM_RANGE[start_addr]
    ram_range = board_cfg_lib.get_ram_range()
    avl_start_addr = board_cfg_lib.find_avl_memory(ram_range, str(total_size),
                                                   hv_start_offset)
    hv_start_addr = int(avl_start_addr, 16) + reserved_ram
    hv_start_addr = common.round_up(hv_start_addr, MEM_ALIGN)
    board_cfg_lib.USED_RAM_RANGE[hv_start_addr] = total_size

    common.append_node("/acrn-config/hv/MEMORY/HV_RAM_START",
                       hex(hv_start_addr), allocation_etree)
    common.append_node("/acrn-config/hv/MEMORY/HV_RAM_SIZE", hex(hv_ram_size),
                       allocation_etree)
Code example #12
File: clos.py Project: kingfisherht/acrn-hypervisor
def create_mask_list_node(board_etree, scenario_etree, allocation_etree, rdt_policy_list):
    allocation_hv_node = common.get_node(f"//hv", allocation_etree)
    if allocation_hv_node is None:
        allocation_hv_node = common.append_node(f"/acrn-config/hv", None, allocation_etree)

    if common.get_node("./clos_mask[@id = l3]", allocation_hv_node) is None:
        clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id="l3")
        length = common.get_node(f"//cache[@level='3']/capability/capacity_mask_length/text()", board_etree)
        if length is not None:
            value = hex((1 << int(length)) - 1)
        else:
            value = "0xffff"
        for i in range(0, len(rdt_policy_list)):
            if rdt_policy_list[i].l3policy.get_clos_mask() is not None:
                value = str(rdt_policy_list[i].l3policy.get_clos_mask())
            common.append_node(f"./clos", value, clos_mask)
        for index,cache2 in enumerate(L2Policy.cache2_id_list):
            length = common.get_node(f"//cache[@level='2' and @id = '{cache2}']/capability/capacity_mask_length/text()", board_etree)
            value = hex((1 << int(length)) - 1)
            if common.get_node("./clos_mask[@id = '{cache2}']", allocation_hv_node) is None:
                clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id=cache2)
            for i in range(0, len(rdt_policy_list)):
                if rdt_policy_list[i].l2policy.get_clos_mask(index) is not None:
                    value = str(rdt_policy_list[i].l2policy.get_clos_mask(index))
                common.append_node(f"./clos", value, clos_mask)
Code example #13
File: clos.py Project: yfliuuu/acrn-hypervisor
def creat_mask_list_node(board_etree, scenario_etree, allocation_etree,
                         mask_list):
    allocation_hv_node = common.get_node(f"//hv", allocation_etree)
    if allocation_hv_node is None:
        allocation_hv_node = common.append_node(f"/acrn-config/hv", None,
                                                allocation_etree)
    cache2_id_list = scenario_etree.xpath(
        "//CACHE_ALLOCATION[CACHE_LEVEL = 2]/CACHE_ID/text()")
    cache2_id_list.sort()
    if common.get_node("./clos_mask[@id = l3]", allocation_hv_node) is None:
        clos_mask = common.append_node("./clos_mask",
                                       None,
                                       allocation_hv_node,
                                       id="l3")
        for i in range(0, len(mask_list)):
            if mask_list[i]["l3"] == "None":
                value = "0xffff"
            else:
                value = str(mask_list[i]["l3"])
            common.append_node(f"./clos", value, clos_mask)

        for cache2 in cache2_id_list:
            if common.get_node("./clos_mask[@id = '{cache2}']",
                               allocation_hv_node) is None:
                clos_mask = common.append_node("./clos_mask",
                                               None,
                                               allocation_hv_node,
                                               id=cache2)
            for i in range(0, len(mask_list)):
                if mask_list[i][cache2] == "None":
                    value = "0xffff"
                else:
                    value = str(mask_list[i][cache2])
                common.append_node(f"./clos", value, clos_mask)
Code example #14
File: bdf.py Project: yfliuuu/acrn-hypervisor
def create_igd_sbdf(board_etree, allocation_etree):
    """
    Extract the integrated GPU bdf from board.xml. If the device is not present, set bdf to "0xFFFF" which indicates the device
    doesn't exist.
    """
    bus = "0x0"
    device_node = common.get_node(
        f"//bus[@type='pci' and @address='{bus}']/device[vendor='0x8086' and class='0x030000']",
        board_etree)
    if device_node is None:
        common.append_node("/acrn-config/hv/MISC_CFG/IGD_SBDF", '0xFFFF',
                           allocation_etree)
    else:
        address = device_node.get('address')
        dev = int(address, 16) >> 16
        func = int(address, 16) & 0xffff
        common.append_node("/acrn-config/hv/MISC_CFG/IGD_SBDF",
                           f"{(int(bus, 16) << 8) | (dev << 3) | func:#06x}",
                           allocation_etree)
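A quick check of the SBDF packing above, for an integrated GPU at the usual BDF 00:02.0 (the board.xml address 0x00020000 is an assumed input): the upper 16 bits of the address hold the device number and the lower 16 bits the function number:

bus = "0x0"
address = "0x00020000"                     # hypothetical board.xml address
dev = int(address, 16) >> 16               # 0x2
func = int(address, 16) & 0xffff           # 0x0
assert f"{(int(bus, 16) << 8) | (dev << 3) | func:#06x}" == "0x0010"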
Code example #15
def create_vuart_irq_node(etree, vm_id, vuart_id, irq):
    allocation_sos_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
    if allocation_sos_vm_node is None:
        allocation_sos_vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
    if common.get_node("./vm_type", allocation_sos_vm_node) is None:
        common.append_node("./vm_type", "SOS_VM", allocation_sos_vm_node)
    if common.get_node(f"./legacy_vuart[@id = '{vuart_id}']", allocation_sos_vm_node) is None:
        common.append_node("./legacy_vuart", None, allocation_sos_vm_node, id = vuart_id)

    common.append_node(f"./legacy_vuart[@id = '{vuart_id}']/irq", irq, allocation_sos_vm_node)
Code example #16
def create_device_node(allocation_etree, vm_id, devdict):
    for dev in devdict:
        dev_name = dev
        bdf = devdict.get(dev)
        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
        dev_node = common.get_node(f"./device[@name = '{dev_name}']", vm_node)
        if dev_node is None:
            dev_node = common.append_node("./device", None, vm_node, name = dev_name)
        if common.get_node(f"./bus", dev_node) is None:
            common.append_node(f"./bus",  f"{bdf.bus:#04x}".upper(), dev_node)
        if common.get_node(f"./dev", dev_node) is None:
            common.append_node(f"./dev", f"{bdf.dev:#04x}".upper(), dev_node)
        if common.get_node(f"./func", dev_node) is None:
            common.append_node(f"./func", f"{bdf.func:#04x}".upper(), dev_node)
Code example #17
def allocate_hugepages(board_etree, scenario_etree, allocation_etree):
    hugepages_1gb = 0
    hugepages_2mb = 0
    ram_range_info = import_memory_info(board_etree)
    total_hugepages = sum(ram_range_info[i] for i in ram_range_info if i >= 0x100000000)/(1024*1024*1024) \
                      - sum(int(i) for i in scenario_etree.xpath("//vm[load_order = 'PRE_LAUNCHED_VM']/memory/hpa_region/size_hpa/text()"))/1024 \
                      - 4 - 300/1024 * len(scenario_etree.xpath("//virtio_devices/gpu"))

    post_launch_vms = scenario_etree.xpath(
        "//vm[load_order = 'POST_LAUNCHED_VM']")
    if len(post_launch_vms) > 0:
        for post_launch_vm in post_launch_vms:
            size = common.get_node("./memory/size/text()", post_launch_vm)
            if size is not None:
                mb, gb = math.modf(int(size) / 1024)
                hugepages_1gb = int(hugepages_1gb + gb)
                hugepages_2mb = int(hugepages_2mb + math.ceil(mb * 1024 / 2))

    post_vms_memory = sum(
        int(i) for i in scenario_etree.xpath(
            "//vm[load_order = 'POST_LAUNCHED_VM']/memory/size/text()")) / 1024
    correction_mb, correction_gb = math.modf(total_hugepages - post_vms_memory)
    if total_hugepages - post_vms_memory < 0:
        logging.warning(f"The sum {post_vms_memory} of memory configured in post launch VMs should not be larger than " \
        f"the calculated total hugepages {total_hugepages} of service VMs. Please update the configuration in post launch VMs")

    hugepages_1gb = hugepages_1gb + correction_gb
    hugepages_2mb = hugepages_2mb + math.ceil(correction_mb * 1024 / 2)

    allocation_service_vm_node = common.get_node(
        "/acrn-config/vm[load_order = 'SERVICE_VM']", allocation_etree)
    if allocation_service_vm_node is not None:
        common.append_node("./hugepages/gb", int(hugepages_1gb),
                           allocation_service_vm_node)
        common.append_node("./hugepages/mb", int(hugepages_2mb),
                           allocation_service_vm_node)
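A worked example of the 1 GB / 2 MB split above, for a hypothetical post-launched VM with 3584 MB of memory: math.modf separates 3.5 GB into fractional and integral parts, yielding 3 one-gigabyte pages plus 256 two-megabyte pages:

import math

size = "3584"                              # hypothetical <memory><size> in MB
mb, gb = math.modf(int(size) / 1024)       # (0.5, 3.0)
assert int(gb) == 3
assert math.ceil(mb * 1024 / 2) == 256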
Code example #18
def allocate_ssram_region(board_etree, scenario_etree, allocation_etree):
    # Guest physical address of the SW SRAM allocated to a pre-launched VM
    ssram_area_max_size = 0
    enabled = common.get_node("//SSRAM_ENABLED/text()", scenario_etree)
    if enabled == "y":
        pre_rt_vms = common.get_node(
            "//vm[load_order = 'PRE_LAUNCHED_VM' and vm_type = 'RTVM']",
            scenario_etree)
        if pre_rt_vms is not None:
            vm_id = pre_rt_vms.get("id")
            l3_sw_sram = board_etree.xpath(
                "//cache[@level='3']/capability[@id='Software SRAM']")
            if l3_sw_sram:
                # Calculate the SSRAM area size, covering all cache parts.
                top = 0
                base = 0
                for ssram in board_etree.xpath(
                        "//cache/capability[@id='Software SRAM']"):
                    entry_base = int(common.get_node("./start/text()", ssram),
                                     16)
                    entry_size = int(common.get_node("./size/text()", ssram))
                    top = (
                        entry_base +
                        entry_size) if top < (entry_base + entry_size) else top
                    base = entry_base if base == 0 or entry_base < base else base
                ssram_area_max_size = math.ceil((top - base) / 0x1000) * 0x1000

            allocation_vm_node = common.get_node(
                f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
            if allocation_vm_node is None:
                allocation_vm_node = common.append_node("/acrn-config/vm",
                                                        None,
                                                        allocation_etree,
                                                        id=vm_id)
            common.append_node(
                "./ssram/start_gpa",
                hex(PRE_RTVM_SW_SRAM_END_GPA - ssram_area_max_size + 1),
                allocation_vm_node)
            common.append_node("./ssram/end_gpa",
                               hex(PRE_RTVM_SW_SRAM_END_GPA),
                               allocation_vm_node)
            common.append_node("./ssram/max_size", str(ssram_area_max_size),
                               allocation_vm_node)
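A numeric example of the SSRAM sizing above, with two hypothetical Software SRAM parts: the region spans from the lowest base to the highest top and is rounded up to a 4 KB boundary:

import math

parts = [(0x5e800000, 0x180000), (0x5e980000, 0x80000)]   # (base, size), assumed
top = max(base + size for base, size in parts)            # 0x5ea00000
base = min(base for base, _ in parts)                     # 0x5e800000
assert math.ceil((top - base) / 0x1000) * 0x1000 == 0x200000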
Code example #19
def write_hpa_info(allocation_etree, mem_info_list, vm_node_index_list):
    for i in range(len(vm_node_index_list)):
        vm_id = vm_node_index_list[i]
        hpa_info = mem_info_list[i]
        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']",
                                  allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm",
                                         None,
                                         allocation_etree,
                                         id=vm_id)
        memory_node = common.get_node("./memory", vm_node)
        if memory_node is None:
            memory_node = common.append_node(f"./memory", None, vm_node)
        region_index = 1
        start_key = sorted(hpa_info)
        for start_hpa in start_key:
            hpa_region_node = common.get_node(
                f"./hpa_region[@id='{region_index}']", memory_node)
            if hpa_region_node is None:
                hpa_region_node = common.append_node(
                    "./hpa_region",
                    None,
                    memory_node,
                    id=str(region_index).encode('UTF-8'))

                start_hpa_node = common.get_node("./start_hpa",
                                                 hpa_region_node)
                if start_hpa_node is None:
                    common.append_node("./start_hpa", hex(start_hpa),
                                       hpa_region_node)

                size_hpa_node = common.get_node("./size_hpa", hpa_region_node)
                if size_hpa_node is None:
                    common.append_node("./size_hpa",
                                       hex(hpa_info[start_hpa] * 0x100000),
                                       hpa_region_node)
            region_index = region_index + 1
Code example #20
def alloc_device_irqs(board_etree, scenario_etree, allocation_etree):
    service_vm_id = -1
    irq_allocation = defaultdict(lambda: defaultdict(lambda: []))  # vm_id -> irq -> [device]

    # Collect the list of devices that have to use INTx, excluding legacy UART which is to be emulated.
    device_nodes = set(board_etree.xpath("//device[count(resource[@type='irq' or @type='interrupt_pin']) > 0 and count(capability[@id='MSI' or @id='MSI-X']) = 0]"))
    uart_nodes = set(board_etree.xpath("//device[@id='PNP0501']"))
    device_nodes -= uart_nodes

    #
    # Identify the interrupt lines each pre-launched VM uses
    #
    for vm in scenario_etree.xpath("//vm"):
        vm_type = vm.find("vm_type").text
        vm_id = int(vm.get("id"))
        if lib.lib.is_pre_launched_vm(vm_type):
            pt_intx_text = common.get_node("pt_intx/text()", vm)
            if pt_intx_text is not None:
                pt_intx_mapping = dict(eval(f"[{pt_intx_text.replace(')(', '), (')}]"))
                for irq in pt_intx_mapping.keys():
                    irq_allocation[vm_id][irq].append("(Explicitly assigned in scenario configuration)")
            for pci_dev in vm.xpath("pci_devs/pci_dev/text()"):
                bdf = lib.lib.BusDevFunc.from_str(pci_dev.split(" ")[0])
                address = hex((bdf.dev << 16) | (bdf.func))
                device_node = common.get_node(f"//bus[@address='{hex(bdf.bus)}']/device[@address='{address}']", board_etree)
                if device_node in device_nodes:
                    irqs = get_irqs_of_device(device_node)
                    for irq in irqs:
                        irq_allocation[vm_id][irq].append(pci_dev)
                    device_nodes.discard(device_node)

            # Raise error when any pre-launched VM with LAPIC passthrough requires any interrupt line.
            lapic_passthru_flag = common.get_node("guest_flags[guest_flag='GUEST_FLAG_LAPIC_PASSTHROUGH']", vm)
            if lapic_passthru_flag is not None and irq_allocation[vm_id]:
                for irq, devices in irq_allocation[vm_id].items():
                    print(f"Interrupt line {irq} is used by the following device(s).")
                    for device in devices:
                        print(f"\t{device}")
                raise lib.error.ResourceError(f"Pre-launched VM {vm_id} with LAPIC_PASSTHROUGH flag cannot use interrupt lines.")
        elif lib.lib.is_sos_vm(vm_type):
            service_vm_id = vm_id

    #
    # Detect interrupt line conflicts
    #
    conflicts = defaultdict(lambda: defaultdict(lambda: set())) # irq -> vm_id -> devices

    # If a service VM exists, collect its interrupt lines as well
    if service_vm_id >= 0:
        # Collect the interrupt lines that may be used by the service VM
        for device_node in device_nodes:
            acpi_object = device_node.find("acpi_object")
            description = ""
            if acpi_object is not None:
                description = acpi_object.text
            description = device_node.get("description", description)

            # Guess BDF of the device
            bus = device_node.getparent()
            if bus.tag == "bus" and bus.get("type") == "pci" and device_node.get("address") is not None:
                bus_number = int(bus.get("address"), 16)
                address = int(device_node.get("address"), 16)
                device_number = address >> 16
                function_number = address & 0xffff
                description = f"{bus_number:02x}:{device_number:02x}.{function_number} {description}"

                for irq in get_irqs_of_device(device_node):
                    irq_allocation[service_vm_id][irq].append(description)

    # Identify and report conflicts among interrupt lines of the VMs
    for vm1, vm2 in combinations(irq_allocation.keys(), 2):
        common_irqs = set(irq_allocation[vm1].keys()) & set(irq_allocation[vm2].keys())
        for irq in common_irqs:
            conflicts[irq][vm1].update(set(irq_allocation[vm1][irq]))
            conflicts[irq][vm2].update(set(irq_allocation[vm2][irq]))

    if conflicts:
        print("Interrupt line conflicts detected!")
        for irq, vm_devices in sorted(conflicts.items()):
            print(f"Interrupt line {irq} is shared by the following devices.")
            for vm_id, devices in vm_devices.items():
                for device in sorted(devices):
                    print(f"\tVM {vm_id}: {device}")
        raise lib.error.ResourceError(f"VMs have conflicting interrupt lines.")

    #
    # Dump allocations to allocation_etree. The virtual interrupt line is the same as the physical one unless otherwise
    # stated in the scenario configuration.
    #
    for vm_id, alloc in irq_allocation.items():
        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = str(vm_id))
        pt_intx_text = common.get_node(f"//vm[@id='{vm_id}']/pt_intx/text()", scenario_etree)
        pt_intx_mapping = dict(eval(f"[{pt_intx_text.replace(')(', '), (')}]")) if pt_intx_text is not None else {}
        for irq, devs in alloc.items():
            for dev in devs:
                if dev.startswith("("):  # Allocation in the scenario configuration need not go to allocation.xml
                    continue
                bdf = dev.split(" ")[0]
                dev_name = f"PTDEV_{bdf}"
                dev_node = common.get_node(f"device[@name = '{dev_name}']", vm_node)
                if dev_node is None:
                    dev_node = common.append_node("./device", None, vm_node, name = dev_name)
                pt_intx_node = common.get_node(f"pt_intx", dev_node)
                virq = pt_intx_mapping.get(irq, irq)
                if pt_intx_node is None:
                    common.append_node(f"./pt_intx", f"({irq}, {virq})", dev_node)
                else:
                    pt_intx_node.text += f" ({irq}, {virq})"
Code example #21
def fn(board_etree, scenario_etree, allocation_etree):
    pci_bus_nums = board_etree.xpath("//bus[@type='pci']/@address")
    common.append_node("/acrn-config/platform/MAX_PCI_BUS_NUM", hex(max(map(lambda x: int(x, 16), pci_bus_nums)) + 1), allocation_etree)
Code example #22
def create_s5_vuart_connection(allocation_etree, service_vm_name, service_vm_port, user_vm_name, user_vm_port):
    vuart_connections_node = common.get_node(f"/acrn-config/hv/vuart_connections", allocation_etree)
    if vuart_connections_node is None:
        vuart_connections_node = common.append_node("/acrn-config/hv/vuart_connections", None, allocation_etree)

    connection_name = service_vm_name + "_"  + user_vm_name

    vuart_connection_node = common.append_node(f"./vuart_connection", None, vuart_connections_node)
    common.append_node(f"./name", connection_name, vuart_connection_node)
    common.append_node(f"./type", "type", vuart_connection_node)

    service_vm_endpoint = common.append_node(f"./endpoint", None, vuart_connection_node)
    common.append_node(f"./vm_name", service_vm_name, service_vm_endpoint)
    common.append_node(f"./io_port", service_vm_port, service_vm_endpoint)

    user_vm_endpoint = common.append_node(f"./endpoint", None, vuart_connection_node)
    common.append_node(f"./vm_name", user_vm_name, user_vm_endpoint)
    common.append_node(f"./io_port", user_vm_port, user_vm_endpoint)
Code example #23
File: gpa.py Project: Leo2Chang/acrn-hypervisor
def create_native_pci_hole_node(allocation_etree, low_mem, high_mem):
    common.append_node("/acrn-config/hv/MMIO/MMIO32_START",
                       hex(low_mem[0].start).upper(), allocation_etree)
    common.append_node("/acrn-config/hv/MMIO/MMIO32_END",
                       hex(low_mem[0].end + 1).upper(), allocation_etree)
    if len(high_mem):
        common.append_node("/acrn-config/hv/MMIO/MMIO64_START",
                           hex(high_mem[0].start).upper(), allocation_etree)
        common.append_node("/acrn-config/hv/MMIO/MMIO64_END",
                           hex(high_mem[0].end + 1).upper(), allocation_etree)
        common.append_node("/acrn-config/hv/MMIO/HI_MMIO_START",
                           hex(high_mem[0].start).upper(), allocation_etree)
        common.append_node("/acrn-config/hv/MMIO/HI_MMIO_END",
                           hex(high_mem[0].end + 1).upper(), allocation_etree)
    else:
        common.append_node("/acrn-config/hv/MMIO/MMIO64_START", "~0".upper(),
                           allocation_etree)
        common.append_node("/acrn-config/hv/MMIO/MMIO64_END", "~0",
                           allocation_etree)
        common.append_node("/acrn-config/hv/MMIO/HI_MMIO_START", "~0".upper(),
                           allocation_etree)
        common.append_node("/acrn-config/hv/MMIO/HI_MMIO_END", "0",
                           allocation_etree)