Example #1
def fn(board_etree, scenario_etree, allocation_etree):
    irq_list = get_native_valid_irq()
    hv_debug_console = lib.lib.parse_hv_console(scenario_etree)
    native_ttys = lib.lib.get_native_ttys()
    vuart_valid = ['ttyS0', 'ttyS1', 'ttyS2', 'ttyS3']

    scenario_sos_vm_node = common.get_node("//vm[vm_type = 'SOS_VM']",
                                           scenario_etree)
    if scenario_sos_vm_node is not None:
        vm_id = common.get_node("./@id", scenario_sos_vm_node)
        if common.get_node("./legacy_vuart[@id = '0']/base/text()",
                           scenario_sos_vm_node) != "INVALID_COM_BASE":
            if (hv_debug_console in vuart_valid
                    and hv_debug_console in native_ttys
                    and native_ttys[hv_debug_console]['irq'] < LEGACY_IRQ_MAX):
                vuart0_irq = native_ttys[hv_debug_console]['irq']
            else:
                vuart0_irq = alloc_irq(irq_list)

            create_vuart_irq_node(allocation_etree, vm_id, "0", vuart0_irq)

        if common.get_node("./legacy_vuart[@id = '1']/base/text()",
                           scenario_sos_vm_node) != "INVALID_COM_BASE":
            vuart1_irq = alloc_irq(irq_list)

            create_vuart_irq_node(allocation_etree, vm_id, "1", vuart1_irq)
Example #2
def insert_vmsix_to_dev_dict(pt_dev_node, devdict):
    """
    Allocate an unused mmio window for the first free bar region of a vmsix supported passthrough device.
    1. Check if this passtrhough device is in the list "KNOWN_CAPS_PCI_DEVS_DB" of "VMSIX" suppoeted device.
    2. Find the first unused region index for the vmsix bar.
    3. Allocate an unused mmio window for this bar.
    """
    vendor = common.get_node("./vendor/text()", pt_dev_node)
    identifier = common.get_node("./identifier/text()", pt_dev_node)
    if vendor is None or identifier is None:
        return

    if (vendor, identifier) in KNOWN_CAPS_PCI_DEVS_DB.get('VMSIX', []):
        bar_regions = pt_dev_node.xpath(".//resource[@type = 'memory' and @width]")
        bar_32bits = [bar_region.get('id') for bar_region in bar_regions if bar_region.get('width') == '32']
        bar_32bits_idx_list = [int(bar.split('bar')[-1]) for bar in bar_32bits]
        bar_64bits = [bar_region.get('id') for bar_region in bar_regions if bar_region.get('width') == '64']
        bar_64bits_idx_list_1 = [int(bar.split('bar')[-1]) for bar in bar_64bits]
        bar_64bits_idx_list_2 = [idx + 1 for idx in bar_64bits_idx_list_1]

        bar_regions_io_port = pt_dev_node.xpath(".//resource[@type = 'io_port' and @id[starts-with(., 'bar')]]/@id")
        bar_io_port_idx_list = [int(bar.split('bar')[-1]) for bar in bar_regions_io_port]

        used_bar_index = set(bar_32bits_idx_list + bar_64bits_idx_list_1 + bar_64bits_idx_list_2 + bar_io_port_idx_list)
        unused_bar_index = [i for i in range(6) if i not in used_bar_index]
        try:
            next_bar_region = unused_bar_index.pop(0)
        except IndexError:
            raise lib.error.ResourceError(f"Cannot allocate a bar index for vmsix supported device: {vendor}:{identifier}, used bar idx list: {used_bar_index}")
        address = common.get_node("./@address", pt_dev_node)
        bus = common.get_node("../@address", pt_dev_node)
        if bus is not None and address is not None:
            bdf = lib.lib.BusDevFunc(bus=int(bus, 16), dev=int(address, 16) >> 16, func=int(address, 16) & 0xffff)
            dev_name = str(bdf)
            devdict[(f"{dev_name}", f"bar{next_bar_region}")] = VMSIX_VBAR_SIZE
Example #3
def get_ivshmem_regions_by_tree(etree):
    ivshmem_enabled = get_ivshmem_enabled_by_tree(etree)
    if ivshmem_enabled == 'n':
        return {}

    ivshmem_regions = etree.xpath("//IVSHMEM_REGION")
    shmem_regions = {}
    for idx, region in enumerate(ivshmem_regions):
        shm_name = region.get('name')
        if shm_name is None:
            continue
        shm_size = common.get_node("./IVSHMEM_SIZE/text()", region)
        shm_vm_list = region.xpath(".//IVSHMEM_VM")
        for shm_vm in shm_vm_list:
            vm_name = common.get_node("./VM_NAME/text()", shm_vm)
            vm_id = common.get_node(f"//vm[name = '{vm_name}']/@id", etree)
            vbdf = common.get_node("./VBDF/text()", shm_vm)
            if vm_id not in shmem_regions:
                shmem_regions[vm_id] = {}
            shmem_regions[vm_id][shm_name] = {
                'id': str(idx),
                'size': shm_size,
                'vbdf': vbdf
            }
    return shmem_regions
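For reference, the mapping returned above is keyed by VM id and then by region name. A hypothetical result for one region shared between VMs 0 and 1 (all values illustrative):

shmem_regions = {
    '0': {'shm_region_0': {'id': '0', 'size': '2', 'vbdf': '00:08.0'}},
    '1': {'shm_region_0': {'id': '0', 'size': '2', 'vbdf': '00:08.0'}},
}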
Example #4
def create_mask_list_node(board_etree, scenario_etree, allocation_etree, rdt_policy_list):
    allocation_hv_node = common.get_node("//hv", allocation_etree)
    if allocation_hv_node is None:
        allocation_hv_node = common.append_node("/acrn-config/hv", None, allocation_etree)

    if common.get_node("./clos_mask[@id = 'l3']", allocation_hv_node) is None:
        clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id="l3")
        length = common.get_node("//cache[@level='3']/capability/capacity_mask_length/text()", board_etree)
        if length is not None:
            value = hex((1 << int(length)) - 1)
        else:
            value = "0xffff"
        for policy in rdt_policy_list:
            if policy.l3policy.get_clos_mask() is not None:
                value = str(policy.l3policy.get_clos_mask())
            common.append_node("./clos", value, clos_mask)
        for index, cache2 in enumerate(L2Policy.cache2_id_list):
            length = common.get_node(f"//cache[@level='2' and @id = '{cache2}']/capability/capacity_mask_length/text()", board_etree)
            value = hex((1 << int(length)) - 1)
            if common.get_node(f"./clos_mask[@id = '{cache2}']", allocation_hv_node) is None:
                clos_mask = common.append_node("./clos_mask", None, allocation_hv_node, id=cache2)
            for policy in rdt_policy_list:
                if policy.l2policy.get_clos_mask(index) is not None:
                    value = str(policy.l2policy.get_clos_mask(index))
                common.append_node("./clos", value, clos_mask)
Example #5
def alloc_legacy_vuart_irqs(board_etree, scenario_etree, allocation_etree):
    native_ttys = lib.lib.get_native_ttys()
    hv_debug_console = lib.lib.parse_hv_console(scenario_etree)

    vm_node_list = scenario_etree.xpath("//vm")
    for vm_node in vm_node_list:
        vm_type = common.get_node("./vm_type/text()", vm_node)
        irq_list = get_native_valid_irq() if vm_type == "SERVICE_VM" else [
            str(d) for d in range(1, 15)
        ]
        legacy_vuart_id_list = vm_node.xpath(
            "legacy_vuart[base != 'INVALID_COM_BASE']/@id")
        legacy_vuart_irq = ''
        for legacy_vuart_id in legacy_vuart_id_list:
            if legacy_vuart_id == '0' and vm_type == "SERVICE_VM":
                if hv_debug_console in native_ttys.keys():
                    if native_ttys[hv_debug_console]['irq'] < LEGACY_IRQ_MAX:
                        legacy_vuart_irq = native_ttys[hv_debug_console]['irq']
                        if legacy_vuart_irq in irq_list:
                            remove_irq(irq_list, legacy_vuart_irq)
                    else:
                        legacy_vuart_irq = assign_legacy_vuart_irqs(
                            vm_node, legacy_vuart_id, irq_list)
                else:
                    raise lib.error.ResourceError(
                        f"{hv_debug_console} is not in the native environment! The available ttys are: {native_ttys.keys()}"
                    )
            else:
                legacy_vuart_irq = assign_legacy_vuart_irqs(
                    vm_node, legacy_vuart_id, irq_list)

            create_vuart_irq_node(allocation_etree,
                                  common.get_node("./@id", vm_node), vm_type,
                                  legacy_vuart_id, legacy_vuart_irq)
Example #6
def allocate_log_area(board_etree, scenario_etree, allocation_etree):
    tpm2_enabled = common.get_node(
        "//vm[@id = '0']/mmio_resources/TPM2/text()", scenario_etree)
    if tpm2_enabled is None or tpm2_enabled == 'n':
        return

    if common.get_node("//capability[@id='log_area']",
                       board_etree) is not None:
        log_area_min_len_native = int(
            common.get_node("//log_area_minimum_length/text()", board_etree),
            16)
        log_area_start_address = common.round_up(VIRT_ACPI_NVS_ADDR,
                                                 0x10000) + RESERVED_NVS_AREA
        allocation_vm_node = common.get_node("/acrn-config/vm[@id = '0']",
                                             allocation_etree)
        if allocation_vm_node is None:
            allocation_vm_node = common.append_node("/acrn-config/vm",
                                                    None,
                                                    allocation_etree,
                                                    id='0')
        common.append_node("./log_area_start_address",
                           hex(log_area_start_address).upper(),
                           allocation_vm_node)
        common.append_node("./log_area_minimum_length",
                           hex(log_area_min_len_native).upper(),
                           allocation_vm_node)
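common.round_up aligns the NVS base to a 64 KiB boundary before the reserved area is added. A sketch of the assumed semantics, valid for power-of-two alignments:

def round_up(addr, align):
    # Smallest multiple of align that is >= addr (align must be a power of two).
    return (addr + align - 1) & ~(align - 1)

assert round_up(0x7ff00001, 0x10000) == 0x7ff10000
assert round_up(0x7ff10000, 0x10000) == 0x7ff10000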
Example #7
def create_device_node(allocation_etree, vm_id, devdict):
    for (dev_name, bar_id), bar_base in devdict.items():
        bar_region = bar_id.split('bar')[-1]

        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']",
                                  allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm",
                                         None,
                                         allocation_etree,
                                         id=vm_id)
        dev_node = common.get_node(f"./device[@name = '{dev_name}']", vm_node)
        if dev_node is None:
            dev_node = common.append_node("./device",
                                          None,
                                          vm_node,
                                          name=dev_name)
        if common.get_node(f"./bar[@id='{bar_region}']", dev_node) is None:
            common.append_node(f"./bar",
                               hex(bar_base),
                               dev_node,
                               id=bar_region)
        if IVSHMEM in dev_name and bar_region == '2':
            common.update_text(
                f"./bar[@id = '2']",
                hex(bar_base | PREFETCHABLE_BIT | MEMORY_BAR_LOCATABLE_64BITS),
                dev_node, True)
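The value OR-ed into the IVSHMEM BAR 2 register encodes standard PCI memory-BAR attribute bits; assuming the usual PCI definitions behind the two constants used above:

MEMORY_BAR_LOCATABLE_64BITS = 0x4   # BAR type bits 2:1 = 0b10 -> 64-bit BAR
PREFETCHABLE_BIT = 0x8              # bit 3 -> prefetchable memory

bar_base = 0x80000000
print(hex(bar_base | PREFETCHABLE_BIT | MEMORY_BAR_LOCATABLE_64BITS))  # 0x8000000c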
Example #8
def fn(board_etree, scenario_etree, allocation_etree):
    native_ttys = lib.lib.get_native_ttys()
    hv_debug_console = lib.lib.parse_hv_console(scenario_etree)

    vm_node_list = scenario_etree.xpath("//vm")
    for vm_node in vm_node_list:
        load_order = common.get_node("./load_order/text()", vm_node)
        legacy_vuart_base = ""
        legacy_vuart_id_list = vm_node.xpath(
            "legacy_vuart[base != 'INVALID_COM_BASE']/@id")
        for legacy_vuart_id in legacy_vuart_id_list:
            if legacy_vuart_id == '0' and load_order == "SERVICE_VM":
                if hv_debug_console in native_ttys.keys():
                    if native_ttys[hv_debug_console]['type'] == "portio":
                        legacy_vuart_base = native_ttys[hv_debug_console][
                            'base']
                    else:
                        legacy_vuart_base = assign_legacy_vuart_io_port(
                            vm_node, legacy_vuart_id)
                else:
                    raise lib.error.ResourceError(
                        f"{hv_debug_console} is not in the native environment! The available ttys are: {native_ttys.keys()}"
                    )
            else:
                legacy_vuart_base = assign_legacy_vuart_io_port(
                    vm_node, legacy_vuart_id)

            if legacy_vuart_base != "":
                create_vuart_base_node(allocation_etree,
                                       common.get_node("./@id", vm_node),
                                       legacy_vuart_id, legacy_vuart_base)
Example #9
def allocate_ssram_region(board_etree, scenario_etree, allocation_etree):
    # Guest physical address of the SW SRAM allocated to a pre-launched VM
    enabled = common.get_node("//SSRAM_ENABLED/text()", scenario_etree)
    if enabled == "y":
        pre_rt_vms = common.get_node("//vm[vm_type ='PRE_RT_VM']",
                                     scenario_etree)
        if pre_rt_vms is not None:
            vm_id = pre_rt_vms.get("id")
            l3_sw_sram = board_etree.xpath(
                "//cache[@level='3']/capability[@id='Software SRAM']")
            if l3_sw_sram:
                start = min(
                    map(lambda x: int(x.find("start").text, 16), l3_sw_sram))
                end = max(
                    map(lambda x: int(x.find("end").text, 16), l3_sw_sram))

                allocation_vm_node = common.get_node(
                    f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
                if allocation_vm_node is None:
                    allocation_vm_node = common.append_node("/acrn-config/vm",
                                                            None,
                                                            allocation_etree,
                                                            id=vm_id)
                common.append_node("./ssram/start_gpa", hex(start),
                                   allocation_vm_node)
                common.append_node("./ssram/end_gpa", hex(end),
                                   allocation_vm_node)
Example #10
def allocate_log_area(board_etree, scenario_etree, allocation_etree):
    tpm2_enabled = common.get_node(
        "//vm[@id = '0']/mmio_resources/TPM2/text()", scenario_etree)
    if tpm2_enabled is None or tpm2_enabled == 'n':
        return

    if common.get_node("//capability[@id='log_area']",
                       board_etree) is not None:
        # the log area grows down from VIRT_ACPI_DATA_ADDR (0x7FFF0000)
        log_area_min_len = int(
            common.get_node("//log_area_minimum_length/text()", board_etree),
            16)
        log_area_end_address = 0x7FFF0000
        log_area_start_address = log_area_end_address - log_area_min_len
        allocation_vm_node = common.get_node("/acrn-config/vm[@id = '0']",
                                             allocation_etree)
        if allocation_vm_node is None:
            allocation_vm_node = common.append_node("/acrn-config/vm",
                                                    None,
                                                    allocation_etree,
                                                    id='0')
        common.append_node("./log_area_start_address",
                           hex(log_area_start_address).upper(),
                           allocation_vm_node)
        common.append_node("./log_area_minimum_length",
                           hex(log_area_min_len).upper(), allocation_vm_node)
Example #11
def threaded_function(thread_data, thread_static, env, addons, hWnd, log):
  while not thread_data.stop:
    time.sleep(0.01)
    will_update_ca_bundle = False
    will_update_addons    = False
    force_update_addons   = False
    activate_launch       = False
    with thread_static.lock:
      if thread_static.need_update_ca_bundle:
        will_update_ca_bundle = common.locate_ca_bundle(env['script_dir']) is None or thread_static.force_update_ca_bundle
        thread_static.need_update_ca_bundle  = False
        thread_static.force_update_ca_bundle = False
      if thread_static.need_update_addons:
        will_update_addons = True
        force_update_addons = thread_static.force_update_addons
        thread_static.need_update_addons  = False
        thread_static.force_update_addons = False
      activate_launch = thread_static.queue_launch
      thread_static.queue_launch = False
    if will_update_ca_bundle:
      try:
        common.get_node({'src': 'https://curl.haxx.se/ca/cacert.pem', 'dest': 'cacert.pem'}, False, False, env['script_dir'], log)
        log.log_ts('CA Bundle updated', info = True)
      except Exception:
        exc = sys.exc_info()
        log.log_ts('{}: {}'.format(exc[0], exc[1]))
        win32gui.SendMessage(hWnd, win32con.WM_COMMAND, common.commands.SHOW_LOG, 0)
        continue
    if will_update_addons:
      update_context = {'launch': activate_launch, 'error': False}
      common.update_addons(env, addons, log, force_update_addons, update_context)
      if update_context['error']:
        win32gui.SendMessage(hWnd, win32con.WM_COMMAND, common.commands.SHOW_LOG, 0)
      elif update_context['launch']:
        win32gui.SendMessage(hWnd, win32con.WM_COMMAND, common.commands.LAUNCH, 0)
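The loop copies all shared flags to locals under one lock acquisition and clears them, so the lock is never held across network or UI calls. A minimal sketch of the assumed thread_static object (the real class lives elsewhere in this project):

import threading

class ThreadStatic:
    def __init__(self):
        self.lock = threading.Lock()
        # Set by the UI thread, consumed and cleared by the worker thread.
        self.need_update_ca_bundle = False
        self.force_update_ca_bundle = False
        self.need_update_addons = False
        self.force_update_addons = False
        self.queue_launch = False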
Example #12
def create_mask_list_node(board_etree, scenario_etree, allocation_etree,
                          mask_list):
    allocation_hv_node = common.get_node("//hv", allocation_etree)
    if allocation_hv_node is None:
        allocation_hv_node = common.append_node("/acrn-config/hv", None,
                                                allocation_etree)
    cache2_id_list = scenario_etree.xpath(
        "//CACHE_ALLOCATION[CACHE_LEVEL = 2]/CACHE_ID/text()")
    cache2_id_list.sort()
    if common.get_node("./clos_mask[@id = l3]", allocation_hv_node) is None:
        clos_mask = common.append_node("./clos_mask",
                                       None,
                                       allocation_hv_node,
                                       id="l3")
        for mask in mask_list:
            if mask["l3"] == "None":
                value = "0xffff"
            else:
                value = str(mask["l3"])
            common.append_node("./clos", value, clos_mask)

        for cache2 in cache2_id_list:
            if common.get_node("./clos_mask[@id = '{cache2}']",
                               allocation_hv_node) is None:
                clos_mask = common.append_node("./clos_mask",
                                               None,
                                               allocation_hv_node,
                                               id=cache2)
            for mask in mask_list:
                if mask[cache2] == "None":
                    value = "0xffff"
                else:
                    value = str(mask[cache2])
                common.append_node("./clos", value, clos_mask)
Example #13
def create_clos_node(allocation_etree, vm_id, index_list):
    allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
    if allocation_vm_node is None:
        allocation_vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
    if common.get_node("./clos", allocation_vm_node) is None:
        clos_node = common.append_node("./clos", None, allocation_vm_node)
        for index in index_list:
            common.append_node("./vcpu_clos", str(index), clos_node)
Example #14
def create_vuart_base_node(etree, vm_id, vuart_id, vuart_base):
    vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
    if vm_node is None:
        vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
    vuart_node = common.get_node(f"./legacy_vuart[@id = '{vuart_id}']", vm_node)
    if vuart_node is None:
        vuart_node = common.append_node("./legacy_vuart", None, vm_node, id = vuart_id)
    if common.get_node(f"./base", vuart_node) is None:
        common.append_node(f"./base", vuart_base, vuart_node)
Example #15
def gen_policy_owner_list(scenario_etree):
    policy_owner_list = []
    vm_list = scenario_etree.xpath("//POLICY/VM")
    for vm in vm_list:
        vm_name = common.get_node("./text()", vm)
        vcpu = common.get_node("../VCPU/text()", vm)
        cache_type = common.get_node("../TYPE/text()", vm)
        policy_owner_list.append(policy_owner(vm_name, vcpu, cache_type))
    return policy_owner_list
Example #16
def asl_to_aml(dest_vm_acpi_path, dest_vm_acpi_bin_path, scenario_etree, allocation_etree):
    '''
    Compile the ASL code of the ACPI tables into AML code.
    :param dest_vm_acpi_path: the path containing the ASL code of the ACPI tables
    :param dest_vm_acpi_bin_path: the path to put the compiled AML code in
    :param scenario_etree: the scenario XML tree
    :param allocation_etree: the allocation XML tree
    :return: an error message, empty on success
    '''
    curr_path = os.getcwd()
    rmsg = ''

    os.chdir(dest_vm_acpi_path)
    for acpi_table in ACPI_TABLE_LIST:
        if acpi_table[0] == 'tpm2.asl':
            if 'tpm2.asl' in os.listdir(dest_vm_acpi_path):
                rc = exec_command('iasl {}'.format(acpi_table[0]))
                if rc == 0 and os.path.isfile(os.path.join(dest_vm_acpi_path, acpi_table[1])):
                    shutil.move(os.path.join(dest_vm_acpi_path, acpi_table[1]),
                                os.path.join(dest_vm_acpi_bin_path, acpi_table[1]))
                else:
                    if os.path.isfile(os.path.join(dest_vm_acpi_path, acpi_table[1])):
                        os.remove(os.path.join(dest_vm_acpi_path, acpi_table[1]))
                    rmsg = 'failed to compile {}'.format(acpi_table[0])
                    break
        elif acpi_table[0] in ['PTCT', 'RTCT']:
            if acpi_table[0] in os.listdir(dest_vm_acpi_path):
                rtct = acpiparser.rtct.RTCT(os.path.join(dest_vm_acpi_path, acpi_table[0]))
                outfile = os.path.join(dest_vm_acpi_bin_path, acpi_table[1])
                # move the guest ssram area to the area next to ACPI region
                pre_rt_vms = common.get_node("//vm[load_order ='PRE_LAUNCHED_VM' and vm_type ='RTVM']", scenario_etree)
                vm_id = pre_rt_vms.get("id")
                allocation_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
                ssram_start_gpa = common.get_node("./ssram/start_gpa/text()", allocation_vm_node)
                ssram_max_size = common.get_node("./ssram/max_size/text()", allocation_vm_node)
                move_rtct_ssram_and_bin_entries(rtct, int(ssram_start_gpa, 16), int(ssram_max_size, 16))
                with open(outfile, mode='wb') as fp:
                    fp.write(rtct)
        else:
            if acpi_table[0].endswith(".asl"):
                rc = exec_command('iasl {}'.format(acpi_table[0]))
                if rc == 0 and os.path.isfile(os.path.join(dest_vm_acpi_path, acpi_table[1])):
                    shutil.move(os.path.join(dest_vm_acpi_path, acpi_table[1]),
                                os.path.join(dest_vm_acpi_bin_path, acpi_table[1]))
                else:
                    if os.path.isfile(os.path.join(dest_vm_acpi_path, acpi_table[1])):
                        os.remove(os.path.join(dest_vm_acpi_path, acpi_table[1]))
                    rmsg = 'failed to compile {}'.format(acpi_table[0])
                    break
            elif acpi_table[0].endswith(".aml"):
                shutil.copy(os.path.join(dest_vm_acpi_path, acpi_table[0]),
                            os.path.join(dest_vm_acpi_bin_path, acpi_table[1]))

    os.chdir(curr_path)
    if not rmsg:
        print('compiled ACPI ASL code to {} successfully'.format(dest_vm_acpi_bin_path))
    return rmsg
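exec_command is presumably a thin wrapper that runs iasl in the current directory and reports its exit status; a minimal sketch under that assumption:

import subprocess

def exec_command(cmd):
    # Run the command through the shell and return its exit code (0 on success).
    return subprocess.call(cmd, shell=True)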
Example #17
def create_vuart_irq_node(etree, vm_id, vuart_id, irq):
    allocation_sos_vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", etree)
    if allocation_sos_vm_node is None:
        allocation_sos_vm_node = common.append_node("/acrn-config/vm", None, etree, id = vm_id)
    if common.get_node("./vm_type", allocation_sos_vm_node) is None:
        common.append_node("./vm_type", "SOS_VM", allocation_sos_vm_node)
    if common.get_node(f"./legacy_vuart[@id = '{vuart_id}']", allocation_sos_vm_node) is None:
        common.append_node("./legacy_vuart", None, allocation_sos_vm_node, id = vuart_id)

    common.append_node(f"./legacy_vuart[@id = '{vuart_id}']/irq", irq, allocation_sos_vm_node)
Example #18
def fn(board_etree, scenario_etree, allocation_etree):
    cpus_for_sos = sos_cpu_affinity(scenario_etree)
    if cpus_for_sos:
        if common.get_node("//vm[vm_type = 'SOS_VM']", scenario_etree) is not None:
            vm_id = common.get_node("//vm[vm_type = 'SOS_VM']/@id", scenario_etree)
            allocation_sos_vm_node = common.get_node(f"/acrn-config/vm[@id='{vm_id}']", allocation_etree)
            if allocation_sos_vm_node is None:
                allocation_sos_vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
            if common.get_node("./vm_type", allocation_sos_vm_node) is None:
                common.append_node("./vm_type", "SOS_VM", allocation_sos_vm_node)
            for pcpu_id in cpus_for_sos:
                common.append_node("./cpu_affinity/pcpu_id", str(pcpu_id), allocation_sos_vm_node)
Example #19
def sos_cpu_affinity(etree):
    if common.get_node("//vm[vm_type = 'SOS_VM']", etree) is None:
        return None

    if common.get_node("//vm[vm_type = 'SOS_VM' and count(cpu_affinity)]", etree) is not None:
        return None

    sos_extend_all_cpus = board_cfg_lib.get_processor_info()
    pre_all_cpus = etree.xpath("//vm[vm_type = 'PRE_RT_VM' or vm_type = 'PRE_STD_VM' or vm_type = 'SAFETY_VM']/cpu_affinity/pcpu_id/text()")

    cpus_for_sos = list(set(sos_extend_all_cpus) - set(pre_all_cpus))
    return sorted(cpus_for_sos)
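In other words, the Service VM receives every physical CPU not claimed by a pre-launched VM. A quick illustration with made-up pcpu ids:

sos_extend_all_cpus = ['0', '1', '2', '3']  # all pcpu ids on the board
pre_all_cpus = ['2', '3']                   # pcpu ids owned by pre-launched VMs
print(sorted(set(sos_extend_all_cpus) - set(pre_all_cpus)))  # ['0', '1']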
Example #20
def create_vuart_node(allocation_etree, vm_id, devdict):
    for dev in devdict:
        vuart_id = dev[0][-1]
        bar_base = devdict.get(dev)

        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
        vuart_node = common.get_node(f"./legacy_vuart[@id = '{vuart_id}']", vm_node)
        if vuart_node is None:
            vuart_node = common.append_node("./legacy_vuart", None, vm_node, id = vuart_id)
        if common.get_node(f"./base", vuart_node) is None:
            common.append_node(f"./base", hex(bar_base), vuart_node)
Example #21
def fn(board_etree, scenario_etree, allocation_etree):
    native_ttys = lib.lib.get_native_ttys()
    pio_list = [
        base for base in VALID_PIO
        if all(native_ttys[tty]['base'] != base for tty in native_ttys.keys())
    ]
    # This pio_list is a workaround: boards such as whl-ipc-i7 and whl-ipc-i5 occupy all
    # valid PIO ports, so allocating a PIO base for an enabled SOS legacy vuart1 would fail.
    # In that case, we allow vuart1 to take one PIO base that is already used natively.
    full = False
    if len(pio_list) == 0:
        full = True
        pio_list = VALID_PIO

    vuart_valid = ['ttyS0', 'ttyS1', 'ttyS2', 'ttyS3']
    hv_debug_console = lib.lib.parse_hv_console(scenario_etree)

    scenario_sos_vm_node = common.get_node("//vm[vm_type = 'SOS_VM']",
                                           scenario_etree)
    if scenario_sos_vm_node is not None:
        vm_id = common.get_node("./@id", scenario_sos_vm_node)
        if common.get_node("./legacy_vuart[@id = '0']/base/text()",
                           scenario_sos_vm_node) != "INVALID_COM_BASE":
            vuart0_base = ""
            if hv_debug_console in vuart_valid and hv_debug_console in native_ttys.keys(
            ) and native_ttys[hv_debug_console]['type'] == "portio":
                vuart0_base = native_ttys[hv_debug_console]['base']
                if vuart0_base in pio_list:
                    remove_pio(pio_list, vuart0_base)
            else:
                vuart0_base = alloc_pio(pio_list)
                if full:
                    common.print_yel(
                        "All available PIO bases are already used natively. '{}' is taken by SOS legacy vuart 0."
                        .format(vuart0_base),
                        warn=True)

            create_vuart_base_node(allocation_etree, str(vm_id), "0",
                                   vuart0_base)

        if common.get_node("./legacy_vuart[@id = '1']/base/text()",
                           scenario_sos_vm_node) != "INVALID_COM_BASE":
            vuart1_base = alloc_pio(pio_list)
            if full:
                common.print_yel(
                    "All available PIO bases are already used natively. '{}' is taken by SOS legacy vuart 1."
                    .format(vuart1_base),
                    warn=True)

            create_vuart_base_node(allocation_etree, str(vm_id), "1",
                                   vuart1_base)
Example #22
def alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list):
    vm_node_list = scenario_etree.xpath("//vm")
    for vm_node in vm_node_list:
        vm_name = common.get_node("./name/text()", vm_node)
        vcpu_list = scenario_etree.xpath(f"//POLICY[VM = '{vm_name}']/VCPU/text()")
        index_list = []
        for vcpu in sorted(list(set(vcpu_list))):
            type_list = scenario_etree.xpath(f"//POLICY[VM = '{vm_name}' and VCPU = '{vcpu}']/TYPE/text()")
            for cache_type in sorted(list(set(type_list))):
                if cache_type == "Data":
                    continue
                index = get_clos_id(mask_list, policy_owner(vm_name, vcpu, cache_type))
                index_list.append(index)
        create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)
Example #23
def alloc_clos_index(board_etree, scenario_etree, allocation_etree, mask_list):
    vm_node_list = scenario_etree.xpath("//vm")
    for vm_node in vm_node_list:
        vmname = common.get_node("./name/text()", vm_node)
        allocation_list = scenario_etree.xpath(f"//CACHE_ALLOCATION[POLICY/VM = '{vmname}']")
        for allocation in allocation_list:
            index_list = []
            cache_level = common.get_node("./CACHE_LEVEL/text()", allocation)
            cache_id = common.get_node("./CACHE_ID/text()", allocation)
            clos_mask_list = allocation.xpath(f".//POLICY[VM = '{vmname}']/CLOS_MASK/text()")

            for clos_mask in clos_mask_list:
                index = get_clos_id(mask_list, cache_id, clos_mask, "None")
                index_list.append(index)
            create_clos_node(allocation_etree, common.get_node("./@id", vm_node), index_list)
Example #24
def sos_cpu_affinity(etree):
    if common.get_node("//vm[load_order = 'SERVICE_VM']", etree) is None:
        return None

    if common.get_node(
            "//vm[load_order = 'SERVICE_VM' and count(cpu_affinity//pcpu_id)]",
            etree) is not None:
        return None

    sos_extend_all_cpus = board_cfg_lib.get_processor_info()
    pre_all_cpus = etree.xpath(
        "//vm[load_order = 'PRE_LAUNCHED_VM']/cpu_affinity//pcpu_id/text()")

    cpus_for_sos = list(set(sos_extend_all_cpus) - set(pre_all_cpus))
    return sorted(cpus_for_sos)
Example #25
def main(args):
    """
    Generate serial configuration file for service VM
    :param args: command line args
    """
    scenario_etree = lxml.etree.parse(args.scenario)
    allocation_etree = lxml.etree.parse(args.allocation)
    vuart_target_vmid = {}

    vm_list = scenario_etree.xpath("//vm[load_order = 'SERVICE_VM']")
    for vm in vm_list:
        vuart_list = find_non_standard_uart(vm, scenario_etree,
                                            allocation_etree)
        vmname = common.get_node("./name/text()", vm)
        if len(vuart_list) != 0:
            with open(args.out, "w+") as config_f:
                for uart_start_num, vuart in enumerate(
                        vuart_list, start=START_VUART_DEV_NAME_NO):
                    base = " port " + vuart["io_port"]
                    vm_id_note = "# User_VM_id: " + str(
                        vuart["target_vm_id"]) + '\n'
                    config_f.write(vm_id_note)
                    conf = "/dev/ttyS" + str(
                        uart_start_num) + base + UART_IRQ_BAUD + '\n'
                    config_f.write(conf)
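Each non-standard vuart thus yields a comment line plus a setserial-style device line. A hypothetical fragment of the generated file, assuming START_VUART_DEV_NAME_NO is 8 (the port value is illustrative, and the tail comes from UART_IRQ_BAUD, not shown here):

# User_VM_id: 1
/dev/ttyS8 port 0x2f8 ...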
Example #26
def create_device_node(allocation_etree, vm_id, devdict):
    for dev_name, bdf in devdict.items():
        vm_node = common.get_node(f"/acrn-config/vm[@id = '{vm_id}']", allocation_etree)
        if vm_node is None:
            vm_node = common.append_node("/acrn-config/vm", None, allocation_etree, id = vm_id)
        dev_node = common.get_node(f"./device[@name = '{dev_name}']", vm_node)
        if dev_node is None:
            dev_node = common.append_node("./device", None, vm_node, name = dev_name)
        if common.get_node(f"./bus", dev_node) is None:
            common.append_node(f"./bus",  f"{bdf.bus:#04x}".upper(), dev_node)
        if common.get_node(f"./dev", dev_node) is None:
            common.append_node(f"./dev", f"{bdf.dev:#04x}".upper(), dev_node)
        if common.get_node(f"./func", dev_node) is None:
            common.append_node(f"./func", f"{bdf.func:#04x}".upper(), dev_node)
Example #27
def fn(board_etree, scenario_etree, allocation_etree):
    allocate_ssram_region(board_etree, scenario_etree, allocation_etree)

    native_low_mem, native_high_mem = get_pci_hole_native(board_etree)
    create_native_pci_hole_node(allocation_etree, native_low_mem,
                                native_high_mem)

    vm_nodes = scenario_etree.xpath("//vm")
    for vm_node in vm_nodes:
        vm_id = vm_node.get('id')

        devdict_32bits = {}
        devdict_64bits = {}
        insert_vuart_to_dev_dict(vm_node, devdict_32bits)
        insert_ivsheme_to_dev_dict(scenario_etree, devdict_32bits,
                                   devdict_64bits, vm_id)
        insert_pt_devs_to_dev_dict(board_etree, vm_node, devdict_32bits,
                                   devdict_64bits)

        low_mem = []
        high_mem = []
        used_low_mem = []
        used_high_mem = []

        vm_type = common.get_node("./vm_type/text()", vm_node)
        if vm_type is not None and lib.lib.is_pre_launched_vm(vm_type):
            low_mem = [
                MmioWindow(start=PRE_LAUNCHED_VM_LOW_MEM_START,
                           end=PRE_LAUNCHED_VM_LOW_MEM_END - 1)
            ]
            high_mem = [
                MmioWindow(start=PRE_LAUNCHED_VM_HIGH_MEM_START,
                           end=PRE_LAUNCHED_VM_HIGH_MEM_END - 1)
            ]
        elif vm_type is not None and lib.lib.is_sos_vm(vm_type):
            low_mem = native_low_mem
            high_mem = native_high_mem
            mem_passthrough = get_devs_mem_passthrough(board_etree,
                                                       scenario_etree)
            used_low_mem_native = get_devs_mem_native(board_etree, low_mem)
            used_high_mem_native = get_devs_mem_native(board_etree, high_mem)
            # release the passthrough devices mmio windows from SOS
            used_low_mem = [
                mem for mem in used_low_mem_native
                if mem not in mem_passthrough
            ]
            used_high_mem = [
                mem for mem in used_high_mem_native
                if mem not in mem_passthrough
            ]
        else:
            # post-launched VMs fall through to here; no MMIO allocation is needed
            continue

        devdict_base_32_bits = alloc_mmio(low_mem, devdict_32bits,
                                          used_low_mem)
        devdict_base_64_bits = alloc_mmio(low_mem + high_mem, devdict_64bits,
                                          used_low_mem + used_high_mem)
        create_device_node(allocation_etree, vm_id, devdict_base_32_bits)
        create_device_node(allocation_etree, vm_id, devdict_base_64_bits)
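MmioWindow above is presumably a simple closed interval [start, end]; a sketch of the assumed shape:

from collections import namedtuple

# One MMIO window as a closed interval (a sketch; the real definition in the
# config tools may carry extra helpers).
MmioWindow = namedtuple('MmioWindow', ['start', 'end'])

window = MmioWindow(start=0x80000000, end=0x8fffffff)
print(hex(window.end - window.start + 1))  # 0x10000000 -> a 256 MiB window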
Example #28
def insert_pt_devs_to_dev_dict(board_etree, vm_node_etree, devdict_32bits,
                               devdict_64bits):
    pt_devs = vm_node_etree.xpath(f".//pci_dev/text()")
    for pt_dev in pt_devs:
        bdf = pt_dev.split()[0]
        bus = int(bdf.split(':')[0], 16)
        dev = int(bdf.split(":")[1].split('.')[0], 16)
        func = int(bdf.split(":")[1].split('.')[1], 16)
        bdf = lib.lib.BusDevFunc(bus=bus, dev=dev, func=func)
        pt_dev_node = common.get_node(
            f"//bus[@type = 'pci' and @address = '{hex(bus)}']/device[@address = '{hex((dev << 16) | func)}']",
            board_etree)
        if pt_dev_node is not None:
            insert_vmsix_to_dev_dict(pt_dev_node, devdict_32bits)
            pt_dev_resources = pt_dev_node.xpath(
                ".//resource[@type = 'memory' and @len != '0x0' and @id and @width]"
            )
            for pt_dev_resource in pt_dev_resources:
                if int(pt_dev_resource.get('min'), 16) < PCI_HOLE_THRESHOLD:
                    continue
                dev_name = str(bdf)
                bar_len = pt_dev_resource.get('len')
                bar_region = pt_dev_resource.get('id')
                bar_width = pt_dev_resource.get('width')
                if bar_width == "32":
                    devdict_32bits[(f"{dev_name}",
                                    f"{bar_region}")] = int(bar_len, 16)
                else:
                    devdict_64bits[(f"{dev_name}",
                                    f"{bar_region}")] = int(bar_len, 16)
Example #29
def allocate_io_port(board_etree, scenario_etree, allocation_etree):
    io_port_range_list_native = get_io_port_range_native(board_etree)

    vm_nodes = scenario_etree.xpath("//vm")
    for vm_node in vm_nodes:
        vm_id = vm_node.get('id')

        devdict_io_port = {}
        insert_legacy_vuart_to_dev_dict(vm_node, devdict_io_port)

        io_port_range_list = []
        used_io_port_list = []

        load_order = common.get_node("./load_order/text()", vm_node)
        if load_order is not None and lib.lib.is_service_vm(load_order):
            io_port_range_list = io_port_range_list_native
            io_port_passthrough = get_pt_devs_io_port_passthrough(board_etree, scenario_etree)
            used_io_port_list_native = get_devs_io_port_native(board_etree, io_port_range_list_native)
            # release the passthrough devices io port address from Service VM
            used_io_port_list = [io_port for io_port in used_io_port_list_native if io_port not in io_port_passthrough]
        else:
            io_port_range_list = [AddrWindow(start = IO_PORT_THRESHOLD, end = IO_PORT_MAX_ADDRESS)]
            used_io_port_list = get_pt_devs_io_port_passthrough_per_vm(board_etree, vm_node)

        devdict_base_io_port = alloc_addr(io_port_range_list, devdict_io_port, used_io_port_list, 0)
        create_vuart_node(allocation_etree, vm_id, devdict_base_io_port)
Example #30
def alloc_vuart_connection_info(board_etree, scenario_etree, allocation_etree):
    user_vm_list = scenario_etree.xpath(f"//vm[load_order != 'SERVICE_VM']")
    service_vm_id = common.get_node(f"//vm[load_order = 'SERVICE_VM']/@id", scenario_etree)
    service_vm_name = common.get_node(f"//vm[load_order = 'SERVICE_VM']/name/text()", scenario_etree)

    if (service_vm_id is None) or (service_vm_name is None):
        return

    for vm_node in user_vm_list:
        vm_id = common.get_node("./@id", vm_node)
        load_order = common.get_node("./load_order/text()", vm_node)
        user_vm_name = common.get_node("./name/text()", vm_node)
        service_vm_port = alloc_free_port(scenario_etree, "SERVICE_VM", user_vm_name)
        user_vm_port = alloc_free_port(scenario_etree, load_order, user_vm_name)

        create_s5_vuart_connection(allocation_etree, service_vm_name, service_vm_port, user_vm_name, user_vm_port)