def parse_fixed_io_port(item, elem):
    """Record an ACPI fixed I/O port descriptor as an io_port resource on *elem*."""
    base = item._BAS
    length = item._LEN
    add_child(elem, "resource", type="io_port",
              min=hex(base), max=hex(base + length - 1), len=hex(length))
def parse_fixed_memory_range(item, elem):
    """Record an ACPI fixed memory range descriptor as a memory resource on *elem*."""
    base = item._BAS
    length = item._LEN
    add_child(elem, "resource", type="memory",
              min=hex(base), max=hex(base + length - 1), len=hex(length))
def get_device_element(devices_node, namepath, hid):
    """Return the XML element for the device at the ACPI *namepath*.

    Walks the name segments of *namepath* (which must be absolute, i.e. start
    with a backslash), reusing an existing child whose <acpi_object> text
    matches the partial path, or creating one otherwise.  Newly created nodes
    get the tag/type from ``predefined_nameseg`` when the segment is known.
    If *hid* is non-empty it is stored as the ``id`` attribute of the leaf.
    """
    assert namepath.startswith("\\")
    segments = namepath[1:].split(".")
    current = devices_node
    for depth, segment in enumerate(segments, start=1):
        partial_path = f"\\{'.'.join(segments[:depth])}"
        tag, typ = predefined_nameseg.get(segment, ("device", None))
        # Look for an existing child that already represents this partial path.
        found = None
        for candidate in current:
            obj = get_node(candidate, "acpi_object")
            if obj is not None and obj.text == partial_path:
                found = candidate
                break
        if found is None:
            found = add_child(current, tag, None)
            add_child(found, "acpi_object", partial_path)
            if typ:
                found.set("type", typ)
        current = found
    if hid:
        current.set("id", hid)
    return current
def parse_io_port(item, elem):
    """Record an ACPI I/O port descriptor (explicit min/max) as an io_port resource on *elem*."""
    add_child(elem, "resource", type="io_port",
              min=hex(item._MIN),
              max=hex(item._MAX),
              len=hex(item._LEN))
def extract_ttys(device_classes_node):
    """List serial TTY devices under a <ttys> node with their device path and type."""
    ttys_node = add_child(device_classes_node, "ttys", None)
    for name in get_serial_devs():
        serial_node = add_child(ttys_node, "serial")
        add_child(serial_node, "dev_path", f"/dev/{name}")
        # Type comes from the sysfs attribute of the same device.
        add_child_with_file_contents(
            serial_node, "type", f"{SYS_TTY_DEVICES_CLASS_PATH}{name}/type")
def parse_fixed_memory_range(idx, item, elem):
    """Record an indexed fixed memory range descriptor as a memory resource on *elem*.

    A zero-length descriptor is reported with max 0x0.
    """
    length = item._LEN
    upper = item._BAS + length - 1 if length else 0
    add_child(elem, "resource", id=f"res{idx}", type="memory",
              min=hex(item._BAS), max=hex(upper), len=hex(length))
def parse_fixed_io_port(idx, item, elem):
    """Record an indexed fixed I/O port descriptor as an io_port resource on *elem*.

    A zero-length descriptor is reported with max 0x0.
    """
    length = item._LEN
    upper = item._BAS + length - 1 if length else 0
    add_child(elem, "resource", id=f"res{idx}", type="io_port",
              min=hex(item._BAS), max=hex(upper), len=hex(length))
def extract_layout(memory_node):
    """Record every E820 RAM range under *memory_node* as a <range> element."""
    for entry in parse_e820():
        if entry.type != e820.E820_TYPE_RAM:
            continue
        start = "0x{:016x}".format(entry.start)
        end = "0x{:016x}".format(entry.end)
        size = entry.end - entry.start + 1
        add_child(memory_node, "range", start=start, end=end, size=str(size))
def extract_inputs(device_classes_node):
    """List input devices (name and physical location) under an <inputs> node."""
    inputs_node = add_child(device_classes_node, "inputs", None)
    for input_id in get_input_ids():
        input_node = add_child(inputs_node, "input", None)
        sysfs_dir = f"/sys/class/input/input{input_id}"
        add_child_with_file_contents(input_node, "name", f"{sysfs_dir}/name")
        add_child_with_file_contents(input_node, "phys", f"{sysfs_dir}/phys")
def add_child_with_file_contents(parent_node, tag, filepath, translations=None):
    """Add a child element *tag* under *parent_node* whose text is the stripped
    contents of *filepath*.

    If the contents appear as a key in *translations*, the translated value is
    used instead.  Failures to read the file are logged and otherwise ignored
    (best-effort extraction).

    Fix: the original signature used a mutable default argument
    (``translations={}``); use ``None`` as the sentinel instead.
    """
    if translations is None:
        translations = {}
    try:
        with open(filepath, "r") as f:
            res = f.read().strip()
        if res in translations.keys():
            add_child(parent_node, tag, translations[res])
        else:
            add_child(parent_node, tag, res)
    except Exception as e:
        logging.warning(f"Failed to read the data from {filepath}: {e}")
def parse_address_space_resource(idx, item, elem):
    """Record an ACPI address-space resource descriptor on *elem*.

    Resource type codes 0/1/2 map to memory/io_port/bus_number; anything
    else is reported as "custom".
    """
    type_names = {0: "memory", 1: "io_port", 2: "bus_number"}
    typ = type_names.get(item._TYP, "custom")
    add_child(elem, "resource", id=f"res{idx}", type=typ,
              min=hex(item._MIN),
              max=hex(item._MIN + item._LEN - 1),
              len=hex(item._LEN))
def extract_topology(ioapics_node, tables):
    """Create an <ioapic> node for every I/O APIC found in the MADT subtables."""
    for subtable in tables.interrupt_controller_structures:
        if subtable.subtype != apic.MADT_TYPE_IO_APIC:
            continue
        apic_id = subtable.io_apic_id
        ioapic_node = add_child(ioapics_node, "ioapic", None, id=hex(apic_id))
        add_child(ioapic_node, "address", hex(subtable.io_apic_addr))
        add_child(ioapic_node, "gsi_base", hex(subtable.global_sys_int_base))
        extract_gsi_number(ioapic_node, apic_id)
def collect_hostbridge_resources(bus_node, bus_number):
    """Collect the memory windows of the host bridge for *bus_number* from /proc/iomem."""
    target = f"PCI Bus 0000:{bus_number:02x}"
    with open("/proc/iomem", "r") as f:
        for line in f:
            fields = line.strip().split(" : ")
            if fields[1] != target:
                continue
            begin, end = (int(part, base=16) for part in fields[0].split("-"))
            add_child(bus_node, "resource", type="memory",
                      min=hex(begin), max=hex(end), len=hex(end - begin + 1))
def parse_msi(cap_node, cap_struct):
    """Annotate an MSI capability node with vector count and feature flags."""
    add_child(cap_node, "count", str(1 << cap_struct.multiple_message_capable))
    optional_flags = (
        (cap_struct.multiple_message_capable > 0, "multiple-message"),
        (cap_struct.address_64bit, "64-bit address"),
        (cap_struct.per_vector_masking_capable, "per-vector masking"),
    )
    for present, flag_id in optional_flags:
        if present:
            add_child(cap_node, "capability", id=flag_id)
def extract(args, board_etree):
    """Extract device information from the ACPI DSDT/SSDT into the board XML.

    Parses the ACPI namespace, evaluates each device object's standard
    methods via a concrete AML interpreter, and attaches AML templates and
    inter-device dependencies to the corresponding XML device nodes.
    Parsing failures abort the extraction with a warning (best effort).
    """
    devices_node = get_node(board_etree, "//devices")
    try:
        namespace = parse_dsdt()
    except Exception as e:
        logging.warning(f"Parse ACPI DSDT/SSDT failed: {str(e)}")
        logging.warning(f"Will not extract information from ACPI DSDT/SSDT")
        return
    interpreter = ConcreteInterpreter(namespace)
    # With IOAPIC, the Linux kernel will choose APIC mode as the IRQ model. Evaluate the \_PIC method (if it exists)
    # to inform the ACPI namespace of this.
    try:
        interpreter.interpret_method_call("\\_PIC", 1)
    except:
        logging.info(f"\\_PIC is not evaluated.")
    # Walk device objects in name order; a failure on one device must not stop the rest.
    for device in sorted(namespace.devices, key=lambda x: x.name):
        try:
            fetch_device_info(devices_node, interpreter, device.name, args)
        except Exception as e:
            logging.info(f"Fetch information about device object {device.name} failed: {str(e)}")
    # Re-encode the collected ACPI objects of each device into an AML template blob.
    visitor = GenerateBinaryVisitor()
    for dev, objs in device_objects.items():
        element = get_node(devices_node, f"//device[acpi_object='{dev}']")
        if element is not None:
            tree = builder.DefDevice(
                builder.PkgLength(),
                dev,
                builder.TermList(*list(objs.values())))
            add_child(element, "aml_template", visitor.generate(tree).hex())
    # Record cross-device dependencies (skipping trivial self-dependencies).
    for dev, deps in device_deps.items():
        element = get_node(devices_node, f"//device[acpi_object='{dev}']")
        if element is not None:
            for kind, targets in deps.items():
                for target in targets:
                    if dev != target:
                        add_child(element, "dependency", target, type=kind)
def extract_gsi_number(ioapic_node, apic_id):
    """Determine the number of GSI lines of the I/O APIC *apic_id* by scanning
    the kernel log (/dev/kmsg) for its registration message, and record it as
    a <gsi_number> child of *ioapic_node*.

    Falls back to DEFAULT_MAX_IOAPIC_LINES when the log cannot be read
    non-blockingly or the message is not found.

    Fixes over the original:
      * The regex fragment ``\\[[\\d+]\\]`` (a one-character class matching a
        digit or '+') is corrected to ``\\[\\d+\\]`` so multi-digit IOAPIC
        indexes also match.
      * The pattern is compiled once, outside the read loop.
      * The bare ``except:`` is narrowed to ``except Exception``.
      * The log file handle is managed with ``with``.
    """
    with open("/dev/kmsg", 'r') as f:
        fd = os.dup(f.fileno())
    # Switch the duplicated descriptor to non-blocking so os.read() raises
    # instead of hanging once the log is exhausted.
    ret = fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK)
    if ret != 0:
        os.close(fd)
        add_child(ioapic_node, "gsi_number", DEFAULT_MAX_IOAPIC_LINES)
        return
    pattern = re.compile(
        r"\s*\d+,\d+,\d+,-;IOAPIC\[\d+\]:\s+apic_id\s+{},\s+version\s+\d+,\s+address\s+0x[0-9a-f]+,\s+GSI\s+(\d+)-(\d+)"
        .format(apic_id))
    while True:
        try:
            line = os.read(fd, 512).decode("utf-8")
            m = pattern.match(line)
            if m:
                previous_max = int(m.group(1))
                current_max = int(m.group(2)) + 1
                add_child(ioapic_node, "gsi_number", str(current_max - previous_max))
                break
        except Exception:
            # End of log (EAGAIN) or a decode error: fall back to the default.
            add_child(ioapic_node, "gsi_number", DEFAULT_MAX_IOAPIC_LINES)
            break
    os.close(fd)
def extract(board_etree):
    """Locate (or create) the PCI root bus node in the board XML and enumerate
    the devices below it starting from PCI_ROOT_PATH."""
    bus_node = get_node(board_etree, "//bus[@type='pci']")
    if bus_node is None:
        # No PCI bus was found in the ACPI-derived tree: synthesize one at bus 0.
        devices_node = get_node(board_etree, "//devices")
        bus_node = add_child(devices_node, "bus", type="pci", address="0x0")
        # NOTE(review): this call passes one argument while another definition of
        # collect_hostbridge_resources in this file takes (bus_node, bus_number) —
        # confirm which helper is actually in scope for this module.
        collect_hostbridge_resources(bus_node)
    else:
        # Assume there is only one device object in the ACPI DSDT that represents a PCI bridge (which should be the host
        # bridge in this case). If the ACPI table does not provide an _ADR object, add the default address of the host
        # bridge (i.e. bus 0).
        if bus_node.get("address") is None:
            bus_node.set("address", "0x0")
    enum_devices(bus_node, PCI_ROOT_PATH)
def extract(args, board_etree):
    """Attach connected USB devices (location and lsusb description) to the
    XML node of the ACPI port they are plugged into."""
    dev_regex = re.compile(USB_DEVICES_REGEX)
    for entry in os.listdir(USB_DEVICES_PATH):
        m = dev_regex.match(entry)
        if not m:
            continue
        location = m.group(0)
        devpath = os.path.join(USB_DEVICES_PATH, location)
        # Bus and device numbers identify the device for lsusb.
        with open(os.path.join(devpath, 'devnum'), 'r') as f:
            devnum = f.read().strip()
        with open(os.path.join(devpath, 'busnum'), 'r') as f:
            busnum = f.read().strip()
        cmd_out = os.popen('lsusb -s {b}:{d}'.format(b=busnum, d=devnum)).read()
        desc = cmd_out.split(':', maxsplit=1)[1].strip('\n')
        # Map the port back to its ACPI device object via sysfs.
        with open(devpath + '/port/firmware_node/path') as f:
            acpi_path = f.read().strip()
        usb_port_node = get_node(board_etree, f"//device[acpi_object='{acpi_path}']")
        if usb_port_node is not None:
            add_child(usb_port_node, "usb_device",
                      location=location, description=location + desc)
def extract(args, board_etree):
    """Register PCI root buses of domain 0000 in the board XML and enumerate
    the devices beneath each of them."""
    # Assume we only care about PCI devices under domain 0, as the hypervisor only uses BDF (without domain) for device
    # identification.
    root_regex = re.compile("pci0000:([0-9a-f]{2})")
    for entry in os.listdir(SYS_DEVICES_PATH):
        if not entry.startswith("pci"):
            continue
        m = root_regex.match(entry)
        if not m:
            continue
        bus_number = int(m.group(1), 16)
        bus_node = get_node(
            board_etree, f"//bus[@type='pci' and @address='{hex(bus_number)}']")
        if bus_node is None:
            devices_node = get_node(board_etree, "//devices")
            bus_node = add_child(devices_node, "bus",
                                 type="pci", address=hex(bus_number))
            collect_hostbridge_resources(bus_node, bus_number)
        enum_devices(bus_node, os.path.join(SYS_DEVICES_PATH, entry))
def extract(args, board_etree):
    """Build the cache topology of the board and inject any CAT capability
    explicitly given on the command line for the last-level cache."""
    root_node = board_etree.getroot()
    caches_node = get_node(board_etree, "//caches")
    extract_topology(root_node, caches_node)
    extract_tcc_capabilities(caches_node)
    # Inject the explicitly specified CAT capability if exists
    if not args.add_llc_cat:
        return
    llc_node = get_node(root_node, "//caches/cache[@level='3']")
    llc_cat_node = get_node(llc_node, "capability[@id='CAT']")
    if llc_cat_node is not None:
        # Hardware already reports CAT; the CLI settings would conflict.
        logging.warning(
            "The last level cache already reports CAT capability. The explicit settings from the command line options are ignored."
        )
        return
    llc_cat_node = add_child(llc_node, "capability", None, id="CAT")
    add_child(llc_cat_node, "capacity_mask_length",
              str(args.add_llc_cat.capacity_mask_length))
    add_child(llc_cat_node, "clos_number", str(args.add_llc_cat.clos_number))
    if args.add_llc_cat.has_CDP:
        add_child(llc_node, "capability", None, id="CDP")
def parse_device(bus_node, device_path):
    """Parse the PCI configuration space of the device at *device_path* and
    record identifiers, BARs, capabilities, interrupt pin and (for bridges)
    secondary-bus resources under *bus_node*.

    Returns the XML node further enumeration should continue under: the
    secondary bus node for a type-1 (bridge) header, the device node
    otherwise, or None when no config space is available.
    """
    device_name = os.path.basename(device_path)
    cfg = parse_config_space(device_path)
    # There are cases where Linux creates device-like nodes without a file named "config", e.g. when there is a PCIe
    # non-transparent bridge (NTB) on the physical platform.
    if cfg is None:
        return None
    # The host bridge itself maps onto the bus node; any other device gets its
    # own <device> child addressed by (device << 16) | function.
    if device_name == "0000:00:00.0":
        device_node = bus_node
    else:
        m = bdf_regex.match(device_name)
        device, function = int(m.group(3), base=16), int(m.group(4), base=16)
        adr = hex((device << 16) + function)
        device_node = get_node(bus_node, f"./device[@address='{adr}']")
        if device_node is None:
            device_node = add_child(bus_node, "device", None, address=adr)
    for cap in cfg.caps:
        # If the device is not in D0, power it on and reparse its configuration space.
        if cap.name == "Power Management" and cap.power_state != 0:
            logging.info(f"Try resuming {device_path}")
            try:
                with open(os.path.join(device_path, "power", "control"), "w") as f:
                    f.write("on")
                cfg = parse_config_space(device_path)
            except Exception as e:
                logging.info(f"Resuming {device_path} failed: {str(e)}")
    # Device identifiers
    vendor_id = "0x{:04x}".format(cfg.header.vendor_id)
    device_id = "0x{:04x}".format(cfg.header.device_id)
    class_code = "0x{:06x}".format(cfg.header.class_code)
    if device_node.get("id") is None:
        device_node.set("id", device_id)
    add_child(device_node, "vendor", vendor_id)
    add_child(device_node, "identifier", device_id)
    add_child(device_node, "class", class_code)
    # Subsystem IDs only exist in type-0 (non-bridge) headers.
    if cfg.header.header_type == 0:
        subvendor_id = "0x{:04x}".format(cfg.header.subsystem_vendor_id)
        subdevice_id = "0x{:04x}".format(cfg.header.subsystem_device_id)
        add_child(device_node, "subsystem_vendor", subvendor_id)
        add_child(device_node, "subsystem_identifier", subdevice_id)
    # BARs
    idx = 0
    for bar in cfg.header.bars:
        resource_path = os.path.join(device_path, f"resource{idx}")
        resource_type = bar.resource_type
        base = bar.base
        if os.path.exists(resource_path):
            if bar.base == 0:
                logging.debug(
                    f"PCI {device_name}: BAR {idx} exists but is programmed with all 0. This device cannot be passed through to any VM."
                )
            else:
                resource_node = get_node(
                    device_node,
                    f"./resource[@type = '{resource_type}' and @min = '{hex(base)}']"
                )
                if resource_node is None:
                    # BAR size is taken from the sysfs resource file length.
                    size = os.path.getsize(resource_path)
                    resource_node = add_child(device_node, "resource", None,
                                              type=resource_type, min=hex(base),
                                              max=hex(base + size - 1), len=hex(size))
                resource_node.set("id", f"bar{idx}")
                if isinstance(bar, MemoryBar32):
                    resource_node.set("width", "32")
                    resource_node.set("prefetchable", str(bar.prefetchable))
                elif isinstance(bar, MemoryBar64):
                    resource_node.set("width", "64")
                    resource_node.set("prefetchable", str(bar.prefetchable))
        elif bar.base != 0:
            logging.debug(
                f"PCI {device_name}: Cannot detect the size of BAR {idx}")
        # A 64-bit memory BAR occupies two consecutive BAR slots.
        if isinstance(bar, MemoryBar64):
            idx += 2
        else:
            idx += 1
    # Capabilities (standard and extended), with type-specific parsers when available.
    for cap in cfg.caps:
        cap_node = add_child(device_node, "capability", id=cap.name)
        if cap.name in cap_parsers:
            cap_parsers[cap.name](cap_node, cap)
    for cap in cfg.extcaps:
        cap_node = add_child(device_node, "capability", id=cap.name)
        if cap.name in cap_parsers:
            cap_parsers[cap.name](cap_node, cap)
    # Interrupt pin: resolve INTA#-INTD# through the parent's _PRT routing table.
    pin = cfg.header.interrupt_pin
    if pin > 0 and pin <= 4:
        pin_name = interrupt_pin_names[pin]
        res_node = add_child(device_node, "resource", type="interrupt_pin", pin=pin_name)
        # _PRT addresses use 0xffff as the "any function" wildcard.
        prt_address = hex(int(device_node.get("address"), 16) | 0xffff)
        mapping = device_node.xpath(
            f"../interrupt_pin_routing/routing[@address='{prt_address}']/mapping[@pin='{pin_name}']"
        )
        if len(mapping) > 0:
            res_node.set("source", mapping[0].get("source"))
            if "index" in mapping[0].keys():
                res_node.set("index", mapping[0].get("index"))
    # Secondary bus
    if cfg.header.header_type == 1:
        # According to section 3.2.5.6, PCI to PCI Bridge Architecture Specification, the I/O Limit register contains a
        # value smaller than the I/O Base register if there are no I/O addresses on the secondary side.
        io_base = (cfg.header.io_base_upper_16_bits << 16) | (
            (cfg.header.io_base >> 4) << 12)
        io_end = (cfg.header.io_limit_upper_16_bits << 16) | (
            (cfg.header.io_limit >> 4) << 12) | 0xfff
        if io_base <= io_end:
            add_child(device_node, "resource", type="io_port",
                      min=hex(io_base), max=hex(io_end),
                      len=hex(io_end - io_base + 1))
        # According to section 3.2.5.8, PCI to PCI Bridge Architecture Specification, the Memory Limit register contains
        # a value smaller than the Memory Base register if there are no memory-mapped I/O addresses on the secondary
        # side.
        if cfg.header.memory_base <= cfg.header.memory_limit:
            memory_base = (cfg.header.memory_base >> 4) << 20
            memory_end = ((cfg.header.memory_limit >> 4) << 20) | 0xfffff
            add_child(device_node, "resource", type="memory",
                      min=hex(memory_base), max=hex(memory_end),
                      len=hex(memory_end - memory_base + 1))
        secondary_bus_node = add_child(device_node, "bus", type="pci",
                                       address=hex(cfg.header.secondary_bus_number))
        # If a PCI routing table is provided for the root port / switch, move the routing table down to the bus node, in
        # order to align the relative position of devices and routing tables.
        prt = device_node.find("interrupt_pin_routing")
        if prt is not None:
            device_node.remove(prt)
            secondary_bus_node.append(prt)
        return secondary_bus_node
    return device_node
def parse_msix(cap_node, cap_struct):
    """Annotate an MSI-X capability node with its table and PBA locations."""
    fields = (
        ("table_size", str(cap_struct.table_size)),
        ("table_bir", str(cap_struct.table_bir)),
        ("table_offset", hex(cap_struct.table_offset_z)),
        ("pba_bir", str(cap_struct.pba_bir)),
        ("pba_offset", hex(cap_struct.pba_offset_z)),
    )
    for tag, value in fields:
        add_child(cap_node, tag, value)
def extract_model(processors_node, cpu_id, family_id, model_id, core_type, native_model_id):
    """Create a <model> node (if one for this family/model/core-type does not
    exist yet) describing the CPU model of *cpu_id*: brand string and CPUID
    capability bits.

    Fix: the CPUID brand string is NUL-padded; strip non-printable characters
    before storing it as the description (consistent with the MSR-aware
    variant of this function in this file).
    """
    n = get_node(
        processors_node,
        f"//model[family_id='{family_id}' and model_id='{model_id}' and core_type='{core_type}' and native_model_id='{native_model_id}']"
    )
    if n is None:
        n = add_child(processors_node, "model")
        add_child(n, "family_id", family_id)
        add_child(n, "model_id", model_id)
        add_child(n, "core_type", core_type)
        add_child(n, "native_model_id", native_model_id)
        # The brand string spans CPUID leaves 0x80000002-0x80000004.
        brandstring = b""
        for leaf in [0x80000002, 0x80000003, 0x80000004]:
            leaf_data = parse_cpuid(leaf, 0, cpu_id)
            brandstring += leaf_data.brandstring
        # Collapse NUL padding and other non-printable runs into single spaces.
        n.set("description", re.sub('[^!-~]+', ' ', brandstring.decode()).strip())
        leaves = [(1, 0), (7, 0), (0x80000001, 0), (0x80000007, 0)]
        for leaf in leaves:
            leaf_data = parse_cpuid(leaf[0], leaf[1], cpu_id)
            for cap in leaf_data.capability_bits:
                if getattr(leaf_data, cap) == 1:
                    add_child(n, "capability", id=cap)
def fetch_device_info(devices_node, interpreter, namepath, args):
    """Evaluate the standard ACPI methods of the device object at *namepath*
    and record the results (IDs, status, address, resources, PCI interrupt
    routing) in the board XML under *devices_node*.

    Evaluation that hits unimplemented interpreter functionality raises
    FutureWork, which is silently ignored (best-effort extraction).
    """
    logging.info(f"Fetch information about device object {namepath}")
    try:
        # Check if an _INI method exists
        try:
            interpreter.interpret_method_call(namepath + "._INI")
        except UndefinedSymbol:
            pass
        # Device status (_STA); optionally skip devices reported as absent.
        sta = None
        if interpreter.context.has_symbol(f"{namepath}._STA"):
            result = interpreter.interpret_method_call(f"{namepath}._STA")
            sta = result.get()
            if args.check_device_status and sta & 0x1 == 0:
                return
            add_object_to_device(interpreter, namepath, "_STA", result)
        # Hardware ID (_HID); integer IDs are decoded as EISA IDs when possible.
        hid = ""
        if interpreter.context.has_symbol(f"{namepath}._HID"):
            result = interpreter.interpret_method_call(f"{namepath}._HID")
            hid = result.get()
            if isinstance(hid, str):
                pass
            elif isinstance(hid, int):
                eisa_id = parse_eisa_id(hid)
                if eisa_id:
                    hid = eisa_id
                else:
                    hid = hex(hid)
            else:
                hid = "<unknown>"
            add_object_to_device(interpreter, namepath, "_HID", result)
        # Compatible IDs (_CID); may be a single value or a package of values.
        cids = []
        if interpreter.context.has_symbol(f"{namepath}._CID"):
            cid_object = interpreter.interpret_method_call(f"{namepath}._CID")
            if isinstance(cid_object, (datatypes.String, datatypes.Integer)):
                cid_data = [cid_object]
            elif isinstance(cid_object, datatypes.Package):
                cid_data = cid_object.elements
            for cid_datum in cid_data:
                if isinstance(cid_datum, datatypes.Integer):
                    eisa_id = parse_eisa_id(cid_datum.get())
                    if eisa_id:
                        cids.append(eisa_id)
                    else:
                        cids.append(hex(cid_datum.get()))
                elif isinstance(cid_datum, datatypes.String):
                    cids.append(cid_datum.get())
        # Create the XML element for the device and create its ancestors if necessary
        element = get_device_element(devices_node, namepath, hid)
        if hid in buses.keys():
            element.tag = "bus"
            element.set("type", buses[hid])
        for cid in cids:
            add_child(element, "compatible_id", cid)
        # Unique ID (_UID)
        uid = ""
        if interpreter.context.has_symbol(f"{namepath}._UID"):
            result = interpreter.interpret_method_call(f"{namepath}._UID")
            uid = result.get()
            add_child(element, "acpi_uid", str(uid))
            add_object_to_device(interpreter, namepath, "_UID", result)
        # Description (_STR): a UTF-16 buffer, NUL-terminated.
        if interpreter.context.has_symbol(f"{namepath}._STR"):
            result = interpreter.interpret_method_call(f"{namepath}._STR")
            desc = result.get().decode(encoding="utf-16").strip("\00")
            element.set("description", desc)
            add_object_to_device(interpreter, namepath, "_STR", result)
        # MSFT0101 identifies a TPM 2.0 device.
        if "MSFT0101" in [hid, *cids]:
            parse_tpm(element)
        # Address (_ADR); skipped when a sibling already claims the same address.
        if interpreter.context.has_symbol(f"{namepath}._ADR"):
            result = interpreter.interpret_method_call(f"{namepath}._ADR")
            adr = result.get()
            if isinstance(adr, int):
                adr = hex(adr)
            if len(element.xpath(f"../*[@address='{adr}']")) > 0:
                logging.info(f"{namepath} has siblings with duplicated address {adr}.")
            else:
                element.set("address", hex(adr) if isinstance(adr, int) else adr)
            add_object_to_device(interpreter, namepath, "_ADR", result)
        # Bus number that overrides _ADR when exists
        if interpreter.context.has_symbol(f"{namepath}._BBN"):
            result = interpreter.interpret_method_call(f"{namepath}._BBN")
            bus_number = result.get()
            if isinstance(bus_number, int):
                bus_number = hex(bus_number)
            # To avoid confusion to the later extractors, do not recognize _BBN for non-present host bridges.
            if sta == None or (sta & 0x1) != 0:
                element.set("address", bus_number)
            add_object_to_device(interpreter, namepath, "_BBN", result)
        # Status bits from _STA: present / enabled / functioning.
        if sta is not None:
            status = add_child(element, "status")
            add_child(status, "present", "y" if sta & 0x1 != 0 else "n")
            add_child(status, "enabled", "y" if sta & 0x2 != 0 else "n")
            add_child(status, "functioning", "y" if sta & 0x8 != 0 else "n")
        # Current resources (_CRS), dispatched to per-descriptor-type parsers.
        if interpreter.context.has_symbol(f"{namepath}._CRS"):
            result = interpreter.interpret_method_call(f"{namepath}._CRS")
            data = result.get()
            rdt = parse_resource_data(data)
            for idx, item in enumerate(rdt.items):
                p = (item.type, item.name)
                if p in resource_parsers.keys():
                    resource_parsers[p](idx, item, element)
                else:
                    add_child(element, "resource",
                              type=item.__class__.__name__, id=f"res{idx}")
            add_object_to_device(interpreter, namepath, "_CRS", result)
        # PCI interrupt routing (_PRT): map pins to their interrupt sources.
        if interpreter.context.has_symbol(f"{namepath}._PRT"):
            pkg = interpreter.interpret_method_call(f"{namepath}._PRT")
            prt = parse_pci_routing(pkg)
            prt_info = defaultdict(lambda: {})
            for mapping in prt:
                if isinstance(mapping.source, int):
                    assert mapping.source == 0, "A _PRT mapping package should not contain a byte of non-zero as source"
                    prt_info[mapping.address][mapping.pin] = mapping.source_index
                elif isinstance(mapping.source, context.DeviceDecl):
                    prt_info[mapping.address][mapping.pin] = (mapping.source.name, mapping.source_index)
                else:
                    logging.warning(f"The _PRT of {namepath} has a mapping with invalid source {mapping.source}")
            pin_routing_element = add_child(element, "interrupt_pin_routing")
            for address, pins in prt_info.items():
                mapping_element = add_child(pin_routing_element, "routing", address=hex(address))
                pin_names = {
                    0: "INTA#",
                    1: "INTB#",
                    2: "INTC#",
                    3: "INTD#",
                }
                for pin, info in pins.items():
                    if isinstance(info, int):
                        add_child(mapping_element, "mapping",
                                  pin=pin_names[pin], source=str(info))
                    else:
                        add_child(mapping_element, "mapping",
                                  pin=pin_names[pin], source=info[0], index=str(info[1]))
    except FutureWork:
        pass
def parse_tpm(elem):
    """Append TPM 2.0 capabilities (control area, start method and, when
    present, the log area) parsed from the ACPI TPM2 table to *elem*.

    Any failure is logged and the element is left unchanged (best effort).
    """
    try:
        tpm2 = parse_tpm2()
        control_area = add_child(elem, "capability", None, id="control_area")
        add_child(control_area, "address_of_control_area",
                  hex(tpm2.address_of_control_area))
        start_method = add_child(elem, "capability", None, id="start_method")
        add_child(start_method, "value", hex(tpm2.start_method))
        for parameter in tpm2.start_method_specific_parameters:
            add_child(start_method, "parameter", hex(parameter))
        # The log area fields only exist in newer TPM2 table revisions.
        if hasattr(tpm2, "log_area_minimum_length"):
            log_area = add_child(elem, "capability", None, id="log_area")
            add_child(log_area, "log_area_minimum_length",
                      hex(tpm2.log_area_minimum_length))
            add_child(log_area, "log_area_start_address",
                      hex(tpm2.log_area_start_address))
    except Exception as e:
        logging.info(f"Parse ACPI TPM2 failed: {str(e)}")
        logging.info(f"Will not extract information from ACPI TPM2")
        return
def extract_tcc_capabilities(caches_node):
    """Annotate cache nodes with Software SRAM regions from the ACPI RTCT.

    Handles both RTCT v1 (caches located by level + first APIC ID in the
    entry) and v2 (caches located by level + cache ID).  A missing RTCT
    table is not an error.
    """
    try:
        rtct = parse_rtct()
        if rtct.version == 1:
            for entry in rtct.entries:
                if entry.type == acpiparser.rtct.ACPI_RTCT_V1_TYPE_SoftwareSRAM:
                    # v1 entries identify the cache via the APIC ID of a processor sharing it.
                    cache_node = get_node(
                        caches_node,
                        f"cache[@level='{entry.cache_level}' and processors/processor='{hex(entry.apic_id_tbl[0])}']"
                    )
                    if cache_node is None:
                        logging.warning(
                            f"Cannot find the level {entry.cache_level} cache of physical processor with apic ID {entry.apic_id_tbl[0]}"
                        )
                        continue
                    cap = add_child(cache_node, "capability", None, id="Software SRAM")
                    add_child(cap, "start", "0x{:08x}".format(entry.base))
                    add_child(cap, "end", "0x{:08x}".format(entry.base + entry.size - 1))
                    add_child(cap, "size", str(entry.size))
        elif rtct.version == 2:
            for entry in rtct.entries:
                if entry.type == acpiparser.rtct.ACPI_RTCT_V2_TYPE_SoftwareSRAM:
                    # v2 entries carry an explicit cache ID instead of an APIC ID list.
                    cache_node = get_node(
                        caches_node,
                        f"cache[@level='{entry.level}' and @id='{hex(entry.cache_id)}']"
                    )
                    if cache_node is None:
                        logging.warning(
                            f"Cannot find the level {entry.level} cache with cache ID {entry.cache_id}"
                        )
                        continue
                    cap = add_child(cache_node, "capability", None, id="Software SRAM")
                    add_child(cap, "start", "0x{:08x}".format(entry.base))
                    add_child(cap, "end", "0x{:08x}".format(entry.base + entry.size - 1))
                    add_child(cap, "size", str(entry.size))
    except FileNotFoundError:
        # No RTCT on this platform; nothing to extract.
        pass
def extract_topology(root_node, caches_node):
    """Enumerate the cache hierarchy via CPUID leaf 4 for every logical
    processor already present in the XML, creating one <cache> node per
    (id, type, level) and recording which processors share it.  Detects
    Cache Allocation Technology via CPUID leaf 0x10.  Finally sorts the
    cache nodes by (level, id, type).
    """
    threads = root_node.xpath("//processors//*[cpu_id]")
    for thread in threads:
        subleaf = 0
        # Leaf 4 enumerates one cache per subleaf until cache_type reads 0.
        while True:
            # NOTE(review): cpu_id is parsed with base=16 here, while the
            # topology extractor in this file stores it as str(cpu_id) —
            # confirm the radix used by the producer of this node.
            cpu_id = int(get_node(thread, "cpu_id/text()"), base=16)
            leaf_4 = parse_cpuid(4, subleaf, cpu_id)
            cache_type = leaf_4.cache_type
            if cache_type == 0:
                break
            cache_level = leaf_4.cache_level
            # The cache ID is the APIC ID with the sharing-thread bits masked off.
            shift_width = leaf_4.max_logical_processors_sharing_cache.bit_length() - 1
            cache_id = hex(
                int(get_node(thread, "apic_id/text()"), base=16) >> shift_width)
            n = get_node(
                caches_node,
                f"cache[@id='{cache_id}' and @type='{cache_type}' and @level='{cache_level}']"
            )
            if n is None:
                n = add_child(caches_node, "cache", None,
                              level=str(cache_level), id=cache_id, type=str(cache_type))
                add_child(n, "cache_size", str(leaf_4.cache_size))
                add_child(n, "line_size", str(leaf_4.line_size))
                add_child(n, "ways", str(leaf_4.ways))
                add_child(n, "sets", str(leaf_4.sets))
                add_child(n, "partitions", str(leaf_4.partitions))
                add_child(n, "self_initializing", str(leaf_4.self_initializing))
                add_child(n, "fully_associative", str(leaf_4.fully_associative))
                add_child(n, "write_back_invalidate", str(leaf_4.write_back_invalidate))
                add_child(n, "cache_inclusiveness", str(leaf_4.cache_inclusiveness))
                add_child(n, "complex_cache_indexing", str(leaf_4.complex_cache_indexing))
                add_child(n, "processors")
                # Check support of Cache Allocation Technology
                leaf_10 = parse_cpuid(0x10, 0, cpu_id)
                # Subleaf 2 describes L2 CAT, subleaf 1 describes L3 CAT.
                if cache_level == 2:
                    leaf_10 = parse_cpuid(0x10, 2, cpu_id) if leaf_10.l2_cache_allocation == 1 else None
                elif cache_level == 3:
                    leaf_10 = parse_cpuid(0x10, 1, cpu_id) if leaf_10.l3_cache_allocation == 1 else None
                else:
                    leaf_10 = None
                if leaf_10 is not None:
                    cap = add_child(n, "capability", None, id="CAT")
                    add_child(cap, "capacity_mask_length", str(leaf_10.capacity_mask_length))
                    add_child(cap, "clos_number", str(leaf_10.clos_number))
                    if leaf_10.code_and_data_prioritization == 1:
                        add_child(n, "capability", None, id="CDP")
            # Record this thread as one of the processors sharing the cache.
            add_child(get_node(n, "processors"), "processor",
                      get_node(thread, "apic_id/text()"))
            subleaf += 1

    def getkey(n):
        # Sort caches by level first, then numeric id, then type.
        level = int(n.get("level"))
        id = int(n.get("id"), base=16)
        type = int(n.get("type"))
        return (level, id, type)

    caches_node[:] = sorted(caches_node, key=getkey)
def extract_topology(processors_node):
    """Build the processor topology tree (package/die/core/thread levels)
    from CPUID leaf 0x1F (or 0xB when 0x1F is unavailable) for every online
    CPU, and record per-thread identification on the leaf nodes.
    """
    cpu_ids = get_online_cpu_ids()
    for cpu_id in cpu_ids:
        subleaf = 0
        last_shift = 0
        last_node = None
        leaf_0 = parse_cpuid(0, 0, cpu_id)
        # Prefer the V2 extended topology leaf (0x1F) when the CPU supports it.
        if leaf_0.max_leaf >= 0x1f:
            topo_leaf = 0x1f
        else:
            topo_leaf = 0xb
        while True:
            leaf_topo = parse_cpuid(topo_leaf, subleaf, cpu_id)
            if leaf_topo.level_type == 0:
                # End of enumeration: make sure the chain is rooted at the
                # highest level type (package) before attaching it.
                highest_level = max(level_types.keys())
                if last_node.tag != level_types[highest_level]:
                    n, _ = get_or_create_parent(processors_node, level_types[highest_level], "0x0")
                    n.append(last_node)
                    last_node = n
                processors_node.append(last_node)
                break
            topo_level = level_types[leaf_topo.level_type]
            # The ID at this level is the x2APIC ID shifted by the previous level's width.
            topo_id = hex(leaf_topo.x2apic_id >> last_shift)
            n, created = get_or_create_parent(processors_node, topo_level, topo_id)
            if last_node is None:
                # First (innermost) level: this node represents the thread itself.
                leaf_1 = parse_cpuid(1, 0, cpu_id)
                family_id = hex(leaf_1.display_family)
                model_id = hex(leaf_1.display_model)
                # Hybrid-core information lives in leaf 0x1A when available.
                if leaf_0.max_leaf >= 0x1a:
                    leaf_1a = parse_cpuid(0x1a, 0, cpu_id)
                    core_type = leaf_1a.core_type
                    native_model_id = hex(leaf_1a.native_model_id)
                else:
                    core_type = ""
                    native_model_id = ""
                add_child(n, "cpu_id", text=str(cpu_id))
                add_child(n, "apic_id", text=hex(leaf_1.initial_apic_id))
                add_child(n, "x2apic_id", text=hex(leaf_topo.x2apic_id))
                add_child(n, "family_id", text=family_id)
                add_child(n, "model_id", text=model_id)
                add_child(n, "stepping_id", text=hex(leaf_1.stepping))
                add_child(n, "core_type", text=core_type)
                add_child(n, "native_model_id", text=native_model_id)
                extract_model(processors_node, cpu_id, family_id, model_id, core_type, native_model_id)
            else:
                n.append(last_node)
                # An already-existing parent means the rest of the chain is shared; stop here.
                if not created:
                    break
            last_node = n
            last_shift = leaf_topo.num_bit_shift
            subleaf += 1
def extract_model(processors_node, cpu_id, family_id, model_id, core_type, native_model_id):
    """Create a <model> node (if one for this family/model/core-type does not
    exist yet) describing the CPU model of *cpu_id*: brand string, CPUID
    capability bits, VMX-related MSR capability bits, and CPUID attributes.
    """
    n = get_node(processors_node, f"//model[family_id='{family_id}' and model_id='{model_id}' and core_type='{core_type}' and native_model_id='{native_model_id}']")
    if n is None:
        n = add_child(processors_node, "model")
        add_child(n, "family_id", family_id)
        add_child(n, "model_id", model_id)
        add_child(n, "core_type", core_type)
        add_child(n, "native_model_id", native_model_id)
        # The brand string spans CPUID leaves 0x80000002-0x80000004; collapse
        # NUL padding / non-printable runs before storing it.
        brandstring = b""
        for leaf in [0x80000002, 0x80000003, 0x80000004]:
            leaf_data = parse_cpuid(leaf, 0, cpu_id)
            brandstring += leaf_data.brandstring
        n.set("description", re.sub('[^!-~]+', ' ', brandstring.decode()).strip())
        # Feature flags from the standard and extended CPUID leaves.
        leaves = [(1, 0), (7, 0), (0x80000001, 0), (0x80000007, 0)]
        for leaf in leaves:
            leaf_data = parse_cpuid(leaf[0], leaf[1], cpu_id)
            for cap in leaf_data.capability_bits:
                if getattr(leaf_data, cap) == 1:
                    add_child(n, "capability", id=cap)
        # Virtualization-related capability bits read from MSRs.
        msr_regs = [MSR_IA32_MISC_ENABLE, MSR_IA32_FEATURE_CONTROL, MSR_IA32_VMX_BASIC, MSR_IA32_VMX_PINBASED_CTLS, MSR_IA32_VMX_PROCBASED_CTLS, MSR_IA32_VMX_EXIT_CTLS, MSR_IA32_VMX_ENTRY_CTLS, MSR_IA32_VMX_MISC, MSR_IA32_VMX_PROCBASED_CTLS2, MSR_IA32_VMX_EPT_VPID_CAP]
        for msr_reg in msr_regs:
            msr_data = msr_reg.rdmsr(cpu_id)
            for cap in msr_data.capability_bits:
                if getattr(msr_data, cap) == 1:
                    add_child(n, "capability", id=cap)
        # Multi-bit attributes (e.g. address widths) are stored with their values.
        leaves = [(0, 0), (0x80000008, 0)]
        for leaf in leaves:
            leaf_data = parse_cpuid(leaf[0], leaf[1], cpu_id)
            for cap in leaf_data.attribute_bits:
                add_child(n, "attribute", str(getattr(leaf_data, cap)), id=cap)
def parse_irq(idx, item, elem):
    """Record an IRQ descriptor as an irq resource with a comma-separated line list."""
    irq_list = ", ".join(str(line) for line in item.irqs)
    add_child(elem, "resource", id=f"res{idx}", type="irq", int=irq_list)