def __init__(self, reason = '', strict = False):
    """Record why this placeholder None object exists.

    reason -- human-readable explanation of the failure
    strict -- when True, capture a backtrace for later error reporting
    """
    frozen = hasattr(sys, "frozen")
    # Frozen (bundled) executables skip the debug message.
    if not frozen:
        debug.debug("None object instantiated: " + reason, 2)
    self.reason = reason
    self.strict = strict
    # Only strict mode pays the cost of capturing a backtrace.
    if strict:
        self.bt = get_bt_string()
def calculate(self):
    """Find every unique SSDT referenced by the kernel and yield
    (index, table address, service limit, address space, mods, mod_addrs)
    tuples sorted by descriptor index."""
    addr_space = utils.load_as(self._config)

    ## Get a sorted list of module addresses
    mods = dict((addr_space.address_mask(mod.DllBase), mod)
                for mod in modules.lsmod(addr_space))
    mod_addrs = sorted(mods.keys())

    ssdts = set()

    if addr_space.profile.metadata.get('memory_model', '32bit') == '32bit':
        # Gather up all SSDTs referenced by threads
        debug.info("[x86] Gathering all referenced SSDTs from KTHREADs...")
        for proc in tasks.pslist(addr_space):
            for thread in proc.ThreadListHead.list_of_type(
                    "_ETHREAD", "ThreadListEntry"):
                ssdt_obj = thread.Tcb.ServiceTable.dereference_as(
                    '_SERVICE_DESCRIPTOR_TABLE')
                ssdts.add(ssdt_obj)
    else:
        debug.info(
            "[x64] Gathering all referenced SSDTs from KeAddSystemServiceTable..."
        )
        # The NT module always loads first
        ntos = list(modules.lsmod(addr_space))[0]
        func_rva = ntos.getprocaddress("KeAddSystemServiceTable")
        if func_rva == None:
            raise StopIteration("Cannot locate KeAddSystemServiceTable")
        KeAddSystemServiceTable = ntos.DllBase + func_rva
        for table_rva in find_tables(KeAddSystemServiceTable, addr_space):
            ssdt_obj = obj.Object("_SERVICE_DESCRIPTOR_TABLE",
                                  ntos.DllBase + table_rva, addr_space)
            ssdts.add(ssdt_obj)

    # Get a list of *unique* SSDT entries. Typically we see only two.
    tables = set()

    for ssdt_obj in ssdts:
        for i, desc in enumerate(ssdt_obj.Descriptors):
            # Apply some extra checks - KiServiceTable should reside in kernel memory and ServiceLimit
            # should be greater than 0 but not unbelievably high
            # NOTE(review): 'break' drops ALL remaining descriptors of this
            # SSDT at the first invalid one rather than skipping it; other
            # copies of this plugin use a positive check with no break -
            # confirm this is intended.
            if not desc.is_valid() or desc.ServiceLimit <= 0 or desc.ServiceLimit >= 0xFFFF or desc.KiServiceTable <= 0x80000000:
                break
            else:
                tables.add(
                    (i, desc.KiServiceTable.v(), desc.ServiceLimit.v()))

    debug.info("Finding appropriate address space for tables...")
    tables_with_vm = []
    procs = list(tasks.pslist(addr_space))
    for idx, table, n in tables:
        # A table address may only be resolvable from a particular
        # process address space.
        vm = tasks.find_space(addr_space, procs, table)
        if vm:
            tables_with_vm.append((idx, table, n, vm))
        else:
            debug.debug("[SSDT not resident at 0x{0:08X}]\n".format(table))

    for idx, table, n, vm in sorted(tables_with_vm, key=itemgetter(0)):
        yield idx, table, n, vm, mods, mod_addrs
def emu(self, size):
    """Execute exactly one instruction of `size` bytes starting at the
    current instruction pointer (hex string from get_ip())."""
    start = int(self.get_ip(), 16)
    debug.debug("[emu] - (%x, %x)" % (start, size))
    try:
        # count=1 restricts emulation to a single instruction.
        self.mu.emu_start(start, start + size, timeout=10000, count=1)
    except UcError as e:
        debug.debug("Error %s" % e)
def write_data(self, address, content):
    """Write `content` into emulator memory at hex-string `address`,
    mapping the containing region first when necessary."""
    addr = int(address, 16)
    if not self.mapped(addr):
        self.mmap(addr)
    debug.debug("[write_data] - at address: %x" % addr)
    debug.debug(repr(content))
    self.mu.mem_write(addr, content)
def mmap(self, address):
    """Map a fixed 32 KiB region covering `address` (page-aligned via
    self.mask) into the emulator and record it in self.mappings."""
    region_size = 32 * 1024
    debug.debug("[mmap] - mapping: (%x, %x)" % (address, region_size))
    page = address & self.mask
    debug.debug("[mmap] - addr_page: %x" % page)
    self.mu.mem_map(page, region_size)
    self.mappings.append((page, region_size))
def reload_file(self, path):
    """Make `path` the current backing file, opening it and caching its
    state in self.used_files on first use.

    A missing file is tolerated: a warning is logged and the current
    path is set to the sentinel 'ZERO' so reads return zero bytes.
    """

    def assign_used_file():
        # Promote the cached entry for `path` to the "current" file state.
        self.currentPath = path
        self.fname = self.used_files[path]['fname']
        self.name = self.used_files[path]['fname']
        self.mode = self.used_files[path]['mode']
        self.fhandle = self.used_files[path]['fhandle']
        self.fhandle.seek(0, 2)
        self.fsize = self.used_files[path]['fsize']

    if path in self.used_files:
        if self.currentPath == path:
            # Already the active file - nothing to do.
            return
        assign_used_file()
    else:
        debug.debug('read from file: ' + path)
        path_name = urllib.url2pathname(path)
        if not os.path.exists(path_name):
            debug.warning('File not exist: ' + path + ' Returning zero bytes..')
            # BUG FIX: this was a local assignment (currentPath = 'ZERO')
            # that silently did nothing; the sentinel belongs on the
            # instance so later reads see it.
            self.currentPath = 'ZERO'
            return
        # assert os.path.exists(path_name), 'Filename must be specified and exist'
        self.used_files[path]['fname'] = os.path.abspath(path_name)
        self.used_files[path]['mode'] = 'rb'
        if self._config.WRITE:
            self.used_files[path]['mode'] += '+'
        fhandle = open(self.used_files[path]['fname'],
                       self.used_files[path]['mode'])
        self.used_files[path]['fhandle'] = fhandle
        # BUG FIX: the size was read from the previously current handle
        # (self.fhandle.tell()); measure the newly opened file instead.
        fhandle.seek(0, 2)
        self.used_files[path]['fsize'] = fhandle.tell()
        assign_used_file()
def hook_mem_access(self, uc, access, address, size, value, user_data):
    """Unicorn memory-access hook: log every access and mirror writes
    into the shadow dict keyed by hex-string address."""
    if access != UC_MEM_WRITE:
        debug.debug("[hook_mem_access] - read operation - %x %x %x" %
                    (address, size, value))
        return True
    debug.debug("[hook_mem_access] - write operation - %x %x %x" %
                (address, size, value))
    # Keys and values are hex strings; strip the Python 2 long suffix.
    self.shadow[hex(address).strip("L")] = hex(value).strip("L")
    return True
def __init__(self, ParentClass):
    """Scan all imported modules for subclasses of ParentClass and
    register every valid implementation found."""
    ## Create instance variables
    self.classes = []
    self.class_names = []
    self.order = []
    self.ParentClass = ParentClass

    for cls in self.get_subclasses(self.ParentClass):
        if cls == self.ParentClass:
            continue
        try:
            # Validate the candidate before registering it.
            self.check_class(cls)
            self.add_class(cls)
        except NotImplementedError:
            pass
        except AttributeError as e:
            debug.debug("Failed to load {0} '{1}': {2}".format(self.ParentClass, cls, e))
            continue
        else:
            # Only classes that registered cleanly get to add options.
            if hasattr(cls, "register_options"):
                cls.register_options(config)
def load_sysmap(self):
    """Loads up the system map data"""
    # NOTE(review): sysmapdata, f and profilename are not defined in this
    # method - they appear to come from the enclosing scope (profile
    # factory closure); confirm before reusing this code elsewhere.
    arch, _memmodel, sysmapvar = parse_system_map(sysmapdata, "kernel")
    debug.debug("{2}: Found system file {0} with {1} symbols".format(
        f.filename, len(sysmapvar.keys()), profilename))
    self.sys_map.update(sysmapvar)
def calculate(self):
    """Scan physical memory for Prefetch (SCCA) headers - and, on Win10,
    MAM-compressed prefetch data - yielding de-duplicated headers."""
    address_space = utils.load_as(self._config, astype = 'physical')

    if not self.is_valid_profile(address_space.profile):
        debug.error("This command does not support the selected profile.")

    scanner = PrefetchScanner(config = self._config, needles = ['SCCA'])
    scanner_mam = PrefetchScanner(config = self._config, needles = ['MAM\x04'])
    pf_headers = []

    # Win10 (NT 6.4) stores prefetch files MAM-compressed.
    if (address_space.profile.metadata.get('major') == 6 and
            address_space.profile.metadata.get('minor') == 4):
        scanner_mam.load_libmscompression()
        debug.debug("Scanning for MAM compressed data, this can take a while.............")
        if not os.path.isdir(self._config.MAM_DIR):
            debug.error(self._config.MAM_DIR + " is not a directory. Please specify a mam dump directory (--mam-dir)")
        for offset in scanner_mam.scan(address_space):
            pf_header = scanner_mam.carve_mam(address_space, offset, self._config.MAM_DIR)
            # NOTE(review): "pf_header > 0" compares the carved object
            # against an int - confirm carve_mam's failure sentinel.
            if pf_header > 0 and scanner_mam.is_valid():
                pf_headers.append(pf_header)

    debug.debug("Scanning for Prefetch files, this can take a while.............")
    for offset in scanner.scan(address_space):
        pf_header = scanner.carve(address_space, offset)
        if scanner.is_valid():
            pf_headers.append(pf_header)

    # This list may have duplicate pf_header entries since
    # we're not doing unique validation, just scanning.
    # Uniquing makes sense for reducing repetetive entries
    for unique_pf_entry in scanner.dedup(pf_headers):
        yield unique_pf_entry
def check_trace_instruction(self, address, instruction, hw_context, cnt):
    """Validate/rewrite one traced instruction and return its opcodes.

    Returns None for blacklisted or unprocessable instructions.
    Whitelisted mnemonics take the "normal" rewrite path without
    disassembly; everything else is disassembled and only branch
    instructions are rewritten.
    """
    print "[INPUT] %s) %s" % (str(cnt), instruction)
    if instruction.split()[0] in self.BLACKLIST_INSTRUCTIONS:
        return None
    if instruction.split()[0] in self.WHITELIST_INSTRUCTIONS:
        new_instr = self.check_normal_instruction(instruction, hw_context)
        # Fall back to the original text when no rewrite was produced.
        if not new_instr:
            new_instr = instruction
        print "[OUTPUT] ", new_instr
        opcodes = self.add_get_opcodes(new_instr, instruction, cnt)
        return opcodes
        #self.serialize_opcodes()
    addr = int(address, 16)
    data = self._addrspace.read(addr, self.get_buf_size())
    print "---[NEW ", instruction
    if not data:
        print "[-] Something went wrong. Missing instruction: %s" % instruction
        return
    # Disassemble the raw bytes; only branch instructions get rewritten.
    for ins in self.md.disasm(data, addr):
        if self.is_capstone_branch(ins):
            if not self.append_mnemonic_instruction_lists(instruction):
                return None
            new_instruction = self.check_branch_instruction(instruction, hw_context)
            print "[OUTPUT] %s" % new_instruction
            opcodes = self.get_opcodes(new_instruction, cnt)
            print self.get_nasm_hex(opcodes)
            debug.debug("%s -- %s" % (new_instruction, self.get_nasm_hex(opcodes)))
            return opcodes
def __init__(self, fh):
    """Parse the snapshot header from binary file handle `fh`.

    Raises ValueError when `fh` is not opened in binary mode and
    ParserException when the magic number is unknown.
    """
    if "b" not in fh.mode.lower():
        raise ValueError("Invalid file handler: file must be opened in binary mode (and not {0})".format(fh.mode))
    self.fh = fh

    ## Must start with one of the magic values
    magic = self.reada_long(0)
    # NOTE(review): `debug` is called as a function here (not debug.debug)
    # - confirm the module-level import provides a callable.
    debug("{0:x}".format(magic))

    ## Resolve the format version via a magic -> version lookup table
    ## (replaces the if/elif chain; behavior is unchanged).
    versions = {
        0xbed2bed0: 0,
        0xbad1bad1: 1,
        0xbed2bed2: 2,
        0xbed3bed3: 3,
    }
    if magic not in versions:
        raise ParserException("Header signature invalid", magic)
    self.version = versions[magic]

    ## determine offset sizes.
    # this is used whenever the vmsn specifications use 4\8 byte ints
    # dependant of version, so "offset" is a bit misleading.
    self.offset_size = 4 if self.version == 0 else 8

    ## Read group count
    self.group_count = self.reada_long(8)
def get_autoruns(self):
    """Collect autorun registry keys from the SOFTWARE and NTUSER hives
    and parse each one; returns a list of parsed results."""
    debug.debug('Started get_autoruns()')
    results = []
    hive_key_list = []
    try:
        # Gather all software run keys
        self.regapi.reset_current()
        for run_key in SOFTWARE_RUN_KEYS:
            hive_key_list.extend(
                self.regapi.reg_yield_key(hive_name='software', key=run_key))

        # Gather all ntuser run keys
        self.regapi.reset_current()
        for run_key in NTUSER_RUN_KEYS:
            hive_key_list.extend(
                self.regapi.reg_yield_key(hive_name='ntuser.dat', key=run_key))

        # hive_key = (key pointer, hive_name)
        for hive_key in hive_key_list:
            results.extend(self.parse_autoruns_key(hive_key))
    except Exception as e:
        # Best-effort: a partially populated result list is still useful.
        debug.warning('get_autoruns() failed to complete. Exception: {0} {1}'.format(type(e).__name__, e.args))

    debug.debug('Finished get_autoruns()')
    return results
def __init__(self, dump, code, stack, gcounter):
    """Build a Unicorn x86-64 emulator with code and stack regions mapped.

    dump     -- Volatility dump interface used to read guest memory
    code     -- hex string of the initial instruction pointer
    stack    -- hex string of the initial stack pointer
    gcounter -- counter shared with the caller
    """
    self.gcounter = gcounter
    code = int(code, 16)
    stack = int(stack, 16)
    self.fix = 2**64
    # Page mask used for aligning mappings (4 KiB pages).
    self.mask = 0xFFFFFFFFFFFFF000
    self.mappings = []
    self.unicorn_code = code
    self.unicorn_stack = stack
    # shadow stack for this emulator instance
    self.shadow = OrderedDict()
    debug.debug("[emulator] - init unicorn...")
    # Volatility interaction
    self.dump = dump
    self.current_ip = code
    # TODO: support other archs and modes
    self.mu = Uc(UC_ARCH_X86, UC_MODE_64)
    #size = 128 * 1024 * 1024
    size = 1 * 4096
    # unicorn code
    self.mu.mem_map(code & self.mask, size)
    # NOTE(review): the UNALIGNED address is recorded here while mmap()
    # records the page-aligned one - confirm self.mapped() copes with both.
    self.mappings.append((code, size))
    #size = 256 * 1024 * 1024
    size = 10 * 4096
    # unicorn generic stack
    self.mu.mem_map(stack & self.mask, size)
    self.mappings.append((stack, size))
    self.set_hooks()
    self.branch_point = []
def __init__(self, reason='', strict=False):
    """Create a NoneObject placeholder.

    reason -- explanation of why a real object could not be produced
    strict -- when True, record a backtrace (self.bt) for error reporting
    """
    # Suppress the debug message in frozen (bundled) executables.
    if not hasattr(sys, "frozen"):
        debug.debug("None object instantiated: " + reason, 2)
    self.reason = reason
    self.strict = strict
    if strict:
        self.bt = get_bt_string()
def parse_task_xml(self, xml, f_name):
    """Parse a Scheduled Task XML blob (UTF-16) carved from memory.

    Returns a dict with Date/Description/Actions/Enabled/Hidden/Triggers
    keys, or None when the task has no Exec/Command action or cannot be
    decoded.
    """
    raw = xml
    # Collapse runs of double NULs left over from carving, keep one
    # terminator so the decode below succeeds.
    xml = re.sub('\x00\x00+', '', xml) + '\x00'
    if xml:
        try:
            xml = xml.decode('utf-16')
            # Strip namespace attributes from <Task ...> so ElementTree
            # can find child elements by plain name.
            xml = re.sub(r"<Task(.*?)>", "<Task>", xml)
            xml = xml.encode('utf-16')
            root = ET.fromstring(xml)
            d = {}
            for e in root.findall("./RegistrationInfo/Date"):
                d['Date'] = e.text or ''
            for e in root.findall("./RegistrationInfo/Description"):
                d['Description'] = e.text or ''
            for e in root.findall("./Actions"):
                d['Actions'] = self.visit_all_children(e)
            for e in root.findall("./Settings/Enabled"):
                d['Enabled'] = e.text or ''
            for e in root.findall("./Settings/Hidden"):
                d['Hidden'] = e.text or ''
            for t in root.findall("./Triggers/*"):
                d['Triggers'] = self.visit_all_children(t)
            # Tasks without an executable command are of no interest.
            if not d.get("Actions", {}).get('Exec', {}).get("Command", False):
                return None
            return d
        except UnicodeDecodeError as e:
            debug.warning('Error while parsing the following task: {}'.format(f_name))
            debug.debug('UnicodeDecodeError for: {}'.format(repr(raw)))
def process_entry(self, vaddr, pgd, pte):
    """Resolve one page-table entry to a physical mapping, updating the
    profiling counters as a side effect.

    Returns (0, 0) for an empty (reserved, uncommitted) PTE, a resolved
    mapping for recognised entry types, or None for unknown entries.
    """
    retVal = None
    # Bit 8 of the PDE is the global flag.
    if (pgd & 0b100000000) == 0b100000000:
        pprofiler.profiling[self.name][pprofiler.GLOBAL_PDE] += 1
    # If large page flag is set in PGD
    if self.page_size_flag(pgd):
        pprofiler.profiling[self.name][pprofiler.LARGE] += 1
        retVal = self.get_large_paddr(vaddr, pgd)
    else:
        # if PTE still zero, return none
        if pte == 0:
            pprofiler.profiling[self.name][pprofiler.EMPTY_PTE] += 1
            #Reserved, but not committed page
            return (0, 0)
        self.profile_pte_flags(pte)
        if self.entry_present(pte):
            pprofiler.profiling[self.name][pprofiler.VALID] += 1
            retVal = self.get_paddr(vaddr, pte)
        # if valid PTE, resolve type
        elif self.entry_prototype(pte):
            if self.entry_subsection(pte):
                pprofiler.profiling[self.name][pprofiler.MAPPED_FILE] += 1
                retVal = self.get_subsection(vaddr, pte)
            else:
                retVal = self.get_prototype(vaddr, pte)
        elif self.entry_pagefile(pte):
            #pprofiler.profiling[self.name][pprofiler.PAGEFILE] += 1
            retVal = self.get_pagefile(vaddr, pte)
        else:
            pprofiler.profiling[self.name][pprofiler.UNKNOWN] += 1
            if vaddr != 0:
                debug.debug("Unknown PTE: " + hex(vaddr))
    return retVal
def __init__(self, fh):
    """Read and validate the snapshot header from binary file handle `fh`."""
    if "b" not in fh.mode.lower():
        raise ValueError(
            "Invalid file handler: file must be opened in binary mode (and not {0})"
            .format(fh.mode))
    self.fh = fh

    ## The file must start with one of the known magic values.
    magic = self.reada_long(0)
    debug("{0:x}".format(magic))

    ## Map the magic number onto a format version.
    known = ((0xbed2bed0, 0), (0xbad1bad1, 1),
             (0xbed2bed2, 2), (0xbed3bed3, 3))
    for known_magic, version in known:
        if magic == known_magic:
            self.version = version
            break
    else:
        raise ParserException("Header signature invalid", magic)

    ## determine offset sizes.
    # Versions > 0 use 8-byte ints wherever the vmsn specification says
    # "offset", version 0 uses 4-byte ints.
    self.offset_size = 4 if self.version == 0 else 8

    ## Read group count
    self.group_count = self.reada_long(8)
class VolatilityIA32ValidAS(obj.VolatilityMagic):
    """An object to check that an address space is a valid IA32 Paged space"""

    def generate_suggestions(self):
        """Generates a single response of True or False depending on
        whether the space is a valid Windows AS"""
        # This constraint looks for self referential values within
        # the paging tables
        try:
            if self.obj_vm.pae:
                pde_base = 0xc0600000
                pd = self.obj_vm.get_pdpte(0) & 0xffffffffff000
            else:
                pde_base = 0xc0300000
                pd = self.obj_vm.dtb
            if (self.obj_vm.vtop(pde_base) == pd):
                yield True
                # FIX: a bare return ends the generator; raising
                # StopIteration inside a generator becomes a RuntimeError
                # under PEP 479 (Python 3.7+).
                return
        except addrspace.ASAssertionError as _e:
            pass
        debug.debug("Failed to pass the Moyix Valid IA32 AS test", 3)

        # This constraint verifies that _KUSER_SHARED_DATA is shared
        # between user and kernel address spaces.
        if (self.obj_vm.vtop(0xffdf0000)) == (self.obj_vm.vtop(0x7ffe0000)):
            if self.obj_vm.vtop(0xffdf0000) is not None:
                yield True
                return
        debug.debug("Failed to pass the labarum_x Valid IA32 AS test", 3)
        yield False
def generate_suggestions(self):
    """Generates a single response of True or False depending on whether
    the space is a valid Windows AS"""
    # This constraint looks for self referential values within
    # the paging tables
    try:
        if self.obj_vm.pae:
            pde_base = 0xC0600000
            pd = self.obj_vm.get_pdpte(0) & 0xFFFFFFFFFF000
        else:
            pde_base = 0xC0300000
            pd = self.obj_vm.dtb
        if self.obj_vm.vtop(pde_base) == pd:
            yield True
            # FIX: a bare return ends the generator; raising StopIteration
            # inside a generator is a RuntimeError under PEP 479 (3.7+).
            return
    except addrspace.ASAssertionError as _e:
        pass
    debug.debug("Failed to pass the Moyix Valid IA32 AS test", 3)

    # This constraint verifies that _KUSER_SHARED_DATA is shared
    # between user and kernel address spaces.
    if (self.obj_vm.vtop(0xFFDF0000)) == (self.obj_vm.vtop(0x7FFE0000)):
        if self.obj_vm.vtop(0xFFDF0000) is not None:
            yield True
            return
    debug.debug("Failed to pass the labarum_x Valid IA32 AS test", 3)
    yield False
def _init_offset_pgd(cls):
    """Brute-force the offset of mm_struct->pgd: a candidate pointer is
    correct when it translates to its own DTB inside the address space
    built from it (the page directory maps itself)."""
    if not task_struct.is_offset_defined('mm'):
        return
    ksymbol_command = linux_auto_ksymbol(cls.vm.get_config())
    swapper_task_addr = ksymbol_command.get_symbol('init_task')
    swapper_task = obj.Object('task_struct',
                              offset=swapper_task_addr,
                              vm=cls.vm)
    init_task = iter(swapper_task.tasks).next()
    init_task_mm = init_task.mm.dereference()
    # Try 4-byte aligned offsets within the first 0x100 bytes of mm_struct.
    for pgd_offset in xrange(0, 0x100, 4):
        pgd = obj.Object('Pointer',
                         offset=init_task_mm.obj_offset + pgd_offset,
                         vm=cls.vm)
        if not pgd:
            continue
        dtb = cls.vm.vtop(pgd.v())
        init_task_as = cls.vm.__class__(cls.vm.base, cls.vm.get_config(),
                                        dtb=dtb)
        # Self-referential check: a valid pgd resolves to its own dtb.
        if init_task_as.vtop(pgd.v()) == dtb:
            cls.vtypes['mm_struct'][1]['pgd'][0] = pgd_offset
            cls._update_profile()
            debug.debug(
                "Found 'mm_struct->pgd' offset: {0}".format(pgd_offset))
            return
    debug.debug("Can't find 'mm_struct->pgd' offset")
def _init_offset_tasks(cls):
    """Brute-force the offset of task_struct->tasks by trying candidate
    offsets until walking the list from init_task yields processes named
    'init' and 'kthreadd' (the expected first two tasks)."""
    if not cls.is_offset_defined('comm'):
        return
    ksymbol_command = linux_auto_ksymbol(cls.vm.get_config())
    swapper_task_addr = ksymbol_command.get_symbol('init_task')
    for tasks_offset in xrange(0, cls.MAX_SIZE, 4):
        # Patch the candidate offset into the profile before each attempt.
        cls.vtypes['task_struct'][1]['tasks'][0] = tasks_offset
        cls._update_profile()
        swapper_task = obj.Object('task_struct',
                                  offset=swapper_task_addr,
                                  vm=cls.vm)
        # Check first two tasks, they should be called 'init' and 'kthreadd'
        tasks_iterator = iter(swapper_task.tasks)
        try:
            init_task = tasks_iterator.next()
            if str(init_task.comm) == 'init':
                kthreadd_task = tasks_iterator.next()
                if str(kthreadd_task.comm) == 'kthreadd':
                    debug.debug(
                        "Found 'task_struct->tasks' offset: {0}".format(
                            tasks_offset))
                    return
        except StopIteration:
            # Candidate offset produced a broken list - try the next one.
            pass
    debug.debug("Can't find 'task_struct->tasks' offset")
    # Reset not found 'tasks' offset
    cls.vtypes['task_struct'][1]['tasks'][0] = None
    cls._update_profile()
def load_modifications(self):
    """Locate every concrete ProfileModification subclass, resolve their
    dependency order, and apply those whose check() accepts this profile."""
    # Collect the applicable (non-abstract, non-base) modifications by name.
    mods = {}
    for klass in self._get_subclasses(ProfileModification):
        modname = klass.__name__
        instance = klass()
        if modname.startswith("Abstract") or klass == ProfileModification:
            continue
        if modname in mods:
            raise RuntimeError(
                "Duplicate profile modification name {0} found".format(
                    modname))
        mods[instance.__class__.__name__] = instance

    # Apply in dependency order, recording the names of applied mods.
    self._mods = []
    for modname in self._resolve_mod_dependencies(mods.values()):
        mod = mods.get(modname, None)
        if not mod:
            # Invalid/mistyped names (and Abstract ones) land here;
            # optional dependencies are not supported.
            raise RuntimeError(
                "No concrete ProfileModification found for " + modname)
        if mod.check(self):
            debug.debug("Applying modification from " +
                        mod.__class__.__name__)
            self._mods.append(mod.__class__.__name__)
            mod.modification(self)
def process_entry(self, vaddr, pte):
    """Classify a single PTE, bump the matching profiling counter, and
    return the resolved mapping ((0, 0) for an uncommitted entry, None
    for an unrecognised one)."""
    # An all-zero PTE is reserved but not committed memory.
    if pte == 0:
        pprofiler.profiling[self.name][pprofiler.EMPTY_PTE] += 1
        return (0, 0)

    self.profile_pte_flags(pte)

    result = None
    if self.entry_present(pte):
        # Hardware-valid entry: direct physical translation.
        pprofiler.profiling[self.name][pprofiler.VALID] += 1
        result = self.get_paddr(vaddr, pte)
    elif self.entry_prototype(pte):
        # Prototype PTEs point either at a mapped-file subsection or at
        # the shared prototype entry.
        if self.entry_subsection(pte):
            pprofiler.profiling[self.name][pprofiler.MAPPED_FILE] += 1
            result = self.get_subsection(vaddr, pte)
        else:
            result = self.get_prototype(vaddr, pte)
    elif self.entry_pagefile(pte):
        result = self.get_pagefile(vaddr, pte)
    else:
        pprofiler.profiling[self.name][pprofiler.UNKNOWN] += 1
        if vaddr != 0:
            debug.debug("Unknown PTE: " + hex(vaddr))
    return result
def _init_ksymtab(self):
    """Locate the ksymtab_strings section in the first 16 MB of physical
    memory by finding the literal 'init_task' symbol name and walking
    backwards to the start of the string table."""
    phys_as = utils.load_as(self._config, astype='physical')

    start_addr, _ = phys_as.get_available_addresses().next()
    # First 16 MB of physical memory
    self.kernel_image = phys_as.read(start_addr, 0x1000000)

    # Init page_offset
    if phys_as.profile.metadata.get('memory_model', '32bit') != '32bit':
        raise NotImplementedError

    self.ksymtab_initialized = True

    # Locate the physical offset of the ksymtab_strings section
    for match in re.finditer('init_task\0', self.kernel_image):
        offset = match.start()
        symbol_char = re.compile(r'[0-9a-z_]')
        if symbol_char.match(self.kernel_image[offset - 1:offset]):
            # 'init_task' is a substring of another symbol like 'xxx_init_task'
            continue
        # TODO: Choose the right one, not the first.
        # Find the beginning of the ksymtab_strings section: scan back
        # while we still see symbol characters or NUL separators.
        char = self.kernel_image[offset]
        while offset > 0 and (symbol_char.match(char) or char == '\x00'):
            offset -= 1
            char = self.kernel_image[offset]
        debug.debug("Found the physical offset of the ksymtab_strings "
                    "section: {0:#010x}".format(offset))
        self.ksymtab_strings_offset = offset
        return
    debug.warning("Can't locate a ksymtab_strings section")
def load_as(config, astype='virtual', **kwargs):
    """Loads an address space by stacking valid ASes on top of each other
    (priority order first)"""
    base_as = None
    error = exceptions.AddrSpaceError()

    # Start off requiring another round
    found = True
    ## A full iteration through all the classes without anyone
    ## selecting us means we are done:
    while found:
        debug.debug("Voting round")
        found = False
        # Candidates are tried in ascending 'order' (default 10).
        for cls in sorted(registry.get_plugin_classes(
                addrspace.BaseAddressSpace).values(),
                          key=lambda x: x.order if hasattr(x, 'order') else 10):
            debug.debug("Trying {0} ".format(cls))
            try:
                base_as = cls(base_as, config, astype=astype, **kwargs)
                debug.debug("Succeeded instantiating {0}".format(base_as))
                found = True
                break
            except addrspace.ASAssertionError, e:
                # The AS rejected this stacking; remember why, keep voting.
                debug.debug(
                    "Failed instantiating {0}: {1}".format(cls.__name__, e), 2)
                error.append_reason(cls.__name__, e)
                continue
            except Exception, e:
                debug.debug("Failed instantiating (exception): {0}".format(e))
                error.append_reason(cls.__name__ + " - EXCEPTION", e)
                continue
def load_as(config, astype = 'virtual', **kwargs):
    """Loads an address space by stacking valid ASes on top of each other
    (priority order first)"""
    base_as = None
    error = exceptions.AddrSpaceError()
    # Start off requiring another round
    found = True
    ## A full iteration through all the classes without anyone
    ## selecting us means we are done:
    while found:
        debug.debug("Voting round")
        found = False
        # Candidates are tried in ascending 'order' (default 10).
        for cls in sorted(registry.get_plugin_classes(addrspace.BaseAddressSpace).values(),
                          key = lambda x: x.order if hasattr(x, 'order') else 10):
            debug.debug("Trying {0} ".format(cls))
            try:
                base_as = cls(base_as, config, astype = astype, **kwargs)
                debug.debug("Succeeded instantiating {0}".format(base_as))
                found = True
                break
            except addrspace.ASAssertionError, e:
                # The AS rejected this stacking; remember why, keep voting.
                debug.debug("Failed instantiating {0}: {1}".format(cls.__name__, e), 2)
                error.append_reason(cls.__name__, e)
                continue
            except Exception, e:
                debug.debug("Failed instantiating (exception): {0}".format(e))
                error.append_reason(cls.__name__ + " - EXCEPTION", e)
                continue
def __init__(
    self,
    theType,
    offset,
    vm,
    name=None,
    members=None,
    struct_size=0,
    **kwargs,
):
    """Instantiate a structured type from a `members` dict.

    Keys of `members` are field offsets; values are curried Object
    classes instantiated lazily when the attribute is accessed.
    """
    if members is None or not members:
        # Warn rather than raise: some generated types (_HARDWARE_PTE,
        # for example) legitimately have no members.
        debug.debug(
            f"No members specified for CType {theType} named {name}",
            level=2,
        )
        members = {}
    self.members = members
    self.struct_size = struct_size
    BaseObject.__init__(self, theType, offset, vm, name=name, **kwargs)
    self.__initialized = True
def __init__(
    self,
    theType,
    offset,
    vm,
    parent=None,
    count=1,
    targetType=None,
    target=None,
    name=None,
    **kwargs,
):
    """Array of `count` elements laid out contiguously from `offset`.

    Elements come from `targetType` (curried on the fly) or from an
    already-curried `target`.  `count` may be a callable taking the
    parent object, allowing array sizes that depend on sibling fields.
    """
    ## Instantiate the first object on the offset:
    BaseObject.__init__(
        self, theType, offset, vm, parent=parent, name=name, **kwargs
    )

    # Resolve dynamic counts before use.
    if callable(count):
        count = count(parent)

    self.count = int(count)

    self.original_offset = offset
    if targetType:
        self.target = Curry(Object, targetType)
    else:
        self.target = target

    self.current = self.target(
        offset=offset, vm=vm, parent=self, name=name
    )
    if self.current.size() == 0:
        ## It is an error to have a zero sized element
        debug.debug("Array with 0 sized members???", level=10)
        debug.b()
def load_modifications(self): """ Find all subclasses of the modification type and applies them Each modification object can specify the metadata with which it can work Allowing the overlay to decide which profile it should act on """ # Collect together all the applicable modifications mods = {} for i in self._get_subclasses(ProfileModification): modname = i.__name__ instance = i() # Leave abstract modifications out of the dependency tree # Also don't consider the base ProfileModification object if not modname.startswith("Abstract") and i != ProfileModification: if modname in mods: raise RuntimeError("Duplicate profile modification name {0} found".format(modname)) mods[instance.__class__.__name__] = instance # Run through the modifications in dependency order self._mods = [] for modname in self._resolve_mod_dependencies(mods.values()): mod = mods.get(modname, None) # We check for invalid/mistyped modification names, AbstractModifications should be caught by this too if not mod: # Note, this does not allow for optional dependencies raise RuntimeError("No concrete ProfileModification found for " + modname) if mod.check(self): debug.debug("Applying modification from " + mod.__class__.__name__) self._mods.append(mod.__class__.__name__) mod.modification(self)
def render_text(self, outfd, data):
    """Render USER handle-table entries for each session, honouring the
    FREE/PID/TYPE filter options."""
    for session in data:
        shared_info = session.find_shared_info()
        if not shared_info:
            debug.debug("Cannot find win32k!gSharedInfo")
            continue
        outfd.write("*" * 50 + "\n")
        outfd.write(
            "SharedInfo: {0:#x}, SessionId: {1} Shared delta: {2}\n".
            format(
                shared_info.obj_offset,
                session.SessionId,
                shared_info.ulSharedDelta,
            ))
        outfd.write(
            "aheList: {0:#x}, Table size: {1:#x}, Entry size: {2:#x}\n".
            format(
                shared_info.aheList.v(),
                shared_info.psi.cbHandleTable,
                # Older profiles lack HeEntrySize; fall back to the
                # _HANDLEENTRY structure size from the profile.
                shared_info.HeEntrySize if hasattr(shared_info, 'HeEntrySize')
                else shared_info.obj_vm.profile.get_obj_size("_HANDLEENTRY"),
            ))
        outfd.write("\n")

        filters = []

        # Should we display freed handles
        if not self._config.FREE:
            filters.append(lambda x: not x.Free)
        # Should we filter by process ID
        if self._config.PID:
            filters.append(
                lambda x: x.Process.UniqueProcessId == self._config.PID)
        # Should we filter by object type
        if self._config.TYPE:
            filters.append(lambda x: str(x.bType) == self._config.TYPE)

        self.table_header(outfd, [
            ("Object(V)", "[addrpad]"),
            ("Handle", "[addr]"),
            ("bType", "20"),
            ("Flags", "^8"),
            ("Thread", "^8"),
            ("Process", ""),
        ])

        for handle in shared_info.handles(filters):
            self.table_row(outfd,
                           handle.phead.v(),
                           handle.phead.h if handle.phead else 0,
                           handle.bType,
                           handle.bFlags,
                           handle.Thread.Cid.UniqueThread,
                           handle.Process.UniqueProcessId)
def render_text(self, outfd, data):
    """Render the plugin's default text output"""
    debug.debug(self.params)
    # Check for data
    if data:
        task, vad, params = data
        # Get a magic object from the buffer
        buffer_space = addrspace.BufferAddressSpace(
            config = self._config, data = params['decoded_magic'])
        magic_obj = obj.Object(self.magic_struct, offset = 0, vm = buffer_space)
        outfd.write("*" * 50 + "\n")
        outfd.write("{0:<30} : {1}\n".format("ZBot", self.zbot + self.zbotversion))
        outfd.write("{0:<30} : {1}\n".format("Process", task.ImageFileName))
        outfd.write("{0:<30} : {1}\n".format("Pid", task.UniqueProcessId))
        outfd.write("{0:<30} : {1}\n".format("Address", vad.Start))
        # grab the URLs from the decoded buffer
        decoded_config = params['decoded_config']
        urls = []
        # Each URL in the decoded config is NUL-terminated; walk from one
        # "http" occurrence to the next.
        while "http" in decoded_config:
            url = decoded_config[decoded_config.find("http"):]
            urls.append(url[:url.find('\x00')])
            decoded_config = url[url.find('\x00'):]
        for i, url in enumerate(urls):
            outfd.write("{0:<30} : {1}\n".format("URL {0}".format(i), url))
        outfd.write("{0:<30} : {1}\n".format("Identifier", ''.join([chr(c) for c in magic_obj.guid if c != 0])))
        outfd.write("{0:<30} : {1}\n".format("Mutant key", magic_obj.guid_xor_key))
        outfd.write("{0:<30} : {1}\n".format("XOR key", magic_obj.xorkey))
        outfd.write("{0:<30} : {1}\n".format("Registry", "HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\{0}".format(magic_obj.keyname)))
        outfd.write("{0:<30} : {1}\n".format(" Value 1", magic_obj.value1))
        outfd.write("{0:<30} : {1}\n".format(" Value 2", magic_obj.value2))
        outfd.write("{0:<30} : {1}\n".format(" Value 3", magic_obj.value3))
        outfd.write("{0:<30} : {1}\n".format("Executable", magic_obj.exefile))
        outfd.write("{0:<30} : {1}\n".format("Data file", magic_obj.datfile))
        outfd.write("{0:<30} : \n{1}\n".format("Config RC4 key", "\n".join(
            ["{0:#010x} {1:<48} {2}".format(vad.Start + o, h, ''.join(c))
             for o, h, c in utils.Hexdump(params['config_key'])
             ])))
        # The credential RC4 key lives at the rc4key member offset within
        # the decoded magic structure.
        rc4_offset = task.obj_vm.profile.get_obj_offset(self.magic_struct, 'rc4key')
        creds_key = params['decoded_magic'][rc4_offset:rc4_offset + RC4_KEYSIZE]
        outfd.write("{0:<30} : \n{1}\n".format("Credential RC4 key", "\n".join(
            ["{0:#010x} {1:<48} {2}".format(vad.Start + o, h, ''.join(c))
             for o, h, c in utils.Hexdump(creds_key)
             ])))
def find_right_state(self, ip, sp):
    """Return the saved trace state whose RIP/RSP match `ip` and `sp`,
    or None when no snapshot matches."""
    debug.debug("[find_right_state]")
    # self.trace is a three-level nested dict of saved register states.
    for level1 in self.trace.values():
        for level2 in level1.values():
            for state in level2.values():
                if state["RIP"] == ip and state["RSP"] == sp:
                    debug.debug("[find_right_state] - FOUND")
                    return state
def load_sysmap(self):
    """Loads up the system map data"""
    arch, _memmodel, sysmapvar = parse_system_map(sysmapdata, "kernel")
    symbol_count = len(sysmapvar.keys())
    debug.debug(
        "{2}: Found system file {0} with {1} symbols".format(f.filename, symbol_count, profilename)
    )
    self.sys_map.update(sysmapvar)
def set_registers(self, registers):
    """Load a register snapshot (name -> hex string) into the emulator;
    an empty/None snapshot resets all registers instead."""
    debug.debug("[set_registers]")
    if not registers:
        self.reset_regs()
        return
    for name, value in registers.items():
        # Values arrive as hex strings (possibly not plain str), hence
        # the str() round-trip before parsing.
        self.mu.reg_write(regs_to_code[name], long(str(value), 16))
        debug.debug("%s: %x" % (name, int(value, 16)))
def load(self, url):
    """Load and unpickle the cached object for `url` from its cache file.

    Raises IOError/OSError when the cache file is missing and whatever
    pickle raises on corrupt data.
    """
    filename = self.filename(url)

    debug.debug("Loading from {0}".format(filename))
    # FIX: use a context manager so the handle is always closed (the
    # original open(...).read() leaked the file object), and read in
    # binary mode since the payload is pickled data.
    with open(filename, 'rb') as fh:
        data = fh.read()
    debug.trace(level = 3)
    # SECURITY NOTE: pickle.loads executes arbitrary code if the cache
    # file is attacker-controlled; only load trusted cache directories.
    return pickle.loads(data)
def render_text(self, outfd, data):
    """Render every VMCS candidate in `data`, optionally dumping all
    known VMCS fields (--verbose) and nested VMCS results (--nested)."""
    global vmcs_offset, memory
    debug.debug("debug mode")
    outfd.write("\n:: Looking for VMCS0N...\n")
    for i in data:
        if "EPT_POINTER" in vmcs_offset:
            outfd.write("\t|_ VMCS at {0:#0{1}x} - EPTP: {2:#0{3}x}\n"
                        .format(i, 18, self.get_vmcs_field(i, vmcs_offset["EPT_POINTER"] * 4, 0x08), 18))
        else:
            outfd.write("\t|_ VMCS at {0:#0{1}x}\n".format(i, 18))
        if self._config.VERBOSE:
            address = i
            for k, v in vmcs_offset.items():
                # Field offsets are stored in 4-byte units; sizes in bits.
                off = v * 4
                size = layouts.vmcs.vmcs_field_size[k] / 8
                if k == "VM_EXIT_REASON":
                    outfd.write("\t|_ %s : 0x%08x - %s\n" % (k, self.get_vmcs_field(address, off, size), layouts.vmcs.vmexits[self.get_vmcs_field(address, off, size)]))
                elif k == "EXCEPTION_BITMAP":
                    bitmap = self.get_vmcs_field(address, off, size)
                    outfd.write("\t|_ %s : 0x%08x - %s\n" % (k, bitmap, bin(bitmap)))
                    self.get_exception_bitmap_bit(outfd, bitmap)
                elif k == "PIN_BASED_VM_EXEC_CONTROL":
                    pinexec = self.get_vmcs_field(address, off, size)
                    outfd.write("\t|_ %s: 0x%08x - %s\n" % (k, pinexec, bin(pinexec)))
                    self.parsing_pin_based_controls(outfd, pinexec)
                elif k == "CPU_BASED_VM_EXEC_CONTROL":
                    procexec = self.get_vmcs_field(address, off, size)
                    outfd.write("\t|_ %s: 0x%08x - %s\n" % (k, procexec, bin(procexec)))
                    self.parsing_processor_based_controls(outfd, procexec)
                elif k == "CR3_TARGET_COUNT":
                    outfd.write("\t|_ %s : 0x%08x\n" % (k, self.get_vmcs_field(address, off, size)))
                    self.check_cr3(outfd, self.get_vmcs_field(address, off, size))
                elif k == "VM_EXIT_CONTROLS":
                    outfd.write("\t|_ %s : 0x%08x\n" % (k, self.get_vmcs_field(address, off, size)))
                    self.parsing_vmexit_controls(outfd, self.get_vmcs_field(address, off, size))
                else:
                    outfd.write("\t|_ %s : 0x%x\n" % (k, self.get_vmcs_field(address, off, size)))
                    # Side effects for specific fields: stash the IO bitmap
                    # addresses and expand the secondary controls.
                    if k == "IO_BITMAP_A":
                        hyper.iobitmapa = self.get_vmcs_field(address, off, size)
                    if k == "IO_BITMAP_B":
                        hyper.iobitmapb = self.get_vmcs_field(address, off, size)
                    if k == "SECONDARY_VM_EXEC_CONTROL" and hyper.use_secondary_control == 1:
                        self.parsing_secondary_vm_exec_control(outfd, self.get_vmcs_field(address, off, size), address)
            if hyper.iobitmaps == 1:
                outfd.write("\t|_ Zoom on IO_BITMAPS:\n")
                self.parse_iobitmaps(outfd)
            self.check_clts_exit(outfd, address)
            self.check_rdmsr(outfd, address)
            self.check_wrmsr(outfd, address)
            outfd.write("\t==========================\n")
    if self._config.NESTED:
        outfd.write("\n:: Looking for VMCS1N...\n")
        for nest in set(hyper.nvmcs_found):
            outfd.write("\t|_ Nested VMCS at 0x%08x\n" % nest)
        self.hierarchy_check(outfd)
    self.count_hypervisors(outfd)
    self.countGuests(outfd)
def get_symbol(self, sym_name, nm_type = "", module = "kernel"):
    """Gets a symbol out of the profile

    sym_name -> name of the symbol
    nm_type  -> type as defined by 'nm' (man nm for examples)
    module   -> which module to get the symbol from, default is kernel,
                otherwise can be any name seen in 'lsmod'

    This fixes a few issues from the old static hash table method:
    1) Conflicting symbols can be handled: if a symbol is found to
       conflict on any profile, then the plugin will need to provide the
       nm_type to differentiate, otherwise the plugin will be errored out
    2) Can handle symbols gathered from modules on disk as well as from
       the static kernel

    symtable is stored as a hash table of:

    symtable[module][sym_name] = [(symbol address, symbol type),
                                  (symbol address, symbol type), ...]

    The function has overly verbose error checking on purpose...
    """
    symtable = self.sys_map
    ret = None

    # check if the module is there...
    if module in symtable:
        mod = symtable[module]
        # check if the requested symbol is in the module
        if sym_name in mod:
            sym_list = mod[sym_name]
            # if a symbol has multiple definitions, then the plugin needs to specify the type
            if len(sym_list) > 1:
                if nm_type == "":
                    debug.error("Requested symbol {0:s} in module {1:s} has multiple definitions and no type given\n".format(sym_name, module))
                else:
                    for (addr, stype) in sym_list:
                        if stype == nm_type:
                            ret = addr
                            break
                    if ret == None:
                        debug.error("Requested symbol {0:s} in module {1:s} could not be found\n".format(sym_name, module))
            else:
                # get the address of the symbol
                ret = sym_list[0][0]
        else:
            debug.debug("Requested symbol {0:s} not found in module {1:s}\n".format(sym_name, module))
    else:
        debug.info("Requested module {0:s} not found in symbol table\n".format(module))

    # Apply the KASLR shift when one is known.
    if self.shift_address and ret:
        ret = ret + self.shift_address

    return ret
def load_vtypes(self):
    """Loads up the vtypes data.

    Picks the native type mapping for the profile's memory model, parses
    the DWARF data into vtype definitions, merges anonymous members, and
    folds the result into self.vtypes.
    """
    # Select native types by address size ('32bit' is the default model)
    ntvar = self.metadata.get('memory_model', '32bit')
    # deepcopy so later mutation of native_types cannot corrupt the shared mapping
    self.native_types = copy.deepcopy(self.native_mapping.get(ntvar))
    # NOTE(review): 'dwarfdata' is not defined in this scope — presumably
    # supplied by surrounding code (e.g. read from a profile zip); confirm.
    vtypesvar = dwarf.DWARFParser(dwarfdata).finalize()
    # Fold anonymous struct/union members into their parents before merging
    self._merge_anonymous_members(vtypesvar)
    self.vtypes.update(vtypesvar)
    # NOTE(review): 'f' and 'profilename' are also undefined here — this line
    # looks lifted from a loop over profile files; verify against the caller.
    debug.debug("{2}: Found dwarf file {0} with {1} symbols".format(f.filename, len(vtypesvar.keys()), profilename))
def enable_caching(_option, _opt_str, _value, _parser):
    """Turns on caching by replacing the tree with a live CacheTree.

    optparse callback signature; only the side effects matter: the module
    level CACHE is swapped for a real CacheTree and config.CACHE is set.
    (The old docstring said "Turns off caching", which contradicted both
    the function name and what the code does.)
    """
    debug.debug("Enabling Caching")
    # Feels filthy using the global keyword,
    # but I can't figure another way to ensure that
    # the code gets called and overwrites the outer scope
    global CACHE
    CACHE = CacheTree(CacheStorage(), invalidator = Invalidator())
    config.CACHE = True
def read(self, addr, length):
    """Read *length* bytes at *addr*, rerouting VGA-window reads to zread.

    When the fix is enabled, any read whose range touches the VGA window
    (start inside it, end inside it, or straddling the whole window) is
    served by zread instead of the parent class read.
    """
    if ENABLE_VGA_ZREAD_FIX:
        end = addr + length
        touches_vga = (
            VGA_START <= addr < VGA_END
            or VGA_START <= end < VGA_END
            or (addr < VGA_START and VGA_END <= end)
        )
        if touches_vga:
            debug.debug("Redirecting read(%#x, %d) to zread" % (addr, length), level=2)
            return self.zread(addr, length)
    return super(VMotionMigrationAddressSpace, self).read(addr, length)
def calculate(self):
    """Find all SSDTs and yield (index, table, limit, vm, mods, mod_addrs).

    On x86 the SSDTs are gathered from every thread's service table; on
    x64 they are located by scanning forward from KeAddSystemServiceTable.
    """
    addr_space = utils.load_as(self._config)

    ## Get a sorted list of module addresses
    mods = dict((addr_space.address_mask(mod.DllBase), mod)
                for mod in modules.lsmod(addr_space))
    mod_addrs = sorted(mods.keys())

    ssdts = set()

    if addr_space.profile.metadata.get("memory_model", "32bit") == "32bit":
        # Gather up all SSDTs referenced by threads
        # fix: use the module's debug.info logging instead of bare print
        # statements, consistent with the rest of this file
        debug.info("[x86] Gathering all referenced SSDTs from KTHREADs...")
        for proc in tasks.pslist(addr_space):
            for thread in proc.ThreadListHead.list_of_type("_ETHREAD", "ThreadListEntry"):
                ssdt_obj = thread.Tcb.ServiceTable.dereference_as("_SERVICE_DESCRIPTOR_TABLE")
                ssdts.add(ssdt_obj)
    else:
        debug.info("[x64] Gathering all referenced SSDTs from KeAddSystemServiceTable...")
        # The NT module always loads first
        ntos = list(modules.lsmod(addr_space))[0]
        func_rva = ntos.getprocaddress("KeAddSystemServiceTable")
        # fix: identity comparison with None
        if func_rva is None:
            # terminates the generator early (established convention in this codebase)
            raise StopIteration("Cannot locate KeAddSystemServiceTable")
        KeAddSystemServiceTable = ntos.DllBase + func_rva
        for table_rva in find_tables(KeAddSystemServiceTable, addr_space):
            ssdt_obj = obj.Object("_SERVICE_DESCRIPTOR_TABLE",
                                  ntos.DllBase + table_rva, addr_space)
            ssdts.add(ssdt_obj)

    # Get a list of *unique* SSDT entries. Typically we see only two.
    tables = set()
    for ssdt_obj in ssdts:
        for i, desc in enumerate(ssdt_obj.Descriptors):
            # Apply some extra checks - KiServiceTable should reside in
            # kernel memory and ServiceLimit should be greater than 0 but
            # not unbelievably high
            if (desc.is_valid()
                    and desc.ServiceLimit > 0
                    and desc.ServiceLimit < 0xFFFF
                    and desc.KiServiceTable > 0x80000000):
                tables.add((i, desc.KiServiceTable.v(), desc.ServiceLimit.v()))

    debug.info("Finding appropriate address space for tables...")
    tables_with_vm = []
    procs = list(tasks.pslist(addr_space))
    for idx, table, n in tables:
        vm = tasks.find_space(addr_space, procs, table)
        if vm:
            tables_with_vm.append((idx, table, n, vm))
        else:
            debug.debug("[SSDT not resident at 0x{0:08X}]\n".format(table))

    # Emit in ascending descriptor-index order
    for idx, table, n, vm in sorted(tables_with_vm, key=itemgetter(0)):
        yield idx, table, n, vm, mods, mod_addrs
def get_file_strings(self):
    """Yield (name, value) string pairs from every StringFileInfo table."""
    for node_name, node_children in self.get_children():
        if node_name != 'StringFileInfo':
            continue
        for _codepage, entries in node_children:
            for key, val in entries:
                # A value that shares the container's type is an
                # unexpanded subtree rather than a leaf string.
                if isinstance(val, type(entries)):
                    debug.debug(" {0} : Subtrees not yet implemented\n".format(key))
                    continue
                yield key, self.display_unicode(val)
def isIA32E(self, offset, phy_space, ishost):
    """Return True if the VMCS controls indicate a 64-bit (IA-32e) context.

    Reads VM_EXIT_CONTROLS for the host, VM_ENTRY_CONTROLS for the guest,
    and tests the host-address-space-size flag.
    """
    debug.debug("checking host VM_EXIT_CONTROLS")
    field = "VM_EXIT_CONTROLS" if ishost else "VM_ENTRY_CONTROLS"
    # vmcs_offset holds dword indices, so scale by 4 to get a byte offset
    off = vmcs_offset[field] * 4
    raw = phy_space.read(offset + off, 0x04)
    controls = struct.unpack('<I', raw)[0]
    # Host address space size flag: mask 0x200 (original comment called this
    # "bit 10", i.e. the tenth bit counting from 1)
    mask_host_address_space_size = 0x200
    return ((controls & mask_host_address_space_size) >> 9) == 1
def __getitem__(self, tag_ident, *tag_indices):
    """Look up a tag by identifier and wrap it as a Tag or MetaTag.

    Raises AttributeError when no matching tag exists. NOTE: returns None
    for any search result whose kind is neither "Tag" nor "MetaTag".
    """
    debug("searching Tag: {0}{1}".format(tag_ident, tag_indices))
    tag_data = self.search_tag(tag_ident, *tag_indices)
    if not tag_data:
        raise AttributeError("{0}: tag {1} not found. identifier should be tag name".format(self.name, tag_ident))
    kind, payload = tag_data[0], tag_data[1:]
    # meta-tags and plain tags get distinct wrapper classes
    if kind == "MetaTag":
        return MetaTag(self.parser, self, *payload)
    if kind == "Tag":
        return Tag(self.parser, self, *payload)
def render_text(self, outfd, data):
    """Write each session's shared-info summary and handle table to outfd."""
    for session in data:
        shared_info = session.find_shared_info()
        if not shared_info:
            debug.debug("Cannot find win32k!gSharedInfo")
            continue

        outfd.write("*" * 50 + "\n")
        outfd.write("SharedInfo: {0:#x}, SessionId: {1} Shared delta: {2}\n".format(
            shared_info.obj_offset,
            session.SessionId,
            shared_info.ulSharedDelta,
        ))
        # Older profiles lack HeEntrySize; fall back to the vtype size
        if hasattr(shared_info, 'HeEntrySize'):
            entry_size = shared_info.HeEntrySize
        else:
            entry_size = shared_info.obj_vm.profile.get_obj_size("_HANDLEENTRY")
        outfd.write("aheList: {0:#x}, Table size: {1:#x}, Entry size: {2:#x}\n".format(
            shared_info.aheList.v(),
            shared_info.psi.cbHandleTable,
            entry_size,
        ))
        outfd.write("\n")

        checks = []
        # Should we display freed handles
        if not self._config.FREE:
            checks.append(lambda x: not x.Free)
        # Should we filter by process ID
        if self._config.PID:
            checks.append(lambda x: x.Process.UniqueProcessId == self._config.PID)
        # Should we filter by object type
        if self._config.TYPE:
            checks.append(lambda x: str(x.bType) == self._config.TYPE)

        self.table_header(outfd, [("Object(V)", "[addrpad]"),
                                  ("Handle", "[addr]"),
                                  ("bType", "20"),
                                  ("Flags", "^8"),
                                  ("Thread", "^8"),
                                  ("Process", ""),
                                  ])

        for entry in shared_info.handles(checks):
            self.table_row(outfd,
                           entry.phead.v(),
                           entry.phead.h if entry.phead else 0,
                           entry.bType,
                           entry.bFlags,
                           entry.Thread.Cid.UniqueThread,
                           entry.Process.UniqueProcessId)