def calculate(self):
    """Scan process VADs with this plugin's YARA signatures.

    Yields (task, vad, self.params) for every MZ-backed VAD that
    produces at least two YARA matches, passes check_matches() on the
    last PE section's data, and for which scan_key() succeeds.
    """
    # yara is an optional dependency; bail out early with a clear error.
    if not has_yara:
        debug.error("You must install yara")

    addr_space = utils.load_as(self._config)

    if not self.is_valid_profile(addr_space.profile):
        debug.error("This command does not support the selected profile.")

    rules = yara.compile(sources=self.signatures)

    for task in self.filter_tasks(tasks.pslist(addr_space)):
        task_space = task.get_process_address_space()

        # We must have a process AS
        if not task_space:
            continue

        for vad, process_space in task.get_vads():
            # Only consider VADs that start with an MZ (DOS) header.
            if obj.Object("_IMAGE_DOS_HEADER", offset=vad.Start,
                          vm=process_space).e_magic != 0x5A4D:
                continue

            data = process_space.zread(vad.Start, vad.Length)

            # check for the signature with YARA, both hits must be present
            matches = rules.match(data=data)

            if len(matches) < 2:
                continue

            try:
                dos_header = obj.Object("_IMAGE_DOS_HEADER",
                                        offset=vad.Start, vm=task_space)
                nt_header = dos_header.get_nt_header()
            except (ValueError, exceptions.SanityCheckException):
                continue

            # Require at least two PE sections
            if nt_header.FileHeader.NumberOfSections < 2:
                continue

            # Get the last PE section's data
            sections = list(nt_header.get_sections(False))
            last_sec = sections[-1]
            last_sec_data = task_space.zread(
                (last_sec.VirtualAddress + vad.Start),
                last_sec.Misc.VirtualSize)

            success = self.check_matches(task_space, vad, matches,
                                         last_sec_data)

            if not success:
                continue

            success = self.scan_key(task_space)

            if not success:
                continue

            yield task, vad, self.params
def devicetree(self):
    """Volatility devicetree plugin.
    @see volatility/plugins/malware/devicetree.py

    Runs the "devicetree" plugin and converts its driver/device/attached
    device hierarchy into a list of plain dicts.

    @return: dict with keys "config" (empty) and "data" (the results).
    """
    log.debug("Executing Volatility devicetree module on {0}".format(
        self.memdump))

    self.__config()
    results = []

    command = self.plugins["devicetree"](self.config)
    for driver_obj in command.calculate():
        new = {
            "driver_offset": "0x{0:08x}".format(driver_obj.obj_offset),
            "driver_name": str(driver_obj.DriverName or ""),
            "devices": []
        }

        for device in driver_obj.devices():
            # The _OBJECT_HEADER precedes the object body, so subtract
            # the Body member's offset to reach the header.
            device_header = obj.Object(
                "_OBJECT_HEADER",
                offset=device.obj_offset -
                device.obj_vm.profile.get_obj_offset(
                    "_OBJECT_HEADER", "Body"),
                vm=device.obj_vm,
                native_vm=device.obj_native_vm)

            device_name = str(device_header.NameInfo.Name or "")

            new_device = {
                "device_offset": "0x{0:08x}".format(device.obj_offset),
                "device_name": device_name,
                "device_type": devicetree.DEVICE_CODES.get(device.DeviceType.v(), "UNKNOWN"),
                "devices_attached": []
            }

            new["devices"].append(new_device)

            # Walk the attachment chain, recording each level.
            level = 0

            for att_device in device.attached_devices():
                # Same header-before-body arithmetic as above.
                device_header = obj.Object(
                    "_OBJECT_HEADER",
                    offset=att_device.obj_offset -
                    att_device.obj_vm.profile.get_obj_offset(
                        "_OBJECT_HEADER", "Body"),
                    vm=att_device.obj_vm,
                    native_vm=att_device.obj_native_vm)

                device_name = str(device_header.NameInfo.Name or "")
                name = (device_name + " - " +
                        str(att_device.DriverObject.DriverName or ""))

                new_device["devices_attached"].append({
                    "level": level,
                    "attached_device_offset": "0x{0:08x}".format(att_device.obj_offset),
                    "attached_device_name": name,
                    "attached_device_type": devicetree.DEVICE_CODES.get(att_device.DeviceType.v(), "UNKNOWN")
                })

                level += 1

        results.append(new)

    return dict(config={}, data=results)
def calculate(self):
    """Convert the underlying memory image into a Microsoft crash dump.

    Builds a _DMP_HEADER (or _DMP_HEADER64) in a buffer address space,
    fills it from KDBG/KUSER_SHARED_DATA, then yields
    (file_offset, data) chunks: first the header, then the physical
    memory runs. Afterwards it reopens the produced dump and patches
    the first CPU's CONTEXT segment registers.
    """
    blocksize = self._config.BLOCKSIZE
    self._config.WRITE = True
    pspace = utils.load_as(self._config, astype='physical')
    vspace = utils.load_as(self._config)

    memory_model = pspace.profile.metadata.get('memory_model', '32bit')

    if memory_model == "64bit":
        header_format = '_DMP_HEADER64'
    else:
        header_format = '_DMP_HEADER'

    headerlen = pspace.profile.get_obj_size(header_format)
    # Pre-fill the header buffer with the repeating "PAGE" pattern.
    headerspace = addrspace.BufferAddressSpace(self._config, 0,
                                               "PAGE" * (headerlen / 4))
    header = obj.Object(header_format, offset=0, vm=headerspace)

    kuser = obj.Object("_KUSER_SHARED_DATA",
                       offset=obj.VolMagic(vspace).KUSER_SHARED_DATA.v(),
                       vm=vspace)

    kdbg = obj.Object("_KDDEBUGGER_DATA64",
                      offset=obj.VolMagic(vspace).KDBG.v(),
                      vm=vspace)

    # Scanning the memory region near KDDEBUGGER_DATA64 for
    # DBGKD_GET_VERSION64
    dbgkd = kdbg.dbgkd_version64()

    # Set the correct file magic
    for i in range(len("PAGE")):
        header.Signature[i] = [ord(x) for x in "PAGE"][i]

    # Write the KeDebuggerDataBlock and ValidDump headers
    dumptext = "DUMP"
    header.KdDebuggerDataBlock = kdbg.obj_offset
    if memory_model == "64bit":
        dumptext = "DU64"
        # On x64 the block pointer is stored canonicalized (sign-extended).
        header.KdDebuggerDataBlock = kdbg.obj_offset | 0xFFFF000000000000

    for i in range(len(dumptext)):
        header.ValidDump[i] = ord(dumptext[i])

    # The PaeEnabled member is essential for x86 crash files
    if memory_model == "32bit":
        if hasattr(vspace, "pae") and vspace.pae == True:
            header.PaeEnabled = 0x1
        else:
            header.PaeEnabled = 0x0

    # Set members of the crash header
    header.MajorVersion = dbgkd.MajorVersion
    header.MinorVersion = dbgkd.MinorVersion
    header.DirectoryTableBase = vspace.dtb
    header.PfnDataBase = kdbg.MmPfnDatabase
    header.PsLoadedModuleList = kdbg.PsLoadedModuleList
    header.PsActiveProcessHead = kdbg.PsActiveProcessHead
    header.MachineImageType = dbgkd.MachineType

    # Find the number of processors
    header.NumberProcessors = len(list(kdbg.kpcrs()))

    # In MS crash dumps, SystemTime will not be set. It will
    # represent the "Debug session time:". We are
    # using the member to represent the time the sample was
    # collected.
    header.SystemTime = kuser.SystemTime.as_windows_timestamp()

    # Zero out the BugCheck members
    header.BugCheckCode = 0x00000000
    header.BugCheckCodeParameter[0] = 0x00000000
    header.BugCheckCodeParameter[1] = 0x00000000
    header.BugCheckCodeParameter[2] = 0x00000000
    header.BugCheckCodeParameter[3] = 0x00000000

    # Set the sample run information. We used to take the sum of the size
    # of all runs, but that assumed the base layer was raw. In the case
    # of base layers such as ELF64 core dump or any other run-based address
    # space that may have holes for device memory, that would fail because
    # any runs after the first hole would then be at the wrong offset.
    last_run = list(pspace.get_available_addresses())[-1]
    num_pages = (last_run[0] + last_run[1]) / 0x1000

    header.PhysicalMemoryBlockBuffer.NumberOfRuns = 0x00000001
    header.PhysicalMemoryBlockBuffer.NumberOfPages = num_pages
    header.PhysicalMemoryBlockBuffer.Run[0].BasePage = 0x0000000000000000
    header.PhysicalMemoryBlockBuffer.Run[0].PageCount = num_pages
    header.RequiredDumpSpace = (num_pages + 2) * 0x1000

    # Zero out the remaining non-essential fields
    ContextRecordOffset = headerspace.profile.get_obj_offset(
        header_format, "ContextRecord")
    ExceptionOffset = headerspace.profile.get_obj_offset(
        header_format, "Exception")
    headerspace.write(ContextRecordOffset,
                      "\x00" * (ExceptionOffset - ContextRecordOffset))

    # Set the "converted" comment
    CommentOffset = headerspace.profile.get_obj_offset(
        header_format, "Comment")
    headerspace.write(CommentOffset,
                      "File was converted with Volatility" + "\x00")

    # Yield the header
    yield 0, headerspace.read(0, headerlen)

    # Write the main body
    for s, l in pspace.get_available_addresses():
        for i in range(s, s + l, blocksize):
            yield i + headerlen, pspace.read(i, min(blocksize, s + l - i))

    # Reset the config so volatility opens the crash dump
    self._config.LOCATION = "file://" + self._config.OUTPUT_IMAGE

    # Crash virtual space
    crash_vspace = utils.load_as(self._config)

    # The KDBG in the new crash dump
    crash_kdbg = obj.Object("_KDDEBUGGER_DATA64",
                            offset=obj.VolMagic(crash_vspace).KDBG.v(),
                            vm=crash_vspace)

    # The KPCR for the first CPU
    kpcr = list(crash_kdbg.kpcrs())[0]

    # Set the CPU CONTEXT properly for the architecure
    if memory_model == "32bit":
        kpcr.PrcbData.ProcessorState.ContextFrame.SegGs = 0x00
        kpcr.PrcbData.ProcessorState.ContextFrame.SegCs = 0x08
        kpcr.PrcbData.ProcessorState.ContextFrame.SegDs = 0x23
        kpcr.PrcbData.ProcessorState.ContextFrame.SegEs = 0x23
        kpcr.PrcbData.ProcessorState.ContextFrame.SegFs = 0x30
        kpcr.PrcbData.ProcessorState.ContextFrame.SegSs = 0x10
    else:
        kpcr.Prcb.ProcessorState.ContextFrame.SegGs = 0x00
        kpcr.Prcb.ProcessorState.ContextFrame.SegCs = 0x18
        kpcr.Prcb.ProcessorState.ContextFrame.SegDs = 0x2b
        kpcr.Prcb.ProcessorState.ContextFrame.SegEs = 0x2b
        kpcr.Prcb.ProcessorState.ContextFrame.SegFs = 0x53
        kpcr.Prcb.ProcessorState.ContextFrame.SegSs = 0x18
# Script: walks the kernel task list with Volatility and overwrites the
# target process's real_cred pointer (via libvmi) with init's credentials
# -- i.e. a credential-swap / privilege-change experiment.
# Expects (at least): sys.argv[5] = libvmi domain/target,
# sys.argv[6] = target PID. `config` and `utils` must already be set up
# by earlier code not shown here.
import volatility.obj as obj
import volatility.addrspace as addrspace
import volatility.registry as registry

registry.PluginImporter()
registry.register_global_options(config, addrspace.BaseAddressSpace)

## Main program starts here:

# Initialize address space (same as a=addrspace() in linux_volshell)
a = utils.load_as(config)
p = a.profile

# Lookup kernel symbol pointing to first task
task_addr = p.get_symbol("init_task")

# Create python object for this task
# Note that the "init_task" symbol does not point to the start of the
# task_struct data structure "list" element of the data structures.
init_task = obj.Object("task_struct", vm=a, offset=task_addr)

# Materialize the whole task list (init_task.tasks iterates it).
l = list(init_task.tasks)

from libvmi import Libvmi
vmi = Libvmi(sys.argv[5])

# Credentials of the first task in the list, used as the replacement value.
t_cred_init = l[0].real_cred

for t in l:
    if (t.pid == int(sys.argv[6])):
        # Translate the virtual address of the target's real_cred field
        # to physical, then write through libvmi.
        # NOTE(review): write_64_pa expects a 64-bit value; t_cred_init is
        # a Volatility pointer object here -- presumably coerced to its
        # numeric value by libvmi. Verify.
        t_cred_pa = a.vtop(t.real_cred.obj_offset)
        vmi.write_64_pa(t_cred_pa, t_cred_init)
def ssdt(self):
    """Volatility ssdt plugin.
    @see volatility/plugins/malware/ssdt.py

    Resolves every SSDT entry to a name/module, and on 32-bit systems
    additionally checks each resolved function for inline hooks.

    @return: dict with keys "config" (empty) and "data" (the results).
    """
    log.debug("Executing Volatility ssdt plugin on "
              "{0}".format(self.memdump))

    self.__config()
    results = []

    command = self.plugins["ssdt"](self.config)

    # Comment: this code is pretty much ripped from render_text in volatility.

    addr_space = self.addr_space
    syscalls = addr_space.profile.syscalls
    bits32 = addr_space.profile.metadata.get("memory_model", "32bit") == "32bit"

    for idx, table, n, vm, mods, mod_addrs in command.calculate():
        for i in range(n):
            if bits32:
                # These are absolute function addresses in kernel memory.
                syscall_addr = obj.Object("address", table + (i * 4), vm).v()
            else:
                # These must be signed long for x64 because they are RVAs relative
                # to the base of the table and can be negative.
                offset = obj.Object("long", table + (i * 4), vm).v()
                # The offset is the top 20 bits of the 32 bit number.
                syscall_addr = table + (offset >> 4)
            try:
                syscall_name = syscalls[idx][i]
            except IndexError:
                syscall_name = "UNKNOWN"

            syscall_mod = tasks.find_module(
                mods, mod_addrs, addr_space.address_mask(syscall_addr))
            if syscall_mod:
                syscall_modname = "{0}".format(syscall_mod.BaseDllName)
            else:
                syscall_modname = "UNKNOWN"

            new = {
                "index": int(idx),
                "table": hex(int(table)),
                "entry": "{0:#06x}".format(idx * 0x1000 + i),
                "syscall_name": syscall_name,
                "syscall_addr": hex(int(syscall_addr)),
                "syscall_modname": syscall_modname,
            }

            if bits32 and syscall_mod is not None:
                ret = apihooks.ApiHooks.check_inline(
                    va=syscall_addr, addr_space=vm,
                    mem_start=syscall_mod.DllBase,
                    mem_end=syscall_mod.DllBase + syscall_mod.SizeOfImage)

                # ret is None when the memory could not be analyzed.
                if ret is not None:
                    hooked, data, dest_addr = ret

                    if hooked:
                        # We found a hook, try to resolve the hooker.
                        # No mask required because we currently only work
                        # on x86 anyway.
                        hook_mod = tasks.find_module(
                            mods, mod_addrs, dest_addr)
                        if hook_mod:
                            hook_name = "{0}".format(hook_mod.BaseDllName)
                        else:
                            hook_name = "UNKNOWN"

                        # Report it now.
                        new.update({
                            "hook_dest_addr": "{0:#x}".format(dest_addr),
                            "hook_name": hook_name,
                        })

            results.append(new)

    return dict(config={}, data=results)
def _exported_functions(self):
    """
    Generator for exported functions.

    @return: tuple (Ordinal, FunctionRVA, Name)

    Ordinal is an integer and should never be None. If the
    function is forwarded, FunctionRVA is None. Otherwise,
    FunctionRVA is an RVA to the function's code (relative to module base).
    Name is a String containing the exported function's name. If the Name is
    paged, it will be None. If the function is forwarded, Name is the
    forwarded function name including the DLL (ntdll.EtwLogTraceEvent).
    """

    mod_base = self.obj_parent.DllBase
    exp_dir = self.obj_parent.export_dir()

    # PE files with a large number of functions will have arrays
    # that spans multiple pages. Thus the first entries may be valid,
    # last entries may be valid, but middle entries may be invalid
    # (paged). In the various checks below, we test for None (paged)
    # and zero (non-paged but invalid RVA).

    # Array of RVAs to function code
    address_of_functions = obj.Object('Array',
                                      offset = mod_base + self.AddressOfFunctions,
                                      targetType = 'unsigned int',
                                      count = self.NumberOfFunctions,
                                      vm = self.obj_native_vm)
    # Array of RVAs to function names
    address_of_names = obj.Object('Array',
                                  offset = mod_base + self.AddressOfNames,
                                  targetType = 'unsigned int',
                                  count = self.NumberOfNames,
                                  vm = self.obj_native_vm)
    # Array of RVAs to function ordinals
    address_of_name_ordinals = obj.Object('Array',
                                          offset = mod_base + self.AddressOfNameOrdinals,
                                          targetType = 'unsigned short',
                                          count = self.NumberOfNames,
                                          vm = self.obj_native_vm)

    # When functions are exported by Name, it will increase
    # NumberOfNames by 1 and NumberOfFunctions by 1. When
    # functions are exported by Ordinal, only the NumberOfFunctions
    # will increase. First we enum functions exported by Name
    # and track their corresponding Ordinals, so that when we enum
    # functions exported by Ordinal only, we don't duplicate.

    # Use a set: membership is tested once per exported function in the
    # second loop below, and a list would make that scan quadratic for
    # modules with large export tables.
    seen_ordinals = set()

    # Handle functions exported by name *and* ordinal
    for i in range(self.NumberOfNames):

        name_rva = address_of_names[i]
        ordinal = address_of_name_ordinals[i]

        if name_rva in (0, None):
            continue

        # Check the sanity of ordinal values before using it as an index
        if ordinal is None or ordinal >= self.NumberOfFunctions:
            continue

        func_rva = address_of_functions[ordinal]

        if func_rva in (0, None):
            continue

        # Handle forwarded exports. If the function's RVA is inside the exports
        # section (as given by the VirtualAddress and Size fields in the
        # DataDirectory), the symbol is forwarded. Return the name of the
        # forwarded function and None as the function address.

        if (func_rva >= exp_dir.VirtualAddress and
                func_rva < exp_dir.VirtualAddress + exp_dir.Size):
            n = self._name(func_rva)
            f = obj.NoneObject("Ordinal function {0} in module {1} forwards to {2}".format(
                ordinal, self.obj_parent.BaseDllName, n))
        else:
            n = self._name(name_rva)
            f = func_rva

        # Add the ordinal base and save it
        ordinal += self.Base
        seen_ordinals.add(ordinal)

        yield ordinal, f, n

    # Handle functions exported by ordinal only
    for i in range(self.NumberOfFunctions):

        ordinal = self.Base + i

        # Skip functions already enumerated above
        if ordinal not in seen_ordinals:

            func_rva = address_of_functions[i]

            if func_rva in (0, None):
                continue

            seen_ordinals.add(ordinal)

            # There is no name RVA
            yield ordinal, func_rva, obj.NoneObject("Name RVA not accessible")
class _LDR_DATA_TABLE_ENTRY(obj.CType):
    """
    Class for PE file / modules

    If these classes are instantiated by _EPROCESS.list_*_modules()
    then its guaranteed to be in the process address space.

    FIXME: If these classes are found by modscan, ensure we can
    dereference properly with obj_native_vm.
    """

    def _nt_header(self):
        """Return the _IMAGE_NT_HEADERS object, or a NoneObject if the
        DOS/NT header sanity checks fail."""

        try:
            dos_header = obj.Object("_IMAGE_DOS_HEADER", offset = self.DllBase,
                                    vm = self.obj_native_vm)

            return dos_header.get_nt_header()
        except ValueError:
            return obj.NoneObject("Failed initial sanity checks")
        except exceptions.SanityCheckException:
            return obj.NoneObject("Failed initial sanity checks. Try -u or --unsafe")

    def _directory(self, dir_index):
        """Return the requested IMAGE_DATA_DIRECTORY.

        @param dir_index: index into OptionalHeader.DataDirectory
        @raise ValueError: if the header is unreadable, the directory is
            absent/empty, or its VA+Size extends past SizeOfImage.
        """

        nt_header = self._nt_header()
        if nt_header == None:
            raise ValueError('No directory index {0}'.format(dir_index))

        data_dir = nt_header.OptionalHeader.DataDirectory[dir_index]
        if data_dir == None:
            raise ValueError('No directory index {0}'.format(dir_index))

        # Make sure the directory exists
        if data_dir.VirtualAddress == 0 or data_dir.Size == 0:
            raise ValueError('No directory index {0}'.format(dir_index))

        # Make sure the directory VA and Size are sane
        if data_dir.VirtualAddress + data_dir.Size > nt_header.OptionalHeader.SizeOfImage:
            raise ValueError('Invalid directory for index {0}'.format(dir_index))

        return data_dir

    def export_dir(self):
        """Return the IMAGE_DATA_DIRECTORY for exports"""
        return self._directory(0)  # DIRECTORY_ENTRY_EXPORT

    def import_dir(self):
        """Return the IMAGE_DATA_DIRECTORY for imports"""
        return self._directory(1)  # DIRECTORY_ENTRY_IMPORT

    def debug_dir(self):
        """Return the IMAGE_DEBUG_DIRECTORY for debug info"""
        return self._directory(6)  # IMAGE_DEBUG_DIRECTORY

    def security_dir(self):
        """Return the IMAGE_SECURITY_DIRECTORY"""
        return self._directory(4)  # IMAGE_DIRECTORY_ENTRY_SECURITY

    def get_debug_directory(self):
        """Return the debug directory object for this PE, or a NoneObject
        if the debug data directory is missing or invalid."""

        try:
            data_dir = self.debug_dir()
        except ValueError, why:
            return obj.NoneObject(str(why))

        return obj.Object("_IMAGE_DEBUG_DIRECTORY",
                          offset = self.DllBase + data_dir.VirtualAddress,
                          vm = self.obj_native_vm)
def get_obj(self, ptr, sname, member):
    """Map a pointer at *member* of struct *sname* back to the struct.

    Subtracts the member's offset within the structure from *ptr*
    (container_of-style arithmetic) and instantiates the structure at
    the resulting address in self.addr_space.
    """
    return obj.Object(
        sname,
        offset=ptr - self.profile.get_obj_offset(sname, member),
        vm=self.addr_space)
def calculate(self):
    """Walk the kernel's gNotifications OSDictionary and yield each
    IOService notification handler, flagging whether the handler address
    resolves to a known kernel/kext symbol.

    Yields (good, module, key, notifier, matches, handler).
    """
    common.set_plugin_members(self)

    (kernel_symbol_addresses, kmods) = common.get_kernel_addrs(self)

    gnotify_addr = common.get_cpp_sym("gNotifications", self.addr_space.profile)

    p = obj.Object("Pointer", offset=gnotify_addr, vm=self.addr_space)

    gnotifications = p.dereference_as(
        self._struct_or_class("OSDictionary"))

    # Sanity cap: an absurd count indicates a bad pointer/smear.
    if gnotifications.count > 1024:
        return

    ents = obj.Object('Array', offset=gnotifications.dictionary,
                      vm=self.addr_space,
                      targetType=self._struct_or_class("dictEntry"),
                      count=gnotifications.count)

    # walk the current set of notifications
    for ent in ents:
        if ent == None or not ent.is_valid():
            continue

        key = str(ent.key.dereference_as(
            self._struct_or_class("OSString")))

        # get the value
        valset = ent.value.dereference_as(
            self._struct_or_class("OSOrderedSet"))

        # Same smear-guard as above for the per-key value set.
        if valset == None or valset.count > 1024:
            continue

        notifiers_ptrs = obj.Object('Array', offset=valset.array,
                                    vm=self.addr_space,
                                    targetType='Pointer',
                                    count=valset.count)

        if notifiers_ptrs == None:
            continue

        for ptr in notifiers_ptrs:
            notifier = ptr.dereference_as(
                self._struct_or_class("_IOServiceNotifier"))

            if notifier == None:
                continue

            matches = self.get_matching(notifier)

            if matches == []:
                continue

            # this is the function that handles whatever the notification is for
            # this should be only in the kernel or in one of the known IOKit
            # drivers for the specific kernel
            handler = notifier.handler.v()

            # compatHandler, when set, takes precedence over handler.
            ch = notifier.compatHandler.v()
            if ch:
                handler = ch

            (good, module) = common.is_known_address_name(
                handler, kernel_symbol_addresses, kmods)

            yield (good, module, key, notifier, matches, handler)
def __str__(self):
    """Render the backing "String" object; empty string if unreadable."""
    backing = obj.Object("String",
                         offset=self.string,
                         vm=self.obj_vm,
                         length=self.length)
    if backing:
        return str(backing)
    return ''
def calculate(self):
    """Search memory for credentials

    Scans the .data/.bss sections of every openvpn.exe process for the
    static user_pass structure (OpenVPN 2.2.2 and 2.3.2/2.3.4 layouts)
    and yields (pid, username, password) for the first hit per section.
    """
    kernel_memory = utils.load_as(self._config)

    # Find all OpenVPN processes
    processes = tasks.pslist(kernel_memory)
    processes = filter(
        lambda p: str(p.ImageFileName).lower() == "openvpn.exe", processes)

    # Search for credentials in each process
    for process in processes:
        process_memory = process.get_process_address_space()

        # Get some basic process information
        pid = int(process.UniqueProcessId)
        image_base = process.Peb.ImageBaseAddress

        dos_header = obj.Object("_IMAGE_DOS_HEADER",
                                offset=image_base, vm=process_memory)
        nt_header = dos_header.get_nt_header()

        # Find the .data and .bss sections
        sections = nt_header.get_sections(True)
        sections = filter(lambda s: str(s.Name) in [".data", ".bss"],
                          sections)
        if len(sections) == 0:
            # Sections may be unavailable
            continue

        # Search each section for credentials
        for section in sections:
            # Determine dimensions of section
            sec_start = section.VirtualAddress + image_base
            sec_end = sec_start + section.Misc.VirtualSize
            sec_type = str(section.Name)

            # Search static user_pass struct
            # Assumptions:
            # - Struct is aligned on 16-byte boundary
            # - Bool fields are 4 bytes long in 2.2.2
            # - Bool fields are 2 bytes long in 2.3.2 and 2.3.4
            #   NOTE(review): the 2.3.x unpack below uses "BB" (1 byte
            #   each), which disagrees with this 2-byte note -- verify.
            # - Username and password buffers are 4096 bytes long
            for creds_start in xrange(sec_start, sec_end, 16):
                creds = process_memory.read(creds_start, 16)
                if not creds:
                    # Memory may be unavailable
                    continue

                struct_layout = None
                struct_length = None

                # Detect the 2.2.2 struct
                defined, nocache, username = struct.unpack("II8s", creds)
                if sec_type == ".data" \
                        and valid_bool(defined) \
                        and valid_bool(nocache) \
                        and username[0] in USERNAME_CHARSET:
                    struct_layout = "II4096s4096s"
                    struct_length = 4 + 4 + 4096 + 4096

                # Detect the 2.3.2/2.3.4 struct
                defined, nocache, username = struct.unpack("BB14s", creds)
                if sec_type == ".bss" \
                        and valid_bool(defined) \
                        and valid_bool(nocache) \
                        and username[0] in USERNAME_CHARSET:
                    struct_layout = "BB4096s4096s"
                    struct_length = 1 + 1 + 4096 + 4096

                if struct_layout is not None:
                    # Read and parse detected structure
                    creds = process_memory.zread(creds_start, struct_length)
                    _, _, username, password = struct.unpack(
                        struct_layout, creds)

                    # Terminate strings at null byte
                    username = terminate_string(username)
                    password = terminate_string(password)

                    yield (pid, username, password)

                    # Stop searching in current section
                    break
def carve(self, address_space, offset):
    """Carve a PF_HEADER from *address_space*.

    Reads 256 bytes beginning 4 bytes before *offset*, wraps them in a
    temporary buffer address space, and parses a PF_HEADER at its start.
    The parsed header is cached on self.pf_header and returned.
    """
    raw = address_space.read(offset - 4, 256)
    buffer_space = addrspace.BufferAddressSpace(self.config, data=raw)
    self.pf_header = obj.Object('PF_HEADER', vm=buffer_space, offset=0)
    return self.pf_header
def get_heaps(self, ps_ad, peb):
    """Get the heaps and heap related data structures.

    Walks the PEB's ProcessHeaps array (plus the read-only shared heap),
    classifies each heap (NT vs Segment heap), collects its
    segments/allocations, and attaches a descriptive text to the
    matching entries in self.user_allocs.

    @param ps_ad: the process address space
    @param peb: the process PEB object
    """
    num_heaps = peb.NumberOfHeaps.v()

    # ProcessHeaps is an array of pointers; pointer width depends on
    # whether the process is WoW64 (32-bit) or native 64-bit.
    if self.wow64:
        heaps = obj.Object('Array', offset=peb.ProcessHeaps.v(), vm=ps_ad,
                           targetType='unsigned long', count=num_heaps)
    else:
        heaps = obj.Object('Array', offset=peb.ProcessHeaps.v(), vm=ps_ad,
                           targetType='unsigned long long', count=num_heaps)

    heaps_list = list(heaps)

    # add shared heap to list (always the last entry)
    heaps_list.append(peb.ReadOnlySharedMemoryBase)

    # get heap objects
    heap_objects = []
    for address in heaps_list:
        heap = obj.Object('_HEAP', offset=address.v(), vm=ps_ad)
        heap_objects.append([address, heap])

    # process each heap for metadata
    data = []
    for heap_count, (address, heap) in enumerate(heap_objects):
        if heap_count == len(heaps_list) - 1:
            # shared heap
            heap_info = str(heap_count) + " (Shared)"
        else:
            heap_info = str(heap_count)

        # add heap
        if not heap.is_valid():
            # Include the address so the warning is actionable.
            debug.warning("Unreadable heap @ {0:#x}".format(address.v()))
            heap_text = "Heap {0} (Unreadable)".format(heap_info)
            data.append([address.v(), heap_text])
            continue

        is_nt_heap = False
        if heap.SegmentSignature == 0xffeeffee:
            data.append(
                [address.v(), "Heap {0} NT Heap".format(heap_info)])
            is_nt_heap = True
        else:
            data.append(
                [address.v(), "Heap {0} Segment Heap".format(heap_info)])

        if is_nt_heap:
            for virtual_alloc in self.get_heap_virtual_allocs(
                    ps_ad, heap, heap_info):
                data.append(virtual_alloc)

            # parse for heap segments
            for segment in self.get_heap_segments(ps_ad, heap, heap_info):
                data.append(segment)
        else:
            for seg in self.get_seg_heap_seg(ps_ad, heap, heap_info):
                data.append(seg)
            for large in self.get_seg_heap_large(ps_ad, heap, heap_info):
                data.append(large)

    # add heap data to user allocs; addresses without a tracked user
    # allocation are simply skipped.
    for addr, text in data:
        try:
            self.user_allocs[addr].add_metadata(text)
        except KeyError:
            pass
def calculate(self):
    """ This works by walking the IDT table for the entries that Linux uses and verifies that each is a symbol in the kernel

    Yields (index, entry, handler_address, symbol_name, hooked) where
    hooked is 1 when the handler address is not a known kernel symbol.
    """
    linux_common.set_plugin_members(self)

    if self.profile.metadata['arch'] not in ["x64", "x86"]:
        debug.error(
            "This plugin is only supported on Intel-based memory captures")

    tblsz = 256

    sym_addrs = self.profile.get_all_addresses()

    # hw handlers + system call
    check_idxs = list(range(0, 20)) + [128]

    # Pick the IDT entry type available in this kernel's profile.
    if self.profile.metadata.get('memory_model', '32bit') == "32bit":
        if self.profile.has_type("gate_struct"):
            idt_type = "gate_struct"
        else:
            idt_type = "desc_struct"
    else:
        if self.profile.has_type("gate_struct64"):
            idt_type = "gate_struct64"
        elif self.profile.has_type("gate_struct"):
            idt_type = "gate_struct"
        else:
            idt_type = "idt_desc"

    # this is written as a list b/c there are supposedly kernels with per-CPU IDTs
    # but I haven't found one yet...
    addrs = [self.addr_space.profile.get_symbol("idt_table")]

    for tableaddr in addrs:
        table = obj.Object(theType='Array', offset=tableaddr,
                           vm=self.addr_space, targetType=idt_type,
                           count=tblsz)

        for i in check_idxs:
            ent = table[i]

            if not ent:
                continue

            if hasattr(ent, "Address"):
                idt_addr = ent.Address
            else:
                # Reassemble the handler address from the split
                # low/middle/high offset fields of the gate descriptor.
                low = ent.offset_low
                middle = ent.offset_middle

                if hasattr(ent, "offset_high"):
                    high = ent.offset_high
                else:
                    high = 0

                idt_addr = (high << 32) | (middle << 16) | low

            if idt_addr != 0:
                if not idt_addr in sym_addrs:
                    hooked = 1
                    sym_name = "HOOKED"
                else:
                    hooked = 0
                    sym_name = self.profile.get_symbol_by_address(
                        "kernel", idt_addr)

                yield (i, ent, idt_addr, sym_name, hooked)
def render_text(self, outfd, data):
    """Render each task's image/section summary and its VAD tree.

    For every task: prints a banner, the PE image base/size/sections
    (best-effort), then a table with one row per VAD node including
    parent linkage, map type, commit charge, protection and backing file.
    Relies on the module-level parentImageDict / ProtectionDict.
    """
    for task in data:
        imagename = ""
        outfd.write("*" * 72 + "\n")
        outfd.write("Process: {0}, Pid: {1}\n".format(
            task.ImageFileName, task.UniqueProcessId))

        task_space = task.get_process_address_space()
        result = None
        if task_space is None:
            result = "Error: Cannot acquire process AS"
        elif task.Peb is None:
            result = "Error: PEB at {0:#x} is paged".format(task.m('Peb'))
        elif task_space.vtop(task.Peb.ImageBaseAddress) is None:
            result = "Error: ImageBaseAddress at {0:#x} is paged".format(
                task.Peb.ImageBaseAddress)
        else:
            base = task.Peb.ImageBaseAddress
            try:
                dos_header = obj.Object("_IMAGE_DOS_HEADER",
                                        offset=base, vm=task_space)
                nt_header = dos_header.get_nt_header()
                hbase = hex(base).replace("L", "")
                isize = nt_header.OptionalHeader.SizeOfImage
                tsize = hex(base + nt_header.OptionalHeader.SizeOfImage).replace("L", "")
                outfd.write("Image Base: " + hbase +
                            "\nImage Size: " + str(isize) +
                            "\nImage Total size: " + tsize + "\n")
                outfd.write("\nSections: \n")
                for sec in nt_header.get_sections(True):
                    secname = str(sec.Name)
                    secaddr = hex(sec.VirtualAddress + base)
                    outfd.write("\t" + secname + " " + secaddr + "\n")
                outfd.write("\n")
            except Exception:
                # Best-effort: PE headers may be paged or corrupt.
                pass

        # Report (rather than silently discard) any acquisition error.
        if result is not None:
            outfd.write(result + "\n")

        self.table_header(outfd, [
            ("Current", "[addrpad]"),
            ("-->", "2"),
            ("Parent", "[addrpad]"),
            ("|", "1"),
            ("StartAddr", "[addrpad]"),
            ("|", "1"),
            ("Size", "[addrpad]"),
            ("|", "1"),
            ("EndAddr", "[addrpad]"),
            ("|", "1"),
            ("Owner", "14"),
            ("|", "1"),
            ("MapType", "8"),
            ("|", "1"),
            ("Commit", "5"),
            ("|", "1"),
            ("Access", "17"),
            ("|", "1"),
            ("Filename", "")
        ])

        for vad in task.VadRoot.traverse():
            imagename = ""
            filename = ""
            try:
                file_obj = vad.ControlArea.FilePointer
                if file_obj:
                    filename = file_obj.FileName or "Pagefile-backed section"
                    if str(filename) != "Pagefile-backed section":
                        imagename = str(filename).rsplit("\\", 1)[-1]
                        # Remember this VAD's image name so children can
                        # inherit it below.
                        parentImageDict[vad.obj_offset] = imagename
            except AttributeError:
                # Short VAD types have no ControlArea.
                pass

            # A huge CommitCharge value means "not meaningful"; show -1.
            commit = (vad.u.VadFlags.CommitCharge
                      if vad.u.VadFlags.CommitCharge < 0x7FFFFFFFFFFFF
                      else -1)
            mapType = ("Private" if vad.u.VadFlags.PrivateMemory > 0
                       else "Mapped")
            # Default to "" so the EXECUTE test below cannot hit None.
            accessStr = ProtectionDict.get(int(vad.u.VadFlags.Protection), "")
            if "EXECUTE" in accessStr:
                mapType = "Image"

            pvad = vad.Parent.obj_offset
            cvad = vad.obj_offset

            # Inherit the parent's image name when this VAD has none.
            if not imagename:
                imagename = parentImageDict.get(vad.Parent.obj_offset, "")

            self.table_row(outfd,
                           cvad, "-->", pvad, "|",
                           vad.Start, "|", vad.Length, "|", vad.End, "|",
                           imagename, "|", mapType, "|", commit, "|",
                           accessStr, "|", filename)
def calculate(self):
    """Parse the control structures

    Walks all window stations/desktops/windows, finds windows whose
    class resolves to a supported common control, and yields
    (context, atom_class, pid, image_name, is_wow64, ctrl) for each.
    """

    # Check the output folder exists
    if self._config.DUMP_DIR and not os.path.isdir(self._config.dump_dir):
        debug.error('{0} is not a directory'.format(self._config.dump_dir))

    # Apply the correct vtypes for the profile
    addr_space = utils.load_as(self._config)
    addr_space.profile.object_classes.update(Editbox.editbox_classes)
    self.apply_types(addr_space)

    # Build a list of tasks
    tasks = win32.tasks.pslist(addr_space)
    if self._config.PID:
        pids = [int(p) for p in self._config.PID.split(',')]
        the_tasks = [t for t in tasks if t.UniqueProcessId in pids]
    else:
        the_tasks = [t for t in tasks]

    # In case no PIDs found
    if len(the_tasks) < 1:
        return

    # Iterate through all the window objects matching for supported controls
    mh = messagehooks.MessageHooks(self._config)
    for winsta, atom_tables in mh.calculate():
        for desktop in winsta.desktops():
            for wnd, _level in desktop.windows(desktop.DeskInfo.spwnd):
                if wnd.Process in the_tasks:

                    atom_class = mh.translate_atom(winsta, atom_tables,
                                                   wnd.ClassAtom)
                    if atom_class:
                        atom_class = str(atom_class)
                        if '!' in atom_class:
                            comctl_class = atom_class.split('!')[-1].lower()
                            if comctl_class in supported_controls:

                                # Do we need to fake being 32bit for Wow?
                                if wnd.Process.IsWow64 and not self.fake_32bit:
                                    meta = addr_space.profile.metadata
                                    meta['memory_model'] = '32bit'
                                    self.apply_types(addr_space, meta)
                                    self.fake_32bit = True
                                elif not wnd.Process.IsWow64 and self.fake_32bit:
                                    self.apply_types(addr_space)
                                    self.fake_32bit = False

                                context = '{0}\\{1}\\{2}'.format(
                                    winsta.dwSessionId,
                                    winsta.Name, desktop.Name)

                                task_vm = wnd.Process.get_process_address_space()

                                # The control structure lives in the extra
                                # bytes immediately after the tagWND.
                                wndextra_offset = wnd.v() + \
                                    addr_space.profile.get_obj_size('tagWND')
                                wndextra = obj.Object(
                                    'address',
                                    offset=wndextra_offset,
                                    vm=task_vm)
                                ctrl = obj.Object(
                                    supported_controls[comctl_class],
                                    offset=wndextra,
                                    vm=task_vm)

                                if self._config.DUMP_DIR:
                                    dump_to_file(
                                        ctrl,
                                        wnd.Process.UniqueProcessId,
                                        wnd.Process.ImageFileName,
                                        self._config.DUMP_DIR)

                                yield context, atom_class, wnd.Process.UniqueProcessId, \
                                    wnd.Process.ImageFileName, wnd.Process.IsWow64, ctrl
of _IMAGE_IMPORT_DESCRIPTOR structures. The end is reached when the IID structure is all zeros. """ try: data_dir = self.import_dir() except ValueError, why: raise StopIteration(why) i = 0 desc_size = self.obj_vm.profile.get_obj_size('_IMAGE_IMPORT_DESCRIPTOR') while 1: desc = obj.Object('_IMAGE_IMPORT_DESCRIPTOR', vm = self.obj_native_vm, offset = self.DllBase + data_dir.VirtualAddress + (i * desc_size), parent = self) # Stop if the IID is paged or all zeros if desc == None or desc.is_list_end(): break # Stop if the IID contains invalid fields if not desc.valid(self._nt_header()): break dll_name = desc.dll_name() for o, f, n in desc._imported_functions(): yield dll_name, o, f, n
def calculate(self):
    """Determines the address space

    Scans for KDBG structures by matching each Windows profile's
    KDBGHeader needle; for Win8/2012+ x64 profiles (encrypted KDBG)
    falls back to VolMagic KDBG suggestions. Yields (profile_name, kdbg).
    """
    profilelist = [
        p.__name__
        for p in list(registry.get_plugin_classes(obj.Profile).values())
    ]

    encrypted_kdbg_profiles = []
    proflens = {}
    maxlen = 0
    origprofile = self._config.PROFILE
    # Collect the KDBGHeader needle for every Windows profile, and note
    # which profiles (64-bit, version >= 6.2) use an encrypted KDBG.
    for p in profilelist:
        self._config.update('PROFILE', p)
        buf = addrspace.BufferAddressSpace(self._config)
        if buf.profile.metadata.get('os', 'unknown') == 'windows':
            proflens[p] = obj.VolMagic(buf).KDBGHeader.v()
            maxlen = max(maxlen, len(proflens[p]))
            if buf.profile.metadata.get(
                    'memory_model', '64bit') == '64bit' and (
                        buf.profile.metadata.get('major', 0),
                        buf.profile.metadata.get('minor', 0),
                    ) >= (
                        6,
                        2,
                    ):
                encrypted_kdbg_profiles.append(p)

    self._config.update('PROFILE', origprofile)

    # keep track of the number of potential KDBGs we find
    count = 0

    if origprofile not in encrypted_kdbg_profiles:
        scanner = KDBGScanner(needles=list(proflens.values()))

        aspace = utils.load_as(self._config, astype='any')

        suspects = []
        for offset in scanner.scan(aspace):
            val = aspace.read(offset, maxlen + 0x10)
            for l in proflens:
                if val.find(proflens[l]) >= 0:
                    kdbg = obj.Object("_KDDEBUGGER_DATA64",
                                      offset=offset, vm=aspace)
                    suspects.append((l, kdbg))
                    count += 1

        for p, k in suspects:
            # Without FORCE, report the suspect as-is; with FORCE,
            # rescan under the suggested profile's own address space.
            if not self._config.FORCE:
                yield p, k
                continue

            self._config.update("PROFILE", p)
            nspace = utils.load_as(self._config, astype="any")
            for offset in scanner.scan(nspace):
                val = nspace.read(offset, maxlen + 0x10)
                if val.find(proflens[p]) >= 0:
                    kdbg = obj.Object("_KDDEBUGGER_DATA64",
                                      offset=offset, vm=nspace)
                    yield p, kdbg

        self._config.update('PROFILE', origprofile)

    # only perform the special win8/2012 scan if we didn't find
    # any others and if a virtual x64 address space is available
    if count == 0:
        if origprofile in encrypted_kdbg_profiles:
            encrypted_kdbg_profiles = [origprofile]
        for profile in encrypted_kdbg_profiles:
            self._config.update('PROFILE', profile)
            aspace = utils.load_as(self._config, astype='any')
            if hasattr(aspace, 'vtop'):
                for kdbg in obj.VolMagic(
                        aspace).KDBG.generate_suggestions():
                    yield profile, kdbg
def _name(self, name_rva):
    """Return a String object (max 128 bytes) for the export name that
    lives at *name_rva* relative to the parent module's base."""
    name_va = self.obj_parent.DllBase + name_rva
    return obj.Object("String",
                      offset = name_va,
                      vm = self.obj_native_vm,
                      length = 128)
def render_text(self, outfd, data):
    """Renders the KPCR values as text.

    ``data`` yields (profile_name, kdbg) pairs; for each pair this
    writes a banner plus one "label: value" line per KDBG field to
    ``outfd``.
    """
    for profile, kdbg in data:
        # Visual separator between candidate KDBG blocks.
        outfd.write("*" * 50 + "\n")
        outfd.write(
            "Instantiating KDBG using: {0} {1} ({2}.{3}.{4} {5})\n".format(
                kdbg.obj_vm.name, kdbg.obj_vm.profile.__class__.__name__,
                kdbg.obj_vm.profile.metadata.get('major', 0),
                kdbg.obj_vm.profile.metadata.get('minor', 0),
                kdbg.obj_vm.profile.metadata.get('build', 0),
                kdbg.obj_vm.profile.metadata.get('memory_model', '32bit'),
            ))
        # Will spaces with vtop always have a dtb also?
        has_vtop = hasattr(kdbg.obj_native_vm, 'vtop')
        # Always start out with the virtual and physical offsets
        if has_vtop:
            outfd.write("{0:<30}: {1:#x}\n".format("Offset (V)",
                                                   kdbg.obj_offset))
            outfd.write("{0:<30}: {1:#x}\n".format(
                "Offset (P)", kdbg.obj_native_vm.vtop(kdbg.obj_offset)))
        else:
            # Physical-only space: the object offset is already physical.
            outfd.write("{0:<30}: {1:#x}\n".format("Offset (P)",
                                                   kdbg.obj_offset))
        # The following attributes only exist on some KDBG variants
        # (e.g. decoded Win8+ blocks), hence the hasattr guards.
        if hasattr(kdbg, 'KdCopyDataBlock'):
            outfd.write("{0:<30}: {1:#x}\n".format("KdCopyDataBlock (V)",
                                                   kdbg.KdCopyDataBlock))
        if hasattr(kdbg, 'block_encoded'):
            outfd.write("{0:<30}: {1}\n".format(
                "Block encoded",
                "Yes" if kdbg.block_encoded == 1 else "No",
            ))
        if hasattr(kdbg, 'wait_never'):
            outfd.write("{0:<30}: {1:#x}\n".format("Wait never",
                                                   kdbg.wait_never))
        if hasattr(kdbg, 'wait_always'):
            outfd.write("{0:<30}: {1:#x}\n".format("Wait always",
                                                   kdbg.wait_always))
        # These fields can be gathered without dereferencing
        # any pointers, thus they're available always
        outfd.write("{0:<30}: {1}\n".format("KDBG owner tag check",
                                            str(kdbg.is_valid())))
        outfd.write("{0:<30}: {1}\n".format(
            "Profile suggestion (KDBGHeader)", profile))
        verinfo = kdbg.dbgkd_version64()
        if verinfo:
            outfd.write(
                "{0:<30}: {1:#x} (Major: {2}, Minor: {3})\n".format(
                    "Version64",
                    verinfo.obj_offset,
                    verinfo.MajorVersion,
                    verinfo.MinorVersion,
                ))
        # Print details only available when a DTB can be found
        # and we have an AS with vtop.
        if has_vtop:
            outfd.write("{0:<30}: {1}\n".format(
                "Service Pack (CmNtCSDVersion)", kdbg.ServicePack))
            outfd.write("{0:<30}: {1}\n".format(
                "Build string (NtBuildLab)",
                kdbg.NtBuildLab.dereference(),
            ))
            # Counting may fail on partially valid blocks; fall back to 0.
            try:
                num_tasks = len(list(kdbg.processes()))
            except AttributeError:
                num_tasks = 0
            try:
                num_modules = len(list(kdbg.modules()))
            except AttributeError:
                num_modules = 0
            cpu_blocks = list(kdbg.kpcrs())
            outfd.write("{0:<30}: {1:#x} ({2} processes)\n".format(
                "PsActiveProcessHead",
                kdbg.PsActiveProcessHead,
                num_tasks,
            ))
            outfd.write("{0:<30}: {1:#x} ({2} modules)\n".format(
                "PsLoadedModuleList",
                kdbg.PsLoadedModuleList,
                num_modules,
            ))
            # A valid kernel base should start with the MZ DOS magic.
            outfd.write("{0:<30}: {1:#x} (Matches MZ: {2})\n".format(
                "KernelBase", kdbg.KernBase,
                str(kdbg.obj_native_vm.read(kdbg.KernBase, 2) == "MZ"),
            ))
            # Best-effort: show OS version from the kernel PE header.
            try:
                dos_header = obj.Object(
                    "_IMAGE_DOS_HEADER",
                    offset=kdbg.KernBase,
                    vm=kdbg.obj_native_vm,
                )
                nt_header = dos_header.get_nt_header()
            except (ValueError, exceptions.SanityCheckException):
                pass
            else:
                outfd.write("{0:<30}: {1}\n".format(
                    "Major (OptionalHeader)",
                    nt_header.OptionalHeader.MajorOperatingSystemVersion,
                ))
                outfd.write("{0:<30}: {1}\n".format(
                    "Minor (OptionalHeader)",
                    nt_header.OptionalHeader.MinorOperatingSystemVersion,
                ))
            # One KPCR per CPU.
            for kpcr in cpu_blocks:
                outfd.write("{0:<30}: {1:#x} (CPU {2})\n".format(
                    "KPCR", kpcr.obj_offset, kpcr.ProcessorBlock.Number))
        else:
            # No vtop: print the raw pointers without dereferencing.
            outfd.write("{0:<30}: {1:#x}\n".format(
                "PsActiveProcessHead", kdbg.PsActiveProcessHead))
            outfd.write("{0:<30}: {1:#x}\n".format(
                "PsLoadedModuleList", kdbg.PsLoadedModuleList))
            outfd.write("{0:<30}: {1:#x}\n".format("KernelBase",
                                                   kdbg.KernBase))
        outfd.write("\n")
def findcookie(self, kernel_space):
    """Find and read the nt!ObHeaderCookie value.

    On success, return True and save the cookie value in self._cookie.
    On Failure, return False.

    This method must be called before performing any tasks that require
    object header validation including handles, psxview (due to pspcid)
    and the object scanning plugins (psscan, etc).

    NOTE: this cannot be implemented as a volatility "magic" class,
    because it must be persistent across various classes and sources.
    We don't want to recalculate the cookie value multiple times.
    """
    meta = kernel_space.profile.metadata
    vers = (meta.get("major", 0), meta.get("minor", 0))
    # this algorithm only applies to Windows 10 or greater
    if vers < (6, 4):
        return True
    # prevent subsequent attempts from recalculating the existing value
    if self._cookie:
        return True
    if not has_distorm:
        debug.warning("distorm3 module is not installed")
        return False
    kdbg = tasks.get_kdbg(kernel_space)
    if not kdbg:
        debug.warning("Cannot find KDBG")
        return False
    # The first entry in the KDBG module list is the NT kernel itself.
    nt_mod = None
    for mod in kdbg.modules():
        nt_mod = mod
        break
    # NOTE(review): '== None' (not 'is None') is kept deliberately here
    # and below — framework lookups may return None-like proxy objects
    # that compare equal to None without being the None singleton; verify
    # before "fixing" these comparisons.
    if nt_mod == None:
        debug.warning("Cannot find NT module")
        return False
    addr = nt_mod.getprocaddress("ObGetObjectType")
    if addr == None:
        debug.warning("Cannot find nt!ObGetObjectType")
        return False
    # produce an absolute address by adding the DLL base to the RVA
    addr += nt_mod.DllBase
    if not nt_mod.obj_vm.is_valid_address(addr):
        debug.warning("nt!ObGetObjectType at {0} is invalid".format(addr))
        return False
    # in theory...but so far we haven't tested 32-bits
    model = meta.get("memory_model")
    if model == "32bit":
        mode = distorm3.Decode32Bits
    else:
        mode = distorm3.Decode64Bits
    # Disassemble the start of ObGetObjectType up to the first RET.
    data = nt_mod.obj_vm.read(addr, 100)
    ops = distorm3.Decompose(addr, data, mode, distorm3.DF_STOP_ON_RET)
    addr = None
    # search backwards from the RET and find the MOVZX
    if model == "32bit":
        # movzx ecx, byte ptr ds:_ObHeaderCookie
        for op in reversed(ops):
            # 7-byte MOVZX reg, byte [absolute]: the displacement is
            # the cookie's address (masked to 32 bits).
            if (op.size == 7 and 'FLAG_DST_WR' in op.flags
                    and len(op.operands) == 2 and op.operands[0].type ==
                    'Register'
                    and op.operands[1].type == 'AbsoluteMemoryAddress'
                    and op.operands[1].size == 8):
                addr = op.operands[1].disp & 0xFFFFFFFF
                break
    else:
        # movzx ecx, byte ptr cs:ObHeaderCookie
        for op in reversed(ops):
            # RIP-relative form: address = next-instruction RIP + disp.
            if (op.size == 7 and 'FLAG_RIP_RELATIVE' in op.flags
                    and len(op.operands) == 2
                    and op.operands[0].type == 'Register'
                    and op.operands[1].type == 'AbsoluteMemory'
                    and op.operands[1].size == 8):
                addr = op.address + op.size + op.operands[1].disp
                break
    if not addr:
        debug.warning("Cannot find nt!ObHeaderCookie")
        return False
    if not nt_mod.obj_vm.is_valid_address(addr):
        debug.warning("nt!ObHeaderCookie at {0} is not valid".format(addr))
        return False
    # Read the one-byte cookie (as an unsigned int object) and cache it.
    cookie = obj.Object("unsigned int", offset=addr, vm=nt_mod.obj_vm)
    self._cookie = int(cookie)
    return True
MFT_types = { 'MFT_FILE_RECORD': [ 0x400, { 'Signature': [ 0x0, ['unsigned int']], 'FixupArrayOffset': [ 0x4, ['unsigned short']], 'NumFixupEntries': [ 0x6, ['unsigned short']], 'LSN': [ 0x8, ['unsigned long long']], 'SequenceValue': [ 0x10, ['unsigned short']], 'LinkCount': [ 0x12, ['unsigned short']], 'FirstAttributeOffset': [0x14, ['unsigned short']], 'Flags': [0x16, ['unsigned short']], 'EntryUsedSize': [0x18, ['int']], 'EntryAllocatedSize': [0x1c, ['unsigned int']], 'FileRefBaseRecord': [0x20, ['unsigned long long']], 'NextAttributeID': [0x28, ['unsigned short']], 'RecordNumber': [0x2c, ['unsigned long']], 'FixupArray': lambda x: obj.Object("Array", offset = x.obj_offset + x.FixupArrayOffset, count = x.NumFixupEntries, vm = x.obj_vm, target = obj.Curry(obj.Object, "unsigned short")), 'ResidentAttributes': lambda x : obj.Object("RESIDENT_ATTRIBUTE", offset = x.obj_offset + x.FirstAttributeOffset, vm = x.obj_vm), 'NonResidentAttributes': lambda x : obj.Object("NON_RESIDENT_ATTRIBUTE", offset = x.obj_offset + x.FirstAttributeOffset, vm = x.obj_vm), }], 'ATTRIBUTE_HEADER': [ 0x10, { 'Type': [0x0, ['int']], 'Length': [0x4, ['int']], 'NonResidentFlag': [0x8, ['unsigned char']], 'NameLength': [0x9, ['unsigned char']], 'NameOffset': [0xa, ['unsigned short']], 'Flags': [0xc, ['unsigned short']], 'AttributeID': [0xe, ['unsigned short']], }], 'RESIDENT_ATTRIBUTE': [0x16, {
def parse_evt_info(self, name, buf, rawtime=False):
    """Walk ``buf`` (a raw .evt log) and yield one field-list per record.

    Records are located by their "LfLe" signature; ``name`` is the log
    file name (used for the second output column) and ``rawtime``
    controls whether TimeWritten is yielded raw or stringified.
    """
    loc = buf.find("LfLe")
    ## Skip the EVTLogHeader at offset 4. Here you can also parse
    ## and print the header values if you like.
    if loc == 4:
        loc = buf.find("LfLe", loc + 1)
    while loc != -1:
        ## This record's data (and potentially the data for records
        ## that follow it, so we'll be careful to chop it in the right
        ## places before future uses).
        ## The signature sits 4 bytes into the record, hence loc - 4.
        rec = buf[loc - 4:]
        ## Use a buffer AS to instantiate the object
        bufferas = addrspace.BufferAddressSpace(self._config, data=rec)
        evtlog = obj.Object("EVTRecordStruct", offset=0, vm=bufferas)
        rec_size = bufferas.profile.get_obj_size("EVTRecordStruct")
        ## Calculate the SID string. If the SidLength is zero, the next
        ## field (list of strings) starts at StringOffset. If the SidLength
        ## is non-zero, use the data of length SidLength to determine the
        ## SID string and the next field starts at SidOffset.
        if evtlog.SidLength == 0:
            end = evtlog.StringOffset
            sid_string = "N/A"
        else:
            ## detect mangled records based on invalid SID length
            if evtlog.SidLength > 68:
                loc = buf.find("LfLe", loc + 1)
                continue
            ## these should be appropriately sized SIDs
            end = evtlog.SidOffset
            sid_string = self.get_sid_string(rec[end:end +
                                                 evtlog.SidLength])
        ## Source and computer name are consecutive UTF-16-ish strings
        ## between the fixed-size header and the SID/strings area.
        computer_name = ""
        source = ""
        items = rec[rec_size:end].split("\x00\x00")
        source = utils.remove_unprintable(items[0])
        if len(items) > 1:
            computer_name = utils.remove_unprintable(items[1])
        strings = rec[evtlog.StringOffset:].split("\x00\x00",
                                                  evtlog.NumStrings)
        messages = []
        for s in range(min(len(strings), evtlog.NumStrings)):
            messages.append(utils.remove_unprintable(strings[s]))
        # We'll just say N/A if there are no messages, otherwise join them
        # together with semi-colons.
        if messages:
            msg = ";".join(messages)
            # Escape pipes so downstream column-delimited output survives.
            msg = msg.replace("|", "%7c")
        else:
            msg = "N/A"
        # Records with an invalid timestamp are ignored entirely
        if evtlog.TimeWritten != None:
            fields = [
                str(evtlog.TimeWritten)
                if not rawtime else evtlog.TimeWritten,
                ntpath.basename(name), computer_name, sid_string, source,
                str(evtlog.EventID),
                str(evtlog.EventType), msg
            ]
            yield fields
        ## Scan to the next record signature
        loc = buf.find("LfLe", loc + 1)
def parse_attributes(self, mft_buff, check = True, entrysize = 1024):
    """Walk the attributes of this MFT record and return them as a list
    of (attribute_name, value) tuples.

    ``mft_buff`` is the raw record buffer, ``check`` enables sanity
    validation of $SI/$FN attributes, and ``entrysize`` bounds the walk
    when no 0xFFFFFFFF end-of-attributes marker is found.
    """
    next_attr = self.ResidentAttributes
    # The attribute list is terminated by 0xFFFFFFFF; fall back to the
    # whole entry size if the marker is absent.
    end = mft_buff.find("\xff\xff\xff\xff")
    if end == -1:
        end = entrysize
    attributes = []
    # Tracks whether a $DATA attribute was already seen (first $DATA is
    # the default stream; later named ones are reported as ADS).
    dataseen = False
    while next_attr != None and next_attr.obj_offset <= end:
        try:
            attr = ATTRIBUTE_TYPE_ID.get(int(next_attr.Header.Type), None)
        except struct.error:
            # Truncated/garbage header: stop walking.
            next_attr = None
            attr = None
            continue
        if attr == None:
            # Unknown attribute type terminates the walk.
            next_attr = None
        elif attr == "STANDARD_INFORMATION":
            if self.obj_vm._config.DEBUGOUT:
                print "Found $SI"
            if not check or next_attr.STDInfo.is_valid():
                attributes.append((attr, next_attr.STDInfo))
            next_off = next_attr.STDInfo.obj_offset + next_attr.ContentSize
            # Zero-sized content would loop forever; bail out instead.
            if next_off == next_attr.STDInfo.obj_offset:
                next_attr = None
                continue
            next_attr = self.advance_one(next_off, mft_buff, end)
        elif attr == 'FILE_NAME':
            if self.obj_vm._config.DEBUGOUT:
                print "Found $FN"
            self.add_path(next_attr.FileName)
            if not check or next_attr.FileName.is_valid():
                attributes.append((attr, next_attr.FileName))
            next_off = next_attr.FileName.obj_offset + next_attr.ContentSize
            if next_off == next_attr.FileName.obj_offset:
                next_attr = None
                continue
            next_attr = self.advance_one(next_off, mft_buff, end)
        elif attr == "OBJECT_ID":
            if self.obj_vm._config.DEBUGOUT:
                print "Found $ObjectId"
            if next_attr.Header.NonResidentFlag == 1:
                # Content lives outside this record; nothing to parse.
                attributes.append((attr, "Non-Resident"))
                next_attr = None
                continue
            else:
                attributes.append((attr, next_attr.ObjectID))
                next_off = next_attr.ObjectID.obj_offset + next_attr.ContentSize
                if next_off == next_attr.ObjectID.obj_offset:
                    next_attr = None
                    continue
                next_attr = self.advance_one(next_off, mft_buff, end)
        elif attr == "DATA":
            if self.obj_vm._config.DEBUGOUT:
                print "Found $DATA"
            try:
                # A named $DATA attribute after the default stream is an
                # alternate data stream; record its name in the label.
                if next_attr.Header and next_attr.Header.NameOffset > 0 and next_attr.Header.NameLength > 0:
                    adsname = ""
                    if next_attr != None and next_attr.Header != None and next_attr.Header.NameOffset and next_attr.Header.NameLength:
                        nameloc = next_attr.obj_offset + next_attr.Header.NameOffset
                        adsname = obj.Object("NullString", vm = self.obj_vm, offset = nameloc, length = next_attr.Header.NameLength * 2)
                    if adsname != None and adsname.strip() != "" and dataseen:
                        attr += " ADS Name: {0}".format(adsname.strip())
                dataseen = True
            except struct.error:
                next_attr = None
                continue
            try:
                if next_attr.ContentSize == 0:
                    # Empty stream: record it and skip over the header.
                    next_off = next_attr.obj_offset + self.obj_vm.profile.get_obj_size("RESIDENT_ATTRIBUTE")
                    next_attr = self.advance_one(next_off, mft_buff, end)
                    attributes.append((attr, ""))
                    continue
                start = next_attr.obj_offset + next_attr.ContentOffset
                # Never read past the end-of-attributes marker.
                theend = min(start + next_attr.ContentSize, end)
            except struct.error:
                next_attr = None
                continue
            if next_attr.Header.NonResidentFlag == 1:
                # Non-resident content is stored in clusters, not here.
                thedata = ""
            else:
                try:
                    contents = mft_buff[start:theend]
                except TypeError:
                    next_attr = None
                    continue
                thedata = contents
            attributes.append((attr, thedata))
            next_off = theend
            if next_off == start:
                next_attr = None
                continue
            next_attr = self.advance_one(next_off, mft_buff, end)
        elif attr == "ATTRIBUTE_LIST":
            if self.obj_vm._config.DEBUGOUT:
                print "Found $AttributeList"
            if next_attr.Header.NonResidentFlag == 1:
                attributes.append((attr, "Non-Resident"))
                next_attr = None
                continue
            # Delegate: the attribute list appends its entries itself.
            next_attr.process_attr_list(self.obj_vm, self, attributes, check)
            next_attr = None
        else:
            next_attr = None
    return attributes
def ldrmodules(self): """Volatility ldrmodules plugin. @see volatility/plugins/malware/malfind.py """ log.debug("Executing Volatility ldrmodules plugin on {0}".format( self.memdump)) self.__config() results = [] command = self.plugins["ldrmodules"](self.config) for task in command.calculate(): # Build a dictionary for all three PEB lists where the # keys are base address and module objects are the values. inloadorder = dict( (mod.DllBase.v(), mod) for mod in task.get_load_modules()) ininitorder = dict( (mod.DllBase.v(), mod) for mod in task.get_init_modules()) inmemorder = dict( (mod.DllBase.v(), mod) for mod in task.get_mem_modules()) # Build a similar dictionary for the mapped files. mapped_files = {} for vad, address_space in task.get_vads( vad_filter=task._mapped_file_filter): # Note this is a lot faster than acquiring the full # vad region and then checking the first two bytes. if obj.Object("_IMAGE_DOS_HEADER", offset=vad.Start, vm=address_space).e_magic != 0x5A4D: continue mapped_files[int(vad.Start)] = str(vad.FileObject.FileName or "") # For each base address with a mapped file, print info on # the other PEB lists to spot discrepancies. for base in mapped_files.keys(): # Does the base address exist in the PEB DLL lists? load_mod = inloadorder.get(base, None) init_mod = ininitorder.get(base, None) mem_mod = inmemorder.get(base, None) new = { "process_id": int(task.UniqueProcessId), "process_name": str(task.ImageFileName), "dll_base": "{0:#x}".format(base), "dll_in_load": load_mod is not None, "dll_in_init": init_mod is not None, "dll_in_mem": mem_mod is not None, "dll_mapped_path": str(mapped_files[base]), "load_full_dll_name": "", "init_full_dll_name": "", "mem_full_dll_name": "" } if load_mod: new["load_full_dll_name"] = str(load_mod.FullDllName) if init_mod: new["init_full_dll_name"] = str(init_mod.FullDllName) if mem_mod: new["mem_full_dll_name"] = str(mem_mod.FullDllName) results.append(new) return dict(config={}, data=results)
def cast_as(self, cast_type): """Cast the data in a tag as a specific type""" return obj.Object(cast_type, offset=self.RealDataOffset, vm=self.obj_vm)
def calculate(self):
    """Collect TrueCrypt artifacts from memory.

    Yields (category, description) pairs gathered from the registry,
    passphrase scan, process list, services, kernel modules, symbolic
    links, file objects, and driver/device objects.

    FIX: the local previously named ``type`` shadowed the builtin; it is
    renamed ``device_type`` (behavior unchanged).
    """
    addr_space = utils.load_as(self._config)

    # we currently don't use this on x64 because for some reason the
    # x64 version actually doesn't create a DisplayVersion value
    memory_model = addr_space.profile.metadata.get('memory_model')

    if memory_model == '32bit':
        regapi = registryapi.RegistryApi(self._config)
        regapi.reset_current()
        regapi.set_current(hive_name = "software")
        x86key = "Microsoft\\Windows\\CurrentVersion\\Uninstall"
        # Not consulted — see the x64 note above.
        x64key = "Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall"
        for subkey in regapi.reg_get_all_subkeys(None, key = x86key):
            if str(subkey.Name) == "TrueCrypt":
                subpath = x86key + "\\" + subkey.Name
                version = regapi.reg_get_value("software",
                                               key = subpath,
                                               value = "DisplayVersion")
                if version:
                    yield "Registry Version", "{0} Version {1}".format(
                        str(subkey.Name), version)

    # Cached passphrases found in memory.
    scanner = TrueCryptPassphrase(self._config)
    for offset, passphrase in scanner.calculate():
        yield "Password", "{0} at offset {1:#x}".format(
            passphrase, offset)

    # Running TrueCrypt processes.
    for proc in tasks.pslist(addr_space):
        if str(proc.ImageFileName).lower() == "truecrypt.exe":
            yield "Process", "{0} at {1:#x} pid {2}".format(
                proc.ImageFileName, proc.obj_offset,
                proc.UniqueProcessId)

    # The TrueCrypt service record.
    scanner = svcscan.SvcScan(self._config)
    for service in scanner.calculate():
        name = str(service.ServiceName.dereference())
        if name == "truecrypt":
            yield "Service", "{0} state {1}".format(
                name, service.State)

    # The truecrypt.sys kernel driver.
    for mod in modules.lsmod(addr_space):
        basename = str(mod.BaseDllName or '').lower()
        fullname = str(mod.FullDllName or '').lower()
        if (basename.endswith("truecrypt.sys")
                or fullname.endswith("truecrypt.sys")):
            yield "Kernel Module", "{0} at {1:#x} - {2:#x}".format(
                mod.BaseDllName, mod.DllBase,
                mod.DllBase + mod.SizeOfImage)

    # Symbolic links to mounted TrueCrypt volumes.
    scanner = filescan.SymLinkScan(self._config)
    for symlink in scanner.calculate():
        object_header = symlink.get_object_header()
        if "TrueCryptVolume" in str(symlink.LinkTarget or ''):
            yield "Symbolic Link", "{0} -> {1} mounted {2}".format(
                str(object_header.NameInfo.Name or
                    ''), str(symlink.LinkTarget or ''),
                str(symlink.CreationTime or ''))

    # File objects on TrueCrypt volume devices.
    scanner = filescan.FileScan(self._config)
    for fileobj in scanner.calculate():
        filename = str(fileobj.file_name_with_device() or '')
        if "TrueCryptVolume" in filename:
            yield "File Object", "{0} at {1:#x}".format(
                filename, fileobj.obj_offset)

    # Driver and device objects belonging to TrueCrypt.
    scanner = filescan.DriverScan(self._config)
    for driver in scanner.calculate():
        object_header = driver.get_object_header()
        driverext = driver.DriverExtension
        drivername = str(driver.DriverName or '')
        servicekey = str(driverext.ServiceKeyName or '')
        if (drivername.endswith("truecrypt")
                or servicekey.endswith("truecrypt")):
            yield "Driver", "{0} at {1:#x} range {2:#x} - {3:#x}".format(
                drivername, driver.obj_offset, driver.DriverStart,
                driver.DriverStart + driver.DriverSize)
            for device in driver.devices():
                header = device.get_object_header()
                devname = str(header.NameInfo.Name or '')
                device_type = devicetree.DEVICE_CODES.get(
                    device.DeviceType.v())
                yield "Device", "{0} at {1:#x} type {2}".format(
                    devname or "<HIDDEN>", device.obj_offset,
                    device_type or "UNKNOWN")
                if device_type == "FILE_DEVICE_DISK":
                    data = addr_space.read(device.DeviceExtension, 2000)
                    ## the file-hosted container path. no other fields in
                    ## the struct are character based, so we should not
                    ## hit false positives on this scan.
                    offset = data.find("\\\x00?\x00?\x00\\\x00")
                    if offset == -1:
                        container = "<HIDDEN>"
                    else:
                        container = obj.Object(
                            "String",
                            length = 255,
                            offset = device.DeviceExtension + offset,
                            encoding = "utf16",
                            vm = addr_space)
                    yield "Container", "Path: {0}".format(container)
def __init__(self, base, config, **kwargs): ## We must have an AS below us self.as_assert(base, "No base Address Space") addrspace.BaseAddressSpace.__init__(self, base, config, **kwargs) ## This is a tuple of (physical memory offset, file offset, length) self.runs = [] ## A VMware header is found at offset zero of the file self.header = obj.Object("_VMWARE_HEADER", offset=0, vm=base) self.as_assert( self.header.Magic in [0xBED2BED0, 0xBAD1BAD1, 0xBED2BED2, 0xBED3BED3], "Invalid VMware signature: {0:#x}".format(self.header.Magic), ) ## The number of memory regions contained in the file region_count = self.get_tag( self.header, grp_name="memory", tag_name="regionsCount", data_type="unsigned int", ) if not region_count.is_valid() or region_count == 0: ## Create a single run from the main memory region memory_tag = self.get_tag(self.header, grp_name="memory", tag_name="Memory") self.as_assert(memory_tag != None, "Cannot find the single-region Memory tag") self.runs.append( (0, memory_tag.RealDataOffset, memory_tag.DataDiskSize)) else: ## Create multiple runs - one for each region in the header for i in range(region_count): memory_tag = self.get_tag( self.header, grp_name="memory", tag_name="Memory", indices=[0, 0], ) self.as_assert(memory_tag != None, "Cannot find the Memory tag") memory_offset = (self.get_tag( self.header, grp_name="memory", tag_name="regionPPN", indices=[i], data_type="unsigned int", ) * self.PAGE_SIZE) file_offset = (self.get_tag( self.header, grp_name="memory", tag_name="regionPageNum", indices=[i], data_type="unsigned int", ) * self.PAGE_SIZE + memory_tag.RealDataOffset) length = (self.get_tag( self.header, grp_name="memory", tag_name="regionSize", indices=[i], data_type="unsigned int", ) * self.PAGE_SIZE) self.runs.append((memory_offset, file_offset, length)) ## Make sure we found at least one memory run self.as_assert( len(self.runs) > 0, "Cannot find any memory run information")
def _pre_3(self, buf_addr, buf_len): return obj.Object("String", offset = buf_addr, vm = self.addr_space, length = buf_len)
def calculate(self): addr_space = utils.load_as(self._config) if not self.is_valid_profile(addr_space.profile): debug.error("This command does not support the selected profile.") for task in self.filter_tasks(tasks.pslist(addr_space)): task_space = task.get_process_address_space() # We must have a process AS if not task_space: continue winsock = None # Locate the winsock DLL for mod in task.get_load_modules(): if str(mod.BaseDllName or '').lower() == "ws2_32.dll": winsock = mod break if not winsock: continue # Resolve the closesocket API closesocket = winsock.getprocaddress("closesocket") if not closesocket: continue for vad, process_space in task.get_vads( vad_filter=self._zeus_filter, ): if obj.Object("_IMAGE_DOS_HEADER", offset=vad.Start, vm=process_space).e_magic != 0x5A4D: continue data = process_space.zread(vad.Start, vad.Length) scanner = impscan.ImpScan(self._config).call_scan calls = list(scanner(task_space, vad.Start, data)) for (_, iat_loc, call_dest) in calls: if call_dest != closesocket: continue # Read the DWORD directly after closesocket struct_base = obj.Object('Pointer', offset=iat_loc + 4, vm=task_space) # To be valid, it must point within the vad segment if (struct_base < vad.Start or struct_base > (vad.Start + vad.End)): continue # Grab the key data key = task_space.read(struct_base + 0x2a, RC4_KEYSIZE) # Greg's sanity check if len(key) != RC4_KEYSIZE or key[-2:] != "\x00\x00": continue yield task, struct_base, key