def calculate(self):
    # Dump Windows password hashes using caller-supplied SYSTEM and SAM
    # registry hive offsets.
    addr_space = utils.load_as(self._config)

    # Both hive offsets are required to locate the hashes.
    if not self._config.sys_offset or not self._config.sam_offset:
        debug.error("Both SYSTEM and SAM offsets must be provided")
        # debug.error normally aborts; this return is defensive only.
        return

    return hashdumpmod.dump_memory_hashes(addr_space, self._config, self._config.sys_offset, self._config.sam_offset)
def execute(self): """ Executes the plugin command.""" # Check we can support the plugins profs = registry.get_plugin_classes(obj.Profile) if self._config.PROFILE not in profs: debug.error("Invalid profile " + self._config.PROFILE + " selected") if not self.is_valid_profile(profs[self._config.PROFILE]()): debug.error("This command does not support the profile " + self._config.PROFILE) # # Executing plugins is done in two stages - first we calculate data = self.calculate() ## Then we render the result in some way based on the ## requested output mode: function_name = "render_{0}".format(self._config.OUTPUT) if self._config.OUTPUT_FILE: outfd = open(self._config.OUTPUT_FILE, 'w') # TODO: We should probably check that this won't blat over an existing file else: outfd = sys.stdout try: func = getattr(self, function_name) except AttributeError: ## Try to find out what formats are supported result = [] for x in dir(self): if x.startswith("render_"): _a, b = x.split("_", 1) result.append(b) print "Plugin {0} is unable to produce output in format {1}. Supported formats are {2}. Please send a feature request".format(self.__class__.__name__, self._config.OUTPUT, result) return func(outfd, data)
def check_microarch(self, addr, phy_space, key):
    # Validate a candidate VMCS page for the microarchitecture identified
    # by the revision-id key; log the page when it looks consistent.
    microarch = hyper.revision_id_db[key]
    if microarch.lower() == "sandy":
        vmcs_off = hyper.vmcs_offset_sandy
    elif microarch.lower() == "core":
        vmcs_off = hyper.vmcs_offset_core
    else:
        debug.error("Microarchitecture %s not supported yet." % microarch)

    # VMCS field offsets are stored in 4-byte units.
    off = vmcs_off["VMCS_LINK_POINTER"] * 4
    data = phy_space.read(addr + off, 0x04)
    vmcs_link_pointer = struct.unpack('<I', data)[0]
    data2 = phy_space.read(addr + off + 0x04, 0x04)
    vmcs_link_pointer2 = struct.unpack('<I', data2)[0]

    # An unused VMCS link pointer reads as all 1s in both 32-bit halves.
    if (vmcs_link_pointer == 0xffffffff and vmcs_link_pointer2 == 0xffffffff):
        # Field size is given in bits; convert to bytes (Python 2 int division).
        size = layouts.vmcs.vmcs_field_size["GUEST_CR3"] / 8
        off = vmcs_off["GUEST_CR3"] * 4
        data = phy_space.read(addr + off, size)
        if size == 4:
            guest_cr3 = struct.unpack('<I', data)[0]
        elif size == 8:
            guest_cr3 = struct.unpack('<Q', data)[0]
        else:
            debug.error("CR3 size not possible.")
        # A plausible guest CR3 must be page aligned and non-zero.
        if ((guest_cr3 % 4096) == 0) and (guest_cr3 != 0):
            debug.info("\t|__ VMCS 0x%08x [CONSISTENT]" % addr)
def render_text(self, outfd, data):
    """Dump each (optionally --vma-filtered) memory mapping to --dump-dir
    and emit one table row per dumped region.

    Aborts with a fatal error when --dump-dir is missing or not a directory.
    """
    if (not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR)):
        debug.error("Please specify an existing output dir (--dump-dir)")

    self.table_header(outfd, [("Task", "10"),
                              ("VM Start", "[addrpad]"),
                              ("VM End", "[addrpad]"),
                              ("Length", "[addr]"),
                              ("Path", "")])

    for (task, vma) in data:
        # --vma restricts output to the single mapping with that start address.
        if not self._config.VMA or vma.vm_start == self._config.VMA:
            file_name = "task.{0}.{1:#x}.vma".format(task.pid, vma.vm_start)
            file_path = os.path.join(self._config.DUMP_DIR, file_name)

            # BUGFIX: use a context manager so the output file is closed even
            # if reading a page raises; the original leaked the handle.
            with open(file_path, "wb+") as outfile:
                for page in self.read_addr_range(task, vma.vm_start, vma.vm_end):
                    outfile.write(page)

            self.table_row(outfd, task.pid, vma.vm_start, vma.vm_end, vma.vm_end - vma.vm_start, file_path)
def calculate(self):
    """Scan for network objects and yield normalized
    (object, proto, laddr, lport, raddr, rport, state) tuples."""
    addr_space = utils.load_as(self._config)
    if not self.is_valid_profile(addr_space.profile):
        debug.error("This command does not support the selected profile.")
    for objct in self.scan_results(addr_space):
        if isinstance(objct, _UDP_ENDPOINT):
            # For UdpA, the state is always blank and the remote end is asterisks
            for ver, laddr, _ in objct.dual_stack_sockets():
                yield objct, "UDP" + ver, laddr, objct.Port, "*", "*", ""
        elif isinstance(objct, _TCP_ENDPOINT):
            if objct.AddressFamily == AF_INET:
                proto = "TCPv4"
            elif objct.AddressFamily == AF_INET6:
                proto = "TCPv6"
            else:
                # BUGFIX: an unexpected address family previously left
                # 'proto' unbound and raised UnboundLocalError at the yield;
                # label such endpoints generically instead of crashing.
                proto = "TCP"
            yield objct, proto, objct.LocalAddress, objct.LocalPort, \
                objct.RemoteAddress, objct.RemotePort, objct.State
        elif isinstance(objct, _TCP_LISTENER):
            # For TcpL, the state is always listening and the remote port is zero
            for ver, laddr, raddr in objct.dual_stack_sockets():
                yield objct, "TCP" + ver, laddr, objct.Port, raddr, 0, "LISTENING"
def calculate(self):
    """Scan kernel memory or per-task mappings with the compiled YARA rules,
    yielding (task_or_None, address, hit, 64 bytes of context)."""
    ## we need this module imported
    if not has_yara:
        debug.error("Please install Yara from code.google.com/p/yara-project")

    ## leveraged from the windows yarascan plugin
    rules = self._compile_rules()

    ## set the linux plugin address spaces
    common.set_plugin_members(self)

    if self._config.KERNEL:
        ## http://fxr.watson.org/fxr/source/osfmk/mach/i386/vm_param.h?v=xnu-2050.18.24
        if self.addr_space.profile.metadata.get('memory_model', '32bit') == "32bit":
            if not common.is_64bit_capable(self.addr_space):
                kernel_start = 0
            else:
                kernel_start = 0xc0000000
        else:
            kernel_start = 0xffffff8000000000

        scanner = malfind.DiscontigYaraScanner(rules = rules, address_space = self.addr_space)
        for hit, address in scanner.scan(start_offset = kernel_start):
            # Owner None marks a kernel-space hit.
            yield (None, address, hit, scanner.address_space.zread(address, 64))
    else:
        # Scan each process memory block
        for task in pstasks.mac_tasks(self._config).calculate():
            scanner = MapYaraScanner(task = task, rules = rules)
            for hit, address in scanner.scan():
                yield (task, address, hit, scanner.address_space.zread(address, 64))
def render_text(self, outfd, data):
    """Renders the tasks to disk images, outputting progress as they go"""
    if self._config.DUMP_DIR == None:
        debug.error("Please specify a dump directory (--dump-dir)")
    if not os.path.isdir(self._config.DUMP_DIR):
        debug.error(self._config.DUMP_DIR + " is not a directory")

    self.table_header(outfd, [("Process(V)", "[addrpad]"),
                              ("ImageBase", "[addrpad]"),
                              ("Name", "20"),
                              ("Result", "")])

    for task in data:
        task_space = task.get_process_address_space()
        # NOTE(review): the '== None' comparisons below look deliberate —
        # these values are presumably NoneObject proxies that compare equal
        # to None without being the None singleton; do not "fix" to 'is None'.
        if task_space == None:
            result = "Error: Cannot acquire process AS"
        elif task.Peb == None:
            # we must use m() here, because any other attempt to
            # reference task.Peb will try to instantiate the _PEB
            result = "Error: PEB at {0:#x} is paged".format(task.m('Peb'))
        elif task_space.vtop(task.Peb.ImageBaseAddress) == None:
            result = "Error: ImageBaseAddress at {0:#x} is paged".format(task.Peb.ImageBaseAddress)
        else:
            dump_file = "executable." + str(task.UniqueProcessId) + ".exe"
            result = self.dump_pe(task_space, task.Peb.ImageBaseAddress, dump_file)
        self.table_row(outfd, task.obj_offset, task.Peb.ImageBaseAddress, task.ImageFileName, result)
def render_text(self, outfd, data):
    """Print TrueCrypt container details and hexdump the 64-byte master key
    for each device; optionally write the key to --dump-dir."""
    for device in data:
        ext = device.DeviceExtension.dereference_as("EXTENSION")
        outfd.write("Container: {0}\n".format(ext.wszVolume))
        outfd.write("Hidden Volume: {0}\n".format("Yes" if ext.cryptoInfo.hiddenVolume == 1 else "No"))
        outfd.write("Removable: {0}\n".format("Yes" if ext.bRemovable == 1 else "No"))
        outfd.write("Read Only: {0}\n".format("Yes" if ext.bReadOnly == 1 else "No"))
        outfd.write("Disk Length: {0} (bytes)\n".format(ext.DiskLength))
        outfd.write("Host Length: {0} (bytes)\n".format(ext.HostLength))
        outfd.write("Encryption Algorithm: {0}\n".format(ext.cryptoInfo.ea))
        outfd.write("Mode: {0}\n".format(ext.cryptoInfo.mode))
        outfd.write("Master Key\n")
        # 64 bytes of key material read straight from the crypto structure.
        key = device.obj_vm.read(ext.cryptoInfo.master_keydata.obj_offset, 64)
        addr = ext.cryptoInfo.master_keydata.obj_offset
        outfd.write("{0}\n".format("\n".join(
            ["{0:#010x} {1:<48} {2}".format(addr + o, h, ''.join(c))
             for o, h, c in utils.Hexdump(key)
             ])))
        if self._config.DUMP_DIR:
            if not os.path.isdir(self._config.DUMP_DIR):
                debug.error("The path {0} is not a valid directory".format(self._config.DUMP_DIR))
            # Key file is named after the key's in-memory address.
            name = "{0:#x}_master.key".format(addr)
            keyfile = os.path.join(self._config.DUMP_DIR, name)
            with open(keyfile, "wb") as handle:
                handle.write(key)
            outfd.write("Dumped {0} bytes to {1}\n".format(len(key), keyfile))
        outfd.write("\n")
def calculate(self):
    """Build the kernel address space and enumerate its sockets."""
    kernel_space = utils.load_as(self._config)
    # Bail out early on profiles this plugin cannot handle.
    if not self.is_valid_profile(kernel_space.profile):
        debug.error("This command does not support the selected profile.")
    return win32.network.determine_sockets(kernel_space)
def calculate(self):
    """Yield (is_win7, hive_name, opened UserAssist Count key) for each hive."""
    addr_space = utils.load_as(self._config)
    # Windows 7 (6.1) uses different UserAssist GUID subkeys than XP-era.
    win7 = addr_space.profile.metadata.get('major', 0) == 6 and addr_space.profile.metadata.get('minor', 0) == 1

    if not self._config.HIVE_OFFSET:
        hive_offsets = [(self.hive_name(h), h.obj_offset) for h in hivelist.HiveList.calculate(self)]
    else:
        hive_offsets = [("User Specified", self._config.HIVE_OFFSET)]

    for name, hoff in set(hive_offsets):
        h = hivemod.HiveAddressSpace(addr_space, self._config, hoff)
        root = rawreg.get_root(h)
        if not root:
            # Only a user-supplied offset is fatal; auto-detected hives with
            # no readable root key are silently skipped.
            if self._config.HIVE_OFFSET:
                debug.error("Unable to find root key. Is the hive offset correct?")
        else:
            skey = "software\\microsoft\\windows\\currentversion\\explorer\\userassist\\"
            if win7:
                uakey = skey + "{CEBFF5CD-ACE2-4F4F-9178-9926F41749EA}\\Count"
                yield win7, name, rawreg.open_key(root, uakey.split('\\'))
                uakey = skey + "{F4E57C4B-2036-45F0-A9AB-443BCFE33D9F}\\Count"
                yield win7, name, rawreg.open_key(root, uakey.split('\\'))
            else:
                uakey = skey + "{75048700-EF1F-11D0-9888-006097DEACF9}\\Count"
                yield win7, name, rawreg.open_key(root, uakey.split('\\'))
                uakey = skey + "{5E6AB780-7743-11CF-A12B-00AA004AE837}\\Count"
                yield win7, name, rawreg.open_key(root, uakey.split('\\'))
def apply_types(addr_space, ver):
    """Apply the TrueCrypt types for a specific version of TC.

    @param addr_space: <volatility.BaseAddressSpace>
    @param ver: <string> version
    """
    mm_model = addr_space.profile.metadata.get('memory_model', '32bit')
    try:
        # A missing version/memory-model key means this TC build is unsupported.
        vtypes = TrueCryptMaster.version_map[ver][mm_model]
        addr_space.profile.vtypes.update(vtypes)
        # Overlay readable types on top: decoded volume name plus enumerations
        # for the cipher mode and encryption algorithm fields.
        addr_space.profile.merge_overlay({
            'EXTENSION' : [ None, {
                'wszVolume' : [ None, ['String', dict(length = 260, encoding = "utf16")]],
            }],
            'CRYPTO_INFO_t' : [ None, {
                'mode' : [ None, ['Enumeration', dict(target = "long", choices = {1: 'XTS', 2: 'LWR', 3: 'CBC', 4: 'OUTER_CBC', 5: 'INNER_CBC'})]],
                'ea' : [ None, ['Enumeration', dict(target = "long", choices = {1: 'AES', 2: 'SERPENT', 3: 'TWOFISH', 4: 'BLOWFISH', 5: 'CAST', 6: 'TRIPLEDES'})]],
            }]})
        addr_space.profile.compile()
    except KeyError:
        debug.error("Truecrypt version {0} is not supported".format(ver))
def render(self, outfd, grid):
    """Write the output grid into a sqlite database (--output-file), one
    table per plugin, batching inserts for speed."""
    if not self._config.OUTPUT_FILE:
        debug.error("Please specify a valid output file using --output-file")
    self._db = sqlite3.connect(self._config.OUTPUT_FILE, isolation_level = None)
    create = "CREATE TABLE IF NOT EXISTS " + self._plugin_name + "( id INTEGER, " + \
             ", ".join(['"' + self._sanitize_name(i.name) + '" ' + self._column_type(i.type) for i in grid.columns]) + ")"
    self._db.execute(create)

    def _add_multiple_row(node, accumulator):
        # accumulator is [next_row_id, pending_rows].
        accumulator[0] = accumulator[0] + 1 #id
        accumulator[1].append([accumulator[0]] + [str(v) for v in node.values])
        # Flush to the database in batches of 20000 rows to bound memory.
        if len(accumulator[1]) > 20000:
            self._db.execute("BEGIN TRANSACTION")
            insert = "INSERT INTO " + self._plugin_name + " VALUES (?, " + ", ".join(["?"] * len(node.values)) + ")"
            self._db.executemany(insert, accumulator[1])
            accumulator = [accumulator[0], []]
            self._db.execute("COMMIT TRANSACTION")
        # Keep the instance copy in sync so the tail flush below sees it.
        self._accumulator = accumulator
        return accumulator

    grid.populate(_add_multiple_row, self._accumulator)

    #Insert last nodes
    if len(self._accumulator[1]) > 0:
        self._db.execute("BEGIN TRANSACTION")
        insert = "INSERT INTO " + self._plugin_name + " VALUES (?, " + ", ".join(["?"] * (len(self._accumulator[1][0])-1)) + ")"
        self._db.executemany(insert, self._accumulator[1])
        self._db.execute("COMMIT TRANSACTION")
def search_stack_frames(self, start, stack_base, stack_limit, yara_rules, frame_delta=32, unwind=DEFAULT_UNWIND):
    """
    Use Yara to search kernel/user stack frames within +/- frame_delta
    of the frame's start address.

    Frames to search are chosen by using the strategies specified by the
    unwind parameter.

    yara_rules - compiled Yara rules, built for example with:
      1. yara.compile("/path/to/yara.rules")
    or
      2. yara.compile(source="rule dummy { condition: true }")
    """
    if not yara_installed:
        debug.error("In order to search the stack frames, it is necessary to install yara")

    stack_registry = registry.get_plugin_classes(StackTop)

    for unwind_strategy_nm in unwind.split(","):
        if unwind_strategy_nm not in stack_registry:
            raise ValueError("{0} is not a known stack unwind strategy".format(unwind_strategy_nm))
        unwind_strategy = stack_registry[unwind_strategy_nm](start, stack_base, stack_limit, self)
        for frame in itertools.chain(unwind_strategy.up(), unwind_strategy.down()):
            # Read a 2*frame_delta window centred on the frame start.
            search_data = self.get_process_address_space().zread(frame.start - frame_delta, 2 * frame_delta)
            for match in yara_rules.match(data = search_data):
                for moffset, name, value in match.strings:
                    # Match offset here is converted into frame start address and a +/- frame_delta
                    yield match, name, value, frame.start, moffset - frame_delta
    # BUGFIX: end the generator with a plain return. 'raise StopIteration'
    # inside a generator body is converted to RuntimeError under PEP 479
    # (Python 3.7+) and was redundant even on Python 2.
    return
def render_text(self, outfd, data):
    """Report each YARA hit with its owner (kernel, process, or module) and
    a hexdump of the matched region; optionally dump the bytes to disk."""
    if self._config.DUMP_DIR and not os.path.isdir(self._config.DUMP_DIR):
        debug.error(self._config.DUMP_DIR + " is not a directory")

    for o, addr, hit, content in data:
        outfd.write("Rule: {0}\n".format(hit.rule))

        # Find out if the hit is from user or kernel mode
        if o == None:
            outfd.write("Owner: (Unknown Kernel Memory)\n")
            filename = "kernel.{0:#x}.dmp".format(addr)
        elif o.obj_name == "_EPROCESS":
            outfd.write("Owner: Process {0} Pid {1}\n".format(o.ImageFileName, o.UniqueProcessId))
            filename = "process.{0:#x}.{1:#x}.dmp".format(o.obj_offset, addr)
        else:
            outfd.write("Owner: {0}\n".format(o.BaseDllName))
            filename = "kernel.{0:#x}.{1:#x}.dmp".format(o.obj_offset, addr)

        # Dump the data if --dump-dir was supplied
        if self._config.DUMP_DIR:
            path = os.path.join(self._config.DUMP_DIR, filename)
            fh = open(path, "wb")
            fh.write(content)
            fh.close()

        # NOTE: the comprehension variable 'o' intentionally shadows the
        # owner object; it is the per-line offset from Hexdump here.
        outfd.write("".join(["{0:#010x} {1:<48} {2}\n".format(addr + o, h, ''.join(c)) for o, h, c in utils.Hexdump(content)]))
def generator(self, data):
    """Unified-output generator: yield one row per suspicious (injected)
    VAD and optionally dump each region to --dump-dir."""
    if self._config.DUMP_DIR and not os.path.isdir(self._config.DUMP_DIR):
        debug.error(self._config.DUMP_DIR + " is not a directory")

    for task in data:
        for vad, address_space in task.get_vads(vad_filter = task._injection_filter):
            # Skip regions that contain no actual data.
            if self._is_vad_empty(vad, address_space):
                continue

            # Only the first 64 bytes are shown in the output row.
            content = address_space.zread(vad.Start, 64)
            yield (0, [str(task.ImageFileName),
                       int(task.UniqueProcessId),
                       Address(vad.Start),
                       str(vad.Tag),
                       str(vadinfo.PROTECT_FLAGS.get(vad.VadFlags.Protection.v(), "")),
                       str(vad.VadFlags),
                       Bytes(content)])

            # Dump the data if --dump-dir was supplied
            # NOTE(review): placement inside the per-VAD loop is assumed —
            # the flattened source is ambiguous here; confirm that each
            # flagged VAD (not just the last) should be dumped.
            if self._config.DUMP_DIR:
                filename = os.path.join(self._config.DUMP_DIR,
                                        "process.{0:#x}.{1:#x}.dmp".format(
                                        task.obj_offset, vad.Start))
                self.dump_vad(filename, vad, address_space)
def calculate(self):
    """List all files, find a single file by path, or extract an inode's
    contents to --outfile, depending on the options given."""
    linux_common.set_plugin_members(self)
    find_file = self._config.FIND
    inode_addr = self._config.inode
    outfile = self._config.outfile
    listfiles = self._config.LISTFILES

    if listfiles:
        # Walk every superblock and report each (path, inode) pair.
        for (_, _, file_path, file_dentry) in self.walk_sbs():
            yield (file_path, file_dentry.d_inode)

    elif find_file and len(find_file):
        # Stop at the first path match.
        for (_, _, file_path, file_dentry) in self.walk_sbs():
            if file_path == find_file:
                yield (file_path, file_dentry.d_inode)
                break

    elif inode_addr and inode_addr > 0 and outfile and len(outfile) > 0:
        inode = obj.Object("inode", offset = inode_addr, vm = self.addr_space)

        # BUGFIX: report a clean error instead of an unhandled IOError
        # when the output path cannot be opened (matches the other
        # argument-validation paths of this plugin).
        try:
            f = open(outfile, "wb")
        except IOError as e:
            debug.error("Unable to open output file (%s): %s" % (outfile, str(e)))

        for page in self.get_file_contents(inode):
            f.write(page)

        f.close()
    else:
        debug.error("Incorrect command line parameters given.")
def _compile_rules(self):
    """Compile the YARA rules from command-line parameters.

    @returns: a YARA object on which you can call 'match'

    This function causes the plugin to exit if the YARA
    rules have syntax errors or are not supplied correctly.
    """
    rules = None

    try:
        if self._config.YARA_RULES:
            s = self._config.YARA_RULES

            # Don't wrap hex or regex rules in quotes
            if s[0] not in ("{", "/"):
                s = '"' + s + '"'
            # Option for case insensitive searches
            if self._config.CASE:
                s += " nocase"
            # Scan for unicode and ascii strings
            if self._config.WIDE:
                s += " wide ascii"
            rules = yara.compile(sources = {'n' : 'rule r1 {strings: $a = ' + s + ' condition: $a}'})
        elif self._config.YARA_FILE and os.path.isfile(self._config.YARA_FILE):
            rules = yara.compile(self._config.YARA_FILE)
        else:
            debug.error("You must specify a string (-Y) or a rules file (-y)")
    except yara.SyntaxError as why:
        debug.error("Cannot compile rules: {0}".format(str(why)))

    # BUGFIX: the compiled rules were never returned, so every caller got
    # None despite the documented contract above.
    return rules
def calculate(self):
    """Locate .evt event-log files mapped into services.exe and yield
    (name, raw file data) for each."""
    addr_space = utils.load_as(self._config)

    if not self.is_valid_profile(addr_space.profile):
        debug.error("This plugin only works on XP and 2003")

    ## When verbose is specified, we recalculate the list of SIDs for
    ## services in the registry. Otherwise, we take the list from the
    ## pre-populated dictionary in getservicesids.py
    if self._config.VERBOSE:
        ssids = getservicesids.GetServiceSids(self._config).calculate()
        for sid, service in ssids:
            self.extrasids[sid] = " (Service: " + service + ")"
    else:
        for sid, service in getservicesids.servicesids.items():
            self.extrasids[sid] = " (Service: " + service + ")"

    ## Get the user's SIDs from the registry
    self.load_user_sids()

    for proc in tasks.pslist(addr_space):
        # Event logs live in the services.exe process on XP/2003.
        if str(proc.ImageFileName).lower() == "services.exe":
            for vad, process_space in proc.get_vads(vad_filter = proc._mapped_file_filter):
                if vad.FileObject.FileName:
                    name = str(vad.FileObject.FileName).lower()
                    if name.endswith(".evt"):
                        ## Maybe check the length is reasonable, though probably there won't
                        ## ever be event logs that are multiple GB or TB in size.
                        data = process_space.zread(vad.Start, vad.Length)
                        yield name, data
def calculate(self):
    """Analyze the stack of every thread of every interesting process,
    yielding the per-thread result of analyze_stack()."""
    lpi = linux_process_info
    if self._config.SYMBOL_DIR:
        self.symbols = self.load_symbols(self._config.SYMBOL_DIR)
        #print(self.symbols['libc-2.13.so'])
    if self._config.DUMP_FILE:
        try:
            self.dump_file = open(self._config.DUMP_FILE, 'a+')
            debug.info("Opened {} for writing".format(self._config.DUMP_FILE))
        except IOError:
            # BUGFIX: the message mixed %-formatting with str.format, so
            # "%s" was printed literally; use a format placeholder.
            debug.error("Failed to open {} for writing".format(self._config.DUMP_FILE))
    for p in linux_process_info.linux_process_info.calculate(self):
        stats['tasks'] += 1
        if p:
            for i, task in enumerate(p.threads):
                stats['threads'] += 1
                #print(i, task.comm.v(), p.thread_registers[i], p.thread_stacks[i], p.thread_stack_ranges)
                #for reg, value in p.thread_registers[i]._asdict().iteritems():
                #    print(reg, "{:016x}".format(value))
                debug.info("Starting analysis of task: pid {}, thread name {}".format(task.pid, task.comm))
                debug.info("=================================================")
                yield self.analyze_stack(p, task, i) #, self.analyze_registers(p, task, i)
        else:
            # Processes we could not parse are counted but skipped.
            stats['tasks_ignored'] += 1
def calculate(self):
    """List all files, find a single file by path, or extract an inode's
    contents to --outfile, depending on the options given."""
    linux_common.set_plugin_members(self)
    find_file = self._config.FIND
    inode_addr = self._config.inode
    outfile = self._config.outfile
    listfiles = self._config.LISTFILES

    if listfiles:
        # Walk every superblock and report each (path, inode) pair.
        for (_, _, file_path, file_dentry) in self.walk_sbs():
            yield (file_path, file_dentry.d_inode)

    elif find_file and len(find_file):
        # Stop at the first path match.
        for (_, _, file_path, file_dentry) in self.walk_sbs():
            if file_path == find_file:
                yield (file_path, file_dentry.d_inode)
                break

    elif inode_addr and inode_addr > 0 and outfile and len(outfile) > 0:
        inode = obj.Object("inode", offset = inode_addr, vm = self.addr_space)

        try:
            f = open(outfile, "wb")
        except IOError as e:
            debug.error("Unable to open output file (%s): %s" % (outfile, str(e)))

        for page in self.get_file_contents(inode):
            f.write(page)

        f.close()
    else:
        # BUGFIX: previously invalid/missing parameters fell through
        # silently; report them explicitly like the other option paths.
        debug.error("Incorrect command line parameters given.")
def generator(self, data):
    """Render UserAssist subkeys and their ROT13-decoded value names; fatal
    error if none of the requested keys exist in the searched hives."""
    keyfound = False
    for win7, reg, key in data:
        if key:
            keyfound = True
            for s in self.regapi.reg_get_all_subkeys(None, None, given_root = key):
                if s.Name == None:
                    # Unreadable name: NoneObject's .reason explains why.
                    item = "Unknown subkey: " + s.Name.reason
                else:
                    item = s.Name
                yield (0, [str(reg), str(self.regapi.reg_get_key_path(key)), str(key.LastWriteTime), str(item), "", ""])
            for subname, dat in self.regapi.reg_yield_values(None, None, given_root = key, thetype = "REG_BINARY"):
                dat_raw = dat
                # UserAssist value names are ROT13-"encrypted" by Windows.
                try:
                    subname = subname.encode('rot_13')
                except UnicodeDecodeError:
                    pass
                if win7:
                    # Win7 names start with a known-folder GUID; substitute the
                    # human-readable folder name when recognised.
                    guid = subname.split("\\")[0]
                    if guid in folder_guids:
                        subname = subname.replace(guid, folder_guids[guid])
                dat = self.parse_data(dat_raw)
                yield (0, [str(reg), str(self.regapi.reg_get_key_path(key)), str(key.LastWriteTime), "", str(subname), str(dat)])
    if not keyfound:
        debug.error("The requested key could not be found in the hive(s) searched")
def calculate(self):
    """Begin carving and analysing"""
    # A valid, existing output directory is mandatory.
    dump_dir = self._config.DUMP_DIR
    if dump_dir == None:
        debug.error("Please specify a dump directory (--dump-dir)")
    if not os.path.isdir(dump_dir):
        debug.error(dump_dir + " is not a directory")

    # Carve packets out of every available physical memory range.
    self.addr_space = utils.load_as(self._config)
    for base, length in self.addr_space.get_available_addresses():
        self.carve_packets(self.addr_space.zread(base, length))

    # Summarize whatever was successfully parsed.
    packet_stats = self.analyze_packets(self.parsed_packets)

    # Write the raw packets plus the unique public IP list into --dump-dir.
    pcap_path = os.path.abspath(os.path.join(dump_dir, 'packets.pcap'))
    self.dump_packets_to_pcap(self.hex_packets, pcap_path)
    ips_path = os.path.abspath(os.path.join(dump_dir, 'ips.txt'))
    with open(ips_path, 'w') as ip_file:
        for ip_to_check in packet_stats['unique_public_ips']:
            ip_file.write(ip_to_check + '\n')

    return packet_stats
def render_text(self, outfd, data):
    """Runs through the text file outputting which string appears where"""
    addr_space, tasks = data
    verbfd = None
    if self._config.VERBOSE:
        verbfd = outfd

    # Before we bother to start parsing the image, check to make sure the
    # strings are specified correctly
    parsedStrings = []
    # BUGFIX: use a context manager so the strings file is closed (the
    # original handle was never closed).
    with open(self._config.STRING_FILE, "r") as stringlist:
        for stringLine in stringlist:
            (offsetString, string) = self.parse_line(stringLine)
            try:
                offset = int(offsetString)
            except ValueError:
                debug.error("String file format invalid.")
            parsedStrings.append((offset, string))

    reverse_map = self.get_reverse_map(addr_space, tasks, verbfd)

    for (offset, string) in parsedStrings:
        # Look up the 4 KB page containing the offset.
        # (Replaced deprecated dict.has_key with the 'in' operator.)
        if (offset & 0xFFFFF000) in reverse_map:
            outfd.write("{0:08x} [".format(offset))
            outfd.write(' '.join(["{0}:{1}".format(pid[0], pid[1] | (offset & 0xFFF)) for pid in reverse_map[offset & 0xFFFFF000][1:]]))
            outfd.write("] {0}\n".format(string.strip()))
def calculate(self):
    """Scan pool memory for allocations carrying the requested pool tag,
    yielding each _POOL_HEADER together with its first minsize bytes."""
    addr_space = utils.load_as(self._config)

    tag = self._config.TAG
    if tag == None:
        debug.error("You must enter a --tag to find")

    minsize = self._config.MIN_SIZE
    maxsize = self._config.MAX_SIZE

    # Accept only pools whose size falls within [minsize, maxsize].
    def size_ok(candidate):
        return minsize <= candidate <= maxsize

    # --paged selects paged pools, otherwise only non-paged pools are scanned.
    paged = bool(self._config.PAGED)
    non_paged = not paged

    scanner = GenericPoolScan()
    scanner.checks = [
        ('PoolTagCheck', dict(tag = tag)),
        ('CheckPoolSize', dict(condition = size_ok)),
        ('CheckPoolType', dict(paged = paged, non_paged = non_paged)),
    ]

    for offset in scanner.scan(addr_space):
        pool = obj.Object("_POOL_HEADER", offset = offset, vm = addr_space)
        buf = addr_space.zread(offset, minsize)
        yield pool, buf
def calculate(self):
    """Calculates the physical to virtual address mapping"""
    if self._config.STRING_FILE is None or not os.path.exists(self._config.STRING_FILE):
        debug.error("Strings file not found")

    addr_space = utils.load_as(self._config)

    if self._config.OFFSET != None:
        # A single process selected by physical offset.
        tasks = [self.virtual_process_from_physical_offset(addr_space, self._config.OFFSET)]
    elif self._config.SCAN:
        # Pool-scan for processes (also finds unlinked/terminated ones).
        procs = list(filescan.PSScan(self._config).calculate())
        tasks = []
        for task in procs:
            tasks.append(self.virtual_process_from_physical_offset(addr_space, task.obj_offset))
    else:
        tasks = win32.tasks.pslist(addr_space)

    # Optionally restrict to a comma-separated PID list; malformed lists
    # are ignored rather than fatal.
    try:
        if self._config.PIDS is not None:
            pidlist = [int(p) for p in self._config.PIDS.split(',')]
            tasks = [t for t in tasks if int(t.UniqueProcessId) in pidlist]
    except (ValueError, TypeError):
        # TODO: We should probably print a non-fatal warning here
        pass

    return addr_space, tasks
def get_processes(self, addr_space):
    """Enumerate processes based on user options.

    :param addr_space | <addrspace.AbstractVirtualAddressSpace>
    :returns <list>
    """
    bounce_back = taskmods.DllList.virtual_process_from_physical_offset
    # Idiom fix: identity comparison with None (PEP 8) — OFFSET/PID are
    # plain option values, so behavior is unchanged.
    if self._config.OFFSET is not None:
        tasks = [bounce_back(addr_space, self._config.OFFSET)]
    elif self._config.SCAN:
        # Pool-scan for processes (also finds unlinked/terminated ones).
        procs = list(filescan.PSScan(self._config).calculate())
        tasks = []
        for task in procs:
            tasks.append(bounce_back(addr_space, task.obj_offset))
    else:
        tasks = win32.tasks.pslist(addr_space)

    # Restrict to the requested comma-separated PID list; a malformed list
    # is a fatal error here (unlike the sibling strings plugin).
    try:
        if self._config.PID is not None:
            pidlist = [int(p) for p in self._config.PID.split(",")]
            tasks = [t for t in tasks if int(t.UniqueProcessId) in pidlist]
    except (ValueError, TypeError):
        debug.error("Invalid PID {0}".format(self._config.PID))

    return tasks
def calculate(self):
    """Yield (addr_space, procs, module_base, module_name) for each kernel
    module to examine, filtered by --regex or pinned to --base."""
    addr_space = utils.load_as(self._config)

    if self._config.REGEX:
        try:
            if self._config.IGNORE_CASE:
                mod_re = re.compile(self._config.REGEX, re.I)
            else:
                mod_re = re.compile(self._config.REGEX)
        except re.error as e:
            debug.error('Error parsing regular expression: %s' % e)

    # Index loaded modules by base address for the --base lookup below.
    mods = dict((mod.DllBase.v(), mod) for mod in modules.lsmod(addr_space))
    # We need the process list to find spaces for some drivers. Enumerate them here
    # instead of inside the find_space function, so we only have to do it once.
    procs = list(tasks.pslist(addr_space))

    if self._config.BASE:
        if self._config.BASE in mods:
            mod_name = mods[self._config.BASE].BaseDllName
        else:
            mod_name = "UNKNOWN"
        yield addr_space, procs, int(self._config.BASE), mod_name
    else:
        for mod in list(mods.values()):
            if self._config.REGEX:
                # Match against either the full path or the base name.
                if not mod_re.search(str(mod.FullDllName or '')) and not mod_re.search(str(mod.BaseDllName or '')):
                    continue
            yield addr_space, procs, mod.DllBase.v(), mod.BaseDllName
def calculate(self):
    """Enumerate fseventsd watchers from the kernel _watcher_table and decode
    each watcher's subscribed event types."""
    common.set_plugin_members(self)

    if not self.addr_space.profile.obj_has_member("fs_event_watcher", "proc_name"):
        debug.error("This plugin only supports OS X >= 10.8.2. Please file a bug if you are running against a version matching this criteria.")

    event_types = ["CREATE_FILE", "DELETE", "STAT_CHANGED", "RENAME", "CONTENT_MODIFIED", "EXCHANGE", "FINDER_INFO_CHANGED", "CREATE_DIR", "CHOWN"]
    event_types = event_types + ["XATTR_MODIFIED", "XATTR_REMOVED", "DOCID_CREATED", "DOCID_CHANGED"]

    table_addr = self.addr_space.profile.get_symbol("_watcher_table")

    # The watcher table is an 8-slot array of pointers.
    arr = obj.Object(theType = "Array", targetType = "Pointer", count = 8, vm = self.addr_space, offset = table_addr)

    for watcher_addr in arr:
        if not watcher_addr.is_valid():
            continue

        watcher = watcher_addr.dereference_as("fs_event_watcher")

        # proc_name is read as a fixed 33-byte buffer, NUL-truncated below.
        name = self.addr_space.read(watcher.proc_name.obj_offset, 33)
        if name:
            idx = name.find("\x00")
            if idx != -1:
                name = name[:idx]

        # Build a comma-separated list of the event types this watcher
        # subscribed to (a 1 in slot i means event_types[i] is enabled).
        events = ""
        event_arr = obj.Object(theType = "Array", targetType = "unsigned char", offset = watcher.event_list.v(), count = 13, vm = self.addr_space)
        for (i, event) in enumerate(event_arr):
            if event == 1:
                events = events + event_types[i] + ", "

        # Trim the trailing ", " separator.
        if len(events) and events[-1] == " " and events[-2] == ",":
            events = events[:-2]

        yield watcher_addr, name, watcher.pid, events
def calculate(self):
    """Carve task_structs from physical memory by scanning for pointers to
    the kernel's *_sched_class symbols, then sanity-check each candidate."""
    linux_common.set_plugin_members(self)

    phys_addr_space = utils.load_as(self._config, astype="physical")

    # Pointer width depends on the profile's memory model.
    if phys_addr_space.profile.metadata.get("memory_model", "32bit") == "32bit":
        fmt = "<I"
    else:
        fmt = "<Q"

    # Build the scan needles: packed addresses of every *_sched_class symbol.
    needles = []
    for sym in phys_addr_space.profile.get_all_symbol_names("kernel"):
        if sym.find("_sched_class") != -1:
            addr = phys_addr_space.profile.get_symbol(sym)
            needles.append(struct.pack(fmt, addr))

    if len(needles) == 0:
        debug.error("Unable to scan for processes. Please file a bug report.")

    # A hit lands on task_struct.sched_class; rewind to the struct start.
    back_offset = phys_addr_space.profile.get_obj_offset("task_struct", "sched_class")

    scanner = poolscan.MultiPoolScanner(needles)

    for _, offset in scanner.scan(phys_addr_space):
        ptask = obj.Object("task_struct", offset=offset - back_offset, vm=phys_addr_space)

        # exit_state must be 0 or a valid EXIT_ZOMBIE/EXIT_DEAD combination.
        if not ptask.exit_state.v() in [0, 16, 32, 16 | 32]:
            continue

        # Implausible PIDs indicate a false positive.
        if not (0 < ptask.pid < 66000):
            continue

        yield ptask
def tz_from_string(_option, _opt_str, value, parser):
    """Stores a tzinfo object from a string"""
    # Nothing to do when no timezone string was supplied.
    if value is None:
        return

    if value[0] in ['+', '-']:
        # Handed a numeric offset, create an OffsetTzInfo
        digit_pairs = [value[i:i + 2] for i in range(1, len(value), 2)]
        seconds_per_unit = [3600, 60]
        offset = 0
        for i in range(min(len(digit_pairs), len(seconds_per_unit))):
            offset += int(digit_pairs[i]) * seconds_per_unit[i]
        if value[0] == '-':
            offset = -offset
        timezone = OffsetTzInfo(offset = offset)
    else:
        # Value is a lookup, choose pytz over time.tzset
        if tz_pytz:
            try:
                timezone = pytz.timezone(value)
            except pytz.UnknownTimeZoneError:
                debug.error("Unknown display timezone specified")
        else:
            if not hasattr(time, 'tzset'):
                debug.error("This operating system doesn't support tzset, please either specify an offset (eg. +1000) or install pytz")
            timezone = value

    parser.values.tz = timezone
def render_text(self, outfd, data):
    """Text renderer for suspicious VADs: per-region header, hexdump,
    disassembly, and optional dumping to --dump-dir."""
    if not has_distorm3:
        debug.warning("For best results please install distorm3")

    if self._config.DUMP_DIR and not os.path.isdir(self._config.DUMP_DIR):
        debug.error(self._config.DUMP_DIR + " is not a directory")

    for task in data:
        for vad, address_space in task.get_vads(vad_filter=task._injection_filter):
            # Skip regions that contain no actual data.
            if self._is_vad_empty(vad, address_space):
                continue

            content = address_space.zread(vad.Start, 64)

            outfd.write("Process: {0} Pid: {1} Address: {2:#x}\n".format(task.ImageFileName, task.UniqueProcessId, vad.Start))
            outfd.write("Vad Tag: {0} Protection: {1}\n".format(vad.Tag, vadinfo.PROTECT_FLAGS.get(vad.VadFlags.Protection.v(), "")))
            outfd.write("Flags: {0}\n".format(str(vad.VadFlags)))
            outfd.write("\n")

            # this is for address reporting in the output
            data_start = vad.Start

            # all zeros in the first page followed by 558B at the base of
            # the second page is an indicator of wiped PE headers
            if content.count(chr(0)) == len(content):
                if address_space.zread(vad.Start, 0x1000).count(chr(0)) == 0x1000:
                    next_page = address_space.zread(vad.Start + 0x1000, 64)
                    # 0x55 0x8B = "push ebp; mov ebp, esp" function prologue.
                    if next_page[0:2] == "\x55\x8B":
                        outfd.write("**** POSSIBLE WIPED PE HEADER AT BASE *****\n\n")
                        content = next_page
                        data_start = vad.Start + 0x1000

            outfd.write("{0}\n".format("\n".join(["{0:#010x} {1:<48} {2}".format(data_start + o, h, ''.join(c)) for o, h, c in utils.Hexdump(content)])))

            outfd.write("\n")
            outfd.write("\n".join(["{0:#010x} {1:<16} {2}".format(o, h, i) for o, i, h in Disassemble(content, data_start)]))

            # Dump the data if --dump-dir was supplied
            if self._config.DUMP_DIR:
                filename = os.path.join(self._config.DUMP_DIR, "process.{0:#x}.{1:#x}.dmp".format(task.obj_offset, vad.Start))
                self.dump_vad(filename, vad, address_space)

            outfd.write("\n\n")
def calculate(self):
    """Scan a kernel module or a process image for imported API calls,
    yielding (iat_address, call_target, module_name, function_name)."""
    if not has_distorm:
        debug.error("You must install distorm3")

    addr_space = utils.load_as(self._config)

    all_mods = []
    if self._config.OFFSET != None:
        all_tasks = [taskmods.DllList.virtual_process_from_physical_offset(addr_space, self._config.OFFSET)]
    else:
        all_tasks = list(tasks.pslist(addr_space))
        all_mods = list(modules.lsmod(addr_space))

    # Operate in kernel mode if pid is not supplied
    if not self._config.PID and not self._config.OFFSET:
        if not self._config.BASE:
            debug.error("You must specify --BASE")

        base_address = self._config.BASE
        size_to_read = self._config.SIZE

        # Get the size from the module list if its not supplied
        if not size_to_read:
            for module in all_mods:
                if module.DllBase == base_address:
                    size_to_read = module.SizeOfImage
                    break
            # Alternately, try the size from the PE header
            if not size_to_read:
                pefile = obj.Object("_IMAGE_DOS_HEADER", offset=base_address, vm=addr_space)
                try:
                    nt_header = pefile.get_nt_header()
                    size_to_read = nt_header.OptionalHeader.SizeOfImage
                except ValueError:
                    pass

                if not size_to_read:
                    debug.error("You must specify --SIZE")

        kernel_space = tasks.find_space(addr_space, all_tasks, base_address)

        if not kernel_space:
            debug.error("Cannot read supplied address")

        data = kernel_space.zread(base_address, size_to_read)
        apis = self.enum_apis(all_mods)
        addr_space = kernel_space
    else:
        # In process mode, we find the process by PID
        task = None
        for atask in all_tasks:
            if self._config.OFFSET or atask.UniqueProcessId == self._config.PID:
                task = atask
                break

        if not task:
            debug.error("You must supply an active PID")

        task_space = task.get_process_address_space()
        if not task_space:
            debug.error("Cannot acquire process AS")

        all_mods = list(task.get_load_modules())

        # PEB is paged or no DLLs loaded
        if not all_mods:
            debug.error("Cannot load DLLs in process AS")

        # If an address is supplied with a size, try to get
        # the size from the vad node. If neither are supplied,
        # assume we should carve the main process executable.
        if self._config.BASE:
            base_address = self._config.BASE
            size_to_read = self._config.SIZE
            if not size_to_read:
                for vad in task.VadRoot.traverse():
                    if base_address >= vad.Start and base_address <= vad.End:
                        size_to_read = vad.Length
                if not size_to_read:
                    debug.error("You must specify --SIZE")
        else:
            # Its OK to blindly take the 0th element because the
            # executable is always the first module to load.
            base_address = all_mods[0].DllBase
            size_to_read = all_mods[0].SizeOfImage

        data = task_space.zread(base_address, size_to_read)
        apis = self.enum_apis(all_mods)
        addr_space = task_space

    # This is a dictionary of confirmed API calls.
    calls_imported = dict(
        (iat, call)
        for (_, iat, call) in self.call_scan(addr_space, base_address, data)
        if call in apis
    )
    # Scan forward
    self._vicinity_scan(addr_space, calls_imported, apis, base_address, len(data), forward=True)
    # Scan reverse
    self._vicinity_scan(addr_space, calls_imported, apis, base_address, len(data), forward=False)

    for iat, call in sorted(calls_imported.items()):
        yield iat, call, apis[call][0], apis[call][1]
class Command(object):
    """ Base class for each plugin command """
    op = ""
    opts = ""
    args = ""
    cmdname = ""
    # meta_info will be removed
    meta_info = {}
    # Make these class variables so they can be modified across every plugin
    elide_data = True
    tablesep = " "
    text_sort_column = None

    def __init__(self, config, *_args, **_kwargs):
        """ Constructor uses args as an initializer. It creates an instance
        of OptionParser, populates the options, and finally parses the
        command line. Options are stored in the self.opts attribute.
        """
        self._config = config
        self._formatlist = []

    @staticmethod
    def register_options(config):
        """Registers options into a config object provided"""
        config.add_option(
            "OUTPUT", default='text',
            cache_invalidator=False,
            help="Output in this format (support is module specific, see the Module Output Options below)")
        config.add_option(
            "OUTPUT-FILE", default=None,
            cache_invalidator=False,
            help="Write output in this file")
        config.add_option(
            "VERBOSE", default=0, action='count',
            cache_invalidator=False, short_option='v',
            help='Verbose information')
        config.add_option(
            'CASENUMBER', short_option='N', default='001',
            action='store',
            help='Case Number default = 001')
        config.add_option(
            'COMPUTERNAME', short_option='C', default='NoName',
            action='store',
            help='ComputerName default = NoName')

    @classmethod
    def help(cls):
        """ This function returns a string that will be displayed when a
        user lists available plugins.
        """
        try:
            return textwrap.dedent(cls.__doc__)
        except (AttributeError, TypeError):
            return ""

    @staticmethod
    def is_valid_profile(profile):
        """Return True if this command supports *profile* (default: all)."""
        return True

    def calculate(self):
        """ This function is responsible for performing all calculations

        We should not have any output functions (e.g. print) in this
        function at all.

        If this function is expected to take a long time to return some
        data, the function should return a generator.
        """

    def execute(self):
        """ Executes the plugin command."""
        # Check we can support the plugins
        profs = registry.get_plugin_classes(obj.Profile)
        # force user to give a profile if a plugin
        # other than kdbgscan or imageinfo are given:
        plugin_name = self.__class__.__name__.lower()
        if plugin_name != "mac_get_profile":
            if self._config.PROFILE == None:
                # kdbgscan/imageinfo can bootstrap from a default profile.
                if plugin_name in ["kdbgscan", "imageinfo"]:
                    self._config.update("PROFILE", "WinXPSP2x86")
                else:
                    debug.error("You must set a profile!")
            if self._config.PROFILE not in profs:
                debug.error("Invalid profile " + self._config.PROFILE + " selected")
            if not self.is_valid_profile(profs[self._config.PROFILE]()):
                debug.error("This command does not support the profile " + self._config.PROFILE)
        # # Executing plugins is done in two stages - first we calculate
        data = self.calculate()
        ## Then we render the result in some way based on the
        ## requested output mode:
        function_name = "render_{0}".format(self._config.OUTPUT)
        if not self._config.OUTPUT == "sqlite" and self._config.OUTPUT_FILE:
            # '.' asks for an auto-generated timestamped filename.
            out_file = '{0}_{1}.txt'.format(
                time.strftime('%Y%m%d%H%M%S'), plugin_name
            ) if self._config.OUTPUT_FILE == '.' else self._config.OUTPUT_FILE
            if os.path.exists(out_file):
                debug.error(
                    "File " + out_file +
                    " already exists. Cowardly refusing to overwrite it...")
            print 'Outputting to: {0}'.format(out_file)
            outfd = open(out_file, 'wb')
        else:
            outfd = sys.stdout
        try:
            func = getattr(self, function_name)
        except AttributeError:
            ## Try to find out what formats are supported
            result = []
            for x in dir(self):
                if x.startswith("render_"):
                    _a, b = x.split("_", 1)
                    result.append(b)
            print "Plugin {0} is unable to produce output in format {1}. Supported formats are {2}. Please send a feature request".format(self.__class__.__name__, self._config.OUTPUT, result)
            return
        func(outfd, data)

    def _formatlookup(self, profile, code):
        """Code to turn profile specific values into format specifications"""
        code = code or ""
        if not code.startswith('['):
            return code
        # Strip off the square brackets
        code = code[1:-1].lower()
        if code.startswith('addr'):
            spec = fmtspec.FormatSpec("#10x")
            if profile.metadata.get('memory_model', '32bit') == '64bit':
                spec.minwidth += 8
            if 'pad' in code:
                spec.fill = "0"
                spec.align = spec.align if spec.align else "="
            else:
                # Non-padded addresses will come out as numbers,
                # so titles should align >
                spec.align = ">"
            return spec.to_string()
        # Something went wrong
        debug.warning("Unknown table format specification: " + code)
        return ""

    def _elide(self, string, length):
        """Adds three dots in the middle of a string if it is longer than length"""
        # Only elide data if we've been asked to (which we are by default)
        if not self.elide_data:
            return string
        if length == -1:
            return string
        if len(string) < length:
            return (" " * (length - len(string))) + string
        elif len(string) == length:
            return string
        else:
            if length < 5:
                debug.error("Cannot elide a string to length less than 5")
            even = ((length + 1) % 2)
            # NOTE(review): Python 2 integer division; under Python 3 this
            # would produce a float and break the slicing below.
            length = (length - 3) / 2
            return string[:length + even] + "..." + string[-length:]

    def format_value(self, value, fmt):
        """ Formats an individual field using the table formatting codes"""
        profile = addrspace.BufferAddressSpace(self._config).profile
        return ("{0:" + self._formatlookup(profile, fmt) + "}").format(value)

    def table_header(self, outfd, title_format_list=None):
        """Table header renders the title row of a table

        This also stores the header types to ensure
        everything is formatted appropriately.
        It must be a list of tuples rather than a dict for ordering purposes.
        """
        titles = []
        rules = []
        self._formatlist = []
        profile = addrspace.BufferAddressSpace(self._config).profile
        for (k, v) in title_format_list:
            spec = fmtspec.FormatSpec(self._formatlookup(profile, v))
            # If spec.minwidth = -1, this field is unbounded length
            if spec.minwidth != -1:
                spec.minwidth = max(spec.minwidth, len(k))
            # Get the title specification to follow the alignment of the field
            titlespec = fmtspec.FormatSpec(formtype='s',
                                           minwidth=max(spec.minwidth, len(k)))
            titlespec.align = spec.align if spec.align in "<>^" else "<"
            # Add this to the titles, rules, and formatspecs lists
            titles.append(("{0:" + titlespec.to_string() + "}").format(k))
            rules.append("-" * titlespec.minwidth)
            self._formatlist.append(spec)
        # Write out the titles and line rules
        if outfd:
            outfd.write(self.tablesep.join(titles) + "\n")
            outfd.write(self.tablesep.join(rules) + "\n")

    def table_row(self, outfd, *args):
        """Outputs a single row of a table"""
        reslist = []
        if len(args) > len(self._formatlist):
            debug.error("Too many values for the table")
        for index in range(len(args)):
            spec = self._formatlist[index]
            result = self._elide(
                ("{0:" + spec.to_string() + "}").format(args[index]),
                spec.minwidth)
            reslist.append(result)
        outfd.write(self.tablesep.join(reslist) + "\n")

    # Default text format codes keyed by the column's value type.
    text_stock_renderers = {
        Hex: "#x",
        Address: "#8x",
        Address64: "#16x",
        int: "",
        str: "<",
        float: ".2",
        Bytes: ""
    }

    def text_cell_renderers(self, columns):
        """Returns default renderers for the columns listed"""
        renderlist = [FormatCellRenderer("")] * len(columns)
        # FIXME: Really, this should be handled by the plugin knowing what type of AS each object comes from
        # However, as a nasty workaround, we can force all x64 profiles to produce addresses that are 64-bit in length
        # It does not deal with PAE address spaces, or WoW64 addresses, or anything else weird or wonderful
        # This will NOT be in volatility 3.0
        x64 = False
        if self._config.PROFILE.endswith("x64"):
            x64 = True
        for column in columns:
            if not isinstance(column, renderers.Column):
                raise TypeError("Columns must be a list of Column objects")
            columntype = column.type if not x64 or column.type != Address else Address64
            renderlist[column.index] = FormatCellRenderer(
                self.text_stock_renderers[columntype])
        return renderlist

    def unified_output(self, data):
        """Subclasses override this to return a TreeGrid for rendering."""
        raise NotImplementedError(
            "Rendering using the unified output format has not been implemented for this plugin.")

    def _render(self, outfd, renderer, data):
        """Run *data* through unified_output and hand it to *renderer*."""
        output = self.unified_output(data)
        if isinstance(output, renderers.TreeGrid):
            renderer.render(outfd, output)
        else:
            raise TypeError("Unified Output must return a TreeGrid object")

    def render_text(self, outfd, data):
        """Default plain-text renderer built on the unified output."""
        self._render(
            outfd,
            TextRenderer(self.text_cell_renderers,
                         sort_column=self.text_sort_column,
                         config=self._config),
            data)

    def render_greptext(self, outfd, data):
        """Grep-friendly (delimiter separated) text renderer."""
        try:
            self._render(
                outfd,
                GrepTextRenderer(self.text_cell_renderers,
                                 sort_column=self.text_sort_column),
                data)
        except NotImplementedError, why:
            debug.error(why)
        except TypeError, why:
            debug.error(why)
def render_html(self, outfd, data):
    """Render *data* as an HTML document on *outfd*.

    Delegates to the unified-output pipeline; plugins that have not
    implemented unified output abort via debug.error.
    """
    renderer = HTMLRenderer()
    try:
        self._render(outfd, renderer, data)
    except NotImplementedError as why:
        debug.error(why)
def render_json(self, outfd, data):
    """Render *data* as JSON on *outfd*.

    Delegates to the unified-output pipeline; plugins that have not
    implemented unified output abort via debug.error.
    """
    renderer = JSONRenderer()
    try:
        self._render(outfd, renderer, data)
    except NotImplementedError as why:
        debug.error(why)
sort_column=self.text_sort_column), data) except NotImplementedError, why: debug.error(why) except TypeError, why: debug.error(why) def render_csv(self, outfd, data): try: self._render( outfd, CSVTextRenderer(self.text_cell_renderers, sort_column=self.text_sort_column), data) except NotImplementedError, why: debug.error(why) except TypeError, why: debug.error(why) def render_json(self, outfd, data): try: self._render(outfd, JSONRenderer(), data) except NotImplementedError, why: debug.error(why) except TypeError, why: debug.error(why) def render_sqlite(self, outfd, data): try: self._render(outfd, SqliteRenderer(self.__class__.__name__, self._config), data) except NotImplementedError, why:
def calculate(self):
    """Compare in-memory function bodies against a known-good ELF on disk.

    Requires --BASE (mapping start of the binary in the target process)
    and --PATH (trusted copy of the same binary). Yields
    (task, symbol_name, address) for every function whose memory bytes
    differ from the disk bytes.
    """
    linux_common.set_plugin_members(self)
    if not self._config.BASE:
        debug.error("No base address specified.")
    if not self._config.PATH:
        debug.error("No known-good path specified.")
    fd = open(self._config.PATH, "rb")
    known_good = fd.read()
    fd.close()
    # Wrap the on-disk file in a buffer AS so it can be parsed as an ELF.
    bufferas = addrspace.BufferAddressSpace(self._config, data=known_good)
    elf_hdr = obj.Object("elf_hdr", offset=0, vm=bufferas)
    tasks = linux_pslist.linux_pslist.calculate(self)
    for task in tasks:
        proc_as = task.get_process_address_space()
        for vma in task.get_proc_maps():
            if self._config.BASE != vma.vm_start:
                continue
            for sym in elf_hdr.symbols():
                # Skip undefined symbols and non-functions
                # ((st_info & 0xf) == 2 selects STT_FUNC entries).
                if sym.st_value == 0 or (sym.st_info & 0xf) != 2:
                    continue
                symname = elf_hdr.symbol_name(sym)
                sym_offset = sym.st_value
                # in the same vma
                if vma.vm_start < sym.st_value < vma.vm_end:
                    vm_start = vma.vm_start
                    sym_offset = sym_offset - vm_start
                    full_address = sym.st_value
                else:
                    next_vma = vma.vm_next
                    if next_vma.vm_start < sym.st_value < next_vma.vm_end:
                        vm_start = next_vma.vm_start
                        sym_offset = sym.st_value - vm_start
                        full_address = sym.st_value
                    else:
                        # NOTE(review): this branch does not set vm_start or
                        # sym_offset; the read below then uses a value left
                        # over from a previous symbol (or raises
                        # UnboundLocalError on the first one) — TODO confirm.
                        full_address = vma.vm_start + sym.st_value
                mem_buffer = proc_as.read(vm_start + sym_offset, sym.st_size)
                if sym.st_value > vma.vm_start:
                    disk_off = sym.st_value - vm_start
                else:
                    disk_off = sym.st_value
                disk_buffer = bufferas.read(disk_off, sym.st_size)
                # bad
                if mem_buffer != None and disk_buffer != mem_buffer:
                    yield task, symname, full_address
                elif mem_buffer == None:
                    print "Function %s paged out in memory" % symname
def _fix_sym_table(self, module, sections_info):
    """Rebuild a kernel module's symbol table for writing to an ELF file.

    Rewrites each symtab entry so that st_value is section-relative,
    st_info carries a plausible binding/type, and st_shndx points at the
    rebuilt section list. Returns the packed entries as one string.
    """
    all_sym_data = ""
    first_name = False
    print "walking %d syms to be fixed...." % module.num_symtab
    # Entry layout/width depends on the captured kernel's bitness.
    if self.addr_space.profile.metadata.get('memory_model', '32bit') == '64bit':
        sym_type = "elf64_sym"
        st_value_fmt = "<Q"
        st_size_fmt = "<Q"
    else:
        sym_type = "elf32_sym"
        st_value_fmt = "<I"
        st_size_fmt = "<I"
    val_map = {}
    name_idx_map = {}
    syms = obj.Object(theType="Array", targetType=sym_type,
                      count=module.num_symtab, vm=module.obj_vm,
                      offset=module.symtab)
    # First pass: map every module-local symbol value to its section name.
    for (e, sym) in enumerate(syms):
        if sym.st_value > 0 and not module.obj_vm.profile.get_symbol_by_address(
                "kernel", sym.st_value):
            val_map[sym.st_value.v()] = self._find_sec(
                sections_info, sym.st_value)
    for (i, sect) in enumerate(module.get_sections()):
        name_idx_map[str(sect.sect_name)] = (
            i + 1, sect.address)  ### account for null segment
    syms = obj.Object(theType="Array", targetType=sym_type,
                      count=module.num_symtab, vm=module.obj_vm,
                      offset=module.symtab)
    for sym in syms:
        # fix absolute addresses
        st_value_int = sym.st_value.v()
        if st_value_int > 0 and st_value_int in val_map:
            secname = val_map[st_value_int]
            if secname in name_idx_map:
                sect_addr = name_idx_map[secname][1]
                # LOOK_HERE
                st_value_sub = st_value_int - sect_addr
                st_value_full = st_value_int
            else:
                st_value_sub = st_value_int
                st_value_full = st_value_int
        # NOTE(review): when st_value_int is absent from val_map,
        # st_value_sub keeps the previous iteration's value — TODO confirm
        # against the original (pre-collapse) indentation.
        st_value = struct.pack(st_value_fmt, st_value_sub)
        #### fix bindings ####
        # moved out of the sections part
        if sym.st_name > 0:
            first_name = True
        if first_name:
            bind = 1  # STB_GLOBAL
            if sym.st_value == 0:
                stype = 0
            elif module.obj_vm.profile.get_symbol_by_address(
                    "kernel", sym.st_value):
                stype = 0  # STT_NOTYPE
            else:
                secname = val_map[sym.st_value.v()]
                # a .text. section but not relocations
                if secname.find(".text") != -1 and secname.find(
                        ".rela") == -1:
                    stype = 2  # STT_FUNC
                else:
                    stype = 1  # STT_OBJECT
        else:
            bind = 0  # STB_LOCAL
            stype = 3  # STT_SECTION
        # Pack binding into the high nibble, type into the low nibble.
        b = (bind << 4) & 0xf0
        t = stype & 0xf
        st_info = (b | t) & 0xff
        #print "st_info: %x : %x | %x || %d | %x" % (sym.st_value, b, t, st_info, st_info)
        st_info = struct.pack("B", st_info)
        #### fix indexes ####
        if sym.st_value > 0 and sym.st_value.v() in val_map:
            secname = val_map[sym.st_value.v()]
            if secname in name_idx_map:
                st_shndx = name_idx_map[secname][0]
                st_shndx = struct.pack("<H", st_shndx)
            elif not secname:
                st_shndx = struct.pack("<H", sym.st_shndx)
            else:
                debug.error("no index for %s" % secname)
        else:
            st_shndx = struct.pack("<H", sym.st_shndx)
        ######
        # ones that aren't mangled
        st_name = struct.pack("<I", sym.st_name)
        st_other = struct.pack("B", sym.st_other)
        st_size = struct.pack(st_size_fmt, sym.st_size)
        # Field order differs between Elf64_Sym and Elf32_Sym.
        if sym_type == "elf64_sym":
            sec_all = st_name + st_info + st_other + st_shndx + st_value + st_size
            sec_len = 24
        else:
            sec_all = st_name + st_value + st_size + st_info + st_other + st_shndx
            sec_len = 16
        if len(sec_all) != sec_len:
            debug.error("Invalid section length: %d" % len(sec_all))
        all_sym_data = all_sym_data + sec_all
    return all_sym_data
def render_text(self, outfd, data):
    """Output the data"""
    # Are we dumping the text to files?
    dump_to_file = self._config.dump_dir != None
    if dump_to_file and not os.path.isdir(self._config.dump_dir):
        debug.error('{0} is not a directory'.format(self._config.dump_dir))
    # Are we removing nulls?
    rm_nulls = not self._config.nulls
    tasks = win32.tasks.pslist(self._addr_space)
    # Build a dict of the tasks, indexed by pid
    the_tasks = {}
    if self._config.pid is None:
        for t in tasks:
            the_tasks[int(t.UniqueProcessId)] = t
    else:
        for t in tasks:
            if self._config.pid == t.UniqueProcessId:
                the_tasks[int(t.UniqueProcessId)] = t
                break
    outfd.write('{0} process{1} to check.\n'.format(
        len(the_tasks), '' if len(the_tasks) == 1 else 'es'))
    # In case the PID's not found
    if len(the_tasks) < 1:
        return
    # Per-control-class tally written at the end of the report.
    counts = {}
    for winsta, atom_tables in data:
        for desktop in winsta.desktops():
            for wnd, _level in desktop.windows(desktop.DeskInfo.spwnd):
                if self._config.pid is None or int(wnd.Process.UniqueProcessId) in the_tasks:
                    atom_class = self.translate_atom(winsta, atom_tables, wnd.ClassAtom)
                    if not isinstance(atom_class, volatility.obj.NoneObject) and \
                            not isinstance(wnd.Process.ImageFileName, volatility.obj.NoneObject):
                        atom_class_name = str(atom_class)
                        context = '{0}\\{1}\\{2}'.format(
                            winsta.dwSessionId, winsta.Name, desktop.Name)
                        if not self._config.experimental_only:
                            # Edit control
                            if atom_class_name.endswith('!Edit'):  # or atom_class_name == 'Edit':
                                task = the_tasks[int(wnd.Process.UniqueProcessId)]
                                self.dump_edit(dump_to_file, rm_nulls,
                                               atom_class_name, context,
                                               outfd, wnd, task)
                                if 'Edit' in counts:
                                    counts['Edit'] += 1
                                else:
                                    counts['Edit'] = 1
                        # Experimental options
                        if self._config.experimental or self._config.experimental_only:
                            # Listbox control
                            if atom_class_name.endswith('!Listbox'):  # or atom_class_name.endswith('!ComboLBox')):
                                task = the_tasks[int(wnd.Process.UniqueProcessId)]
                                self.dump_listbox(dump_to_file,
                                                  atom_class_name, context,
                                                  outfd, wnd, task)
                                if 'ListBox' in counts:
                                    counts['ListBox'] += 1
                                else:
                                    counts['ListBox'] = 1
                            # Combobox control
                            elif atom_class_name.endswith('!Combobox'):
                                task = the_tasks[int(wnd.Process.UniqueProcessId)]
                                self.dump_combobox(dump_to_file,
                                                   atom_class_name, context,
                                                   outfd, wnd, task)
                                if 'ComboBox' in counts:
                                    counts['ComboBox'] += 1
                                else:
                                    counts['ComboBox'] = 1
    outfd.write('{0}\n'.format('*' * 55))
    for k in counts.keys():
        outfd.write('{0} {1} {2} found.\n'.format(
            counts[k], k, 'control' if counts[k] == 1 else 'controls'))
def get_symbol(self, sym_name, nm_type="", module="kernel"):
    """Resolve *sym_name* to an address via the profile's symbol table.

    sym_name -- name of the symbol to look up
    nm_type  -- 'nm'-style type letter (man nm); required to pick one
                definition when the symbol name is ambiguous
    module   -- module whose symtable to search; "kernel" selects the
                static kernel symbol table, otherwise any name from lsmod

    The table is laid out as
    symtable[module][sym_name] = [(address, type), (address, type), ...].
    Returns the resolved address (shifted by self.shift_address when one
    is set), or None when the module/symbol cannot be found.
    """
    symtable = self.sys_map
    found = None
    if module not in symtable:
        debug.info(
            "Requested module {0:s} not found in symbol table\n".
            format(module))
    elif sym_name not in symtable[module]:
        debug.debug(
            "Requested symbol {0:s} not found in module {1:s}\n".
            format(sym_name, module))
    else:
        candidates = symtable[module][sym_name]
        if len(candidates) > 1:
            # Ambiguous symbol: the caller must disambiguate by nm type.
            if nm_type == "":
                debug.error(
                    "Requested symbol {0:s} in module {1:s} has multiple definitions and no type given\n"
                    .format(sym_name, module))
            else:
                for (addr, stype) in candidates:
                    if stype == nm_type:
                        found = addr
                        break
                if found is None:
                    debug.error(
                        "Requested symbol {0:s} in module {1:s} could not be found\n"
                        .format(sym_name, module))
        else:
            # Single definition: take its address directly.
            found = candidates[0][0]
    # Apply the KASLR shift when one has been computed for this image.
    if self.shift_address and found:
        found = found + self.shift_address
    return found
# NOTE(review): the block below is the tail of a calculate() method whose
# beginning lies outside this chunk; preserved as-is.
elif inode_addr and inode_addr > 0 and outfile and len(outfile) > 0:
    # Dump a single inode's cached pages to the requested output file.
    inode = obj.Object("inode", offset=inode_addr, vm=self.addr_space)
    try:
        f = open(outfile, "wb")
    except IOError, e:
        debug.error("Unable to open output file (%s): %s" % (outfile, str(e)))
    for page in self.get_file_contents(inode):
        f.write(page)
    f.close()
else:
    debug.error("Incorrect command line parameters given.")

def render_text(self, outfd, data):
    """Print one table row per (file_path, inode) pair yielded by calculate."""
    shown_header = 0
    for (file_path, inode) in data:
        # Emit the header lazily so nothing prints when there are no rows.
        if not shown_header:
            self.table_header(outfd, [("Inode Number", "16"),
                                      ("Inode", "[addr]"),
                                      ("File Path", "")])
            shown_header = 1
        inode_num = inode.i_ino
        self.table_row(outfd, inode_num, inode, file_path)
def _validate_config(self): if self._config.PID is not None and len( self._config.PID.split(',')) != 1: debug.error("Please enter the process PID")
def calculate(self):
    """Scan user and kernel space for API hooks.

    Yields (process, module, hook) tuples; process is None for hooks
    found in kernel modules. --QUICK restricts the scan to critical
    processes/DLLs, and whitelisted hooks are suppressed unless
    --NO-WHITELIST is given.
    """
    addr_space = utils.load_as(self._config)
    if not has_distorm3:
        debug.error("Install distorm3 code.google.com/p/distorm/")
    if not self._config.SKIP_PROCESS:
        for proc in self.filter_tasks(tasks.pslist(addr_space)):
            process_name = str(proc.ImageFileName).lower()
            if (self._config.QUICK and
                    process_name not in self.critical_process):
                #debug.debug("Skipping non-critical process {0} ({1})".format(
                #    process_name, proc.UniqueProcessId))
                continue
            process_space = proc.get_process_address_space()
            if not process_space:
                #debug.debug("Cannot acquire process AS for {0} ({1})".format(
                #    process_name, proc.UniqueProcessId))
                continue
            module_group = ModuleGroup(proc.get_load_modules())
            for dll in module_group.mods:
                if not process_space.is_valid_address(dll.DllBase):
                    continue
                dll_name = str(dll.BaseDllName or '').lower()
                # The process's own image is always scanned, even in quick mode.
                if (self._config.QUICK and
                        dll_name not in self.critical_dlls and
                        dll.DllBase != proc.Peb.ImageBaseAddress):
                    #debug.debug("Skipping non-critical dll {0} at {1:#x}".format(
                    #    dll_name, dll.DllBase))
                    continue
                #debug.debug("Analyzing {0}!{1}".format(process_name, dll_name))
                for hook in self.get_hooks(HOOK_MODE_USER, process_space,
                                           dll, module_group):
                    if not self._config.NO_WHITELIST:
                        if self.whitelist(hook.hook_mode | hook.hook_type,
                                          str(proc.ImageFileName),
                                          hook.VictimModule, hook.HookModule,
                                          hook.Function):
                            continue
                    yield proc, dll, hook
    if not self._config.SKIP_KERNEL:
        process_list = list(tasks.pslist(addr_space))
        module_group = ModuleGroup(modules.lsmod(addr_space))
        for mod in module_group.mods:
            #module_name = str(mod.BaseDllName or '')
            #debug.debug("Analyzing {0}".format(module_name))
            kernel_space = tasks.find_space(addr_space, process_list,
                                            mod.DllBase)
            if not kernel_space:
                #debug.debug("No kernel AS for {0} at {1:#x}".format(
                #    module_name, mod.DllBase))
                continue
            for hook in self.get_hooks(HOOK_MODE_KERNEL, kernel_space,
                                       mod, module_group):
                if not self._config.NO_WHITELIST:
                    if self.whitelist(hook.hook_mode | hook.hook_type, "",
                                      hook.VictimModule, hook.HookModule,
                                      hook.Function):
                        continue
                yield None, mod, hook
def profile_unsupported(self, func_name):
    """Abort with a standard 'unsupported profile' message.

    func_name -- name of the calling plugin function, used as the
    message prefix so the report identifies which feature failed.
    """
    message = ("{0:s}: This profile is currently unsupported by this plugin. "
               "Please file a bug report on our issue tracker to have support added.")
    debug.error(message.format(func_name))
def calculate(self):
    """Locate FormBook-injected memory with YARA and extract its config.

    Yields (task, vad_base, vad_end, yara_hit, memory_model, config_data)
    for the first matching VAD in each process.
    """
    if not has_yara:
        debug.error("Yara must be installed for this plugin")
    addr_space = utils.load_as(self._config)
    os, memory_model = self.is_valid_profile(addr_space.profile)
    if not os:
        debug.error("This command does not support the selected profile.")
    rules = yara.compile(sources=formbook_sig)
    for task in self.filter_tasks(tasks.pslist(addr_space)):
        scanner = malfind.VadYaraScanner(task=task, rules=rules)
        for hit, address in scanner.scan():
            vad_base_addr, end = self.get_vad_base(task, address)
            proc_addr_space = task.get_process_address_space()
            data = proc_addr_space.zread(vad_base_addr, end - vad_base_addr)
            config_data = []
            # Only carved regions that still parse as a PE are analyzed.
            try:
                pe = pefile.PE(data=data)
            except:
                continue
            # The offset adjustments below step over opcode bytes between
            # the matched instruction pattern and its immediate operands.
            for pattern in CONFIG_PATTERNS:
                offset = re.search(pattern, data).start()
                offset += 6
                key1_offset = unpack("=I", data[offset:offset + 4])[0] + offset + 11
                key1 = data[key1_offset:key1_offset + (0x14 * 2)]
                offset += 23
                key2_offset = unpack("=I", data[offset:offset + 4])[0] + offset + 11
                key2 = data[key2_offset:key2_offset + (0x14 * 2)]
                offset += 21
                config_size = unpack("=I", data[offset:offset + 4])[0]
                offset += 5
                config_offset = unpack("=I", data[offset:offset + 4])[0] + offset + 11
                config = data[config_offset:config_offset + (config_size * 2)]
                offset += 33
                url_size = unpack("b", data[offset])[0]
            for pattern in STRINGS_PATTERNS:
                offset = re.search(pattern, data).start()
                offset += 19
                strings_size = unpack("=I", data[offset:offset + 4])[0]
                offset += 5
                strings_offset = unpack("=I", data[offset:offset + 4])[0] + offset + 11
                strings_data = data[strings_offset:strings_offset + (strings_size * 2)]
            for pattern in HASHS_PATTERNS:
                offset = re.search(pattern, data).start()
                offset += 1
                hashs_size = unpack("=I", data[offset:offset + 4])[0]
                offset += 11
                hashs_offset = unpack("=I", data[offset:offset + 4])[0] + offset + 11
                hashs_data = data[hashs_offset:hashs_offset + (hashs_size * 2)]
            config_data.append(self.formbook_decrypt(
                key1, key2, config, config_size, strings_data, strings_size,
                url_size, hashs_data, hashs_size))
            yield task, vad_base_addr, end, hit, memory_model, config_data
            # Only the first YARA hit per process is processed.
            break
def calculate(self): """Parse the control structures""" # Check the output folder exists if self._config.DUMP_DIR and not os.path.isdir(self._config.dump_dir): debug.error('{0} is not a directory'.format(self._config.dump_dir)) # Apply the correct vtypes for the profile addr_space = utils.load_as(self._config) addr_space.profile.object_classes.update(Editbox.editbox_classes) self.apply_types(addr_space) # Build a list of tasks tasks = win32.tasks.pslist(addr_space) if self._config.PID: pids = [int(p) for p in self._config.PID.split(',')] the_tasks = [t for t in tasks if t.UniqueProcessId in pids] else: the_tasks = [t for t in tasks] # In case no PIDs found if len(the_tasks) < 1: return # Iterate through all the window objects matching for supported controls mh = messagehooks.MessageHooks(self._config) for winsta, atom_tables in mh.calculate(): for desktop in winsta.desktops(): for wnd, _level in desktop.windows(desktop.DeskInfo.spwnd): if wnd.Process in the_tasks: atom_class = mh.translate_atom(winsta, atom_tables, wnd.ClassAtom) if atom_class: atom_class = str(atom_class) if '!' in atom_class: comctl_class = atom_class.split( '!')[-1].lower() if comctl_class in supported_controls: # Do we need to fake being 32bit for Wow? 
if wnd.Process.IsWow64 and not self.fake_32bit: meta = addr_space.profile.metadata meta['memory_model'] = '32bit' self.apply_types(addr_space, meta) self.fake_32bit = True elif not wnd.Process.IsWow64 and self.fake_32bit: self.apply_types(addr_space) self.fake_32bit = False context = '{0}\\{1}\\{2}'.format( winsta.dwSessionId, winsta.Name, desktop.Name) task_vm = wnd.Process.get_process_address_space( ) wndextra_offset = wnd.v( ) + addr_space.profile.get_obj_size( 'tagWND') wndextra = obj.Object( 'address', offset=wndextra_offset, vm=task_vm) ctrl = obj.Object( supported_controls[comctl_class], offset=wndextra, vm=task_vm) if self._config.DUMP_DIR: dump_to_file( ctrl, wnd.Process.UniqueProcessId, wnd.Process.ImageFileName, self._config.DUMP_DIR) yield context, atom_class, wnd.Process.UniqueProcessId, \ wnd.Process.ImageFileName, wnd.Process.IsWow64, ctrl
def walk_modules_address_space(self, addr_space):
    """Brute-force scan the kernel module area for unlinked modules.

    Determines the module address range from module_addr_min/max (older
    kernels) or mod_tree (newer kernels), builds a scan buffer in which
    unreadable/zero pages are masked out, then yields valid `module`
    objects found at candidate offsets that are not already in lsmod.
    """
    list_mods = [
        x[0].obj_offset
        for x in linux_lsmod.linux_lsmod(self._config).calculate()
    ]
    if addr_space.profile.get_symbol("module_addr_min"):
        min_addr_sym = obj.Object(
            "unsigned long",
            offset=addr_space.profile.get_symbol("module_addr_min"),
            vm=addr_space,
        )
        max_addr_sym = obj.Object(
            "unsigned long",
            offset=addr_space.profile.get_symbol("module_addr_max"),
            vm=addr_space,
        )
    elif addr_space.profile.get_symbol("mod_tree"):
        # mod_tree embeds a latch_tree_root followed by the min/max addresses.
        skip_size = addr_space.profile.get_obj_size("latch_tree_root")
        addr = addr_space.profile.get_symbol("mod_tree")
        ulong_size = addr_space.profile.get_obj_size("unsigned long")
        min_addr_sym = obj.Object("unsigned long",
                                  offset=addr + skip_size,
                                  vm=addr_space)
        max_addr_sym = obj.Object(
            "unsigned long",
            offset=addr + skip_size + ulong_size,
            vm=addr_space,
        )
    else:
        debug.error(
            "Unsupport kernel verison. Please file a bug ticket that includes your kernel version and distribution."
        )
    # Round the range out to whole pages.
    min_addr = min_addr_sym & ~0xFFF
    max_addr = (max_addr_sym & ~0xFFF) + 0x1000
    scan_buf = b""
    llen = max_addr - min_addr  # NOTE(review): unused
    allfs = b"\xff" * 4096
    memory_model = self.addr_space.profile.metadata.get(
        'memory_model', '32bit')
    if memory_model == '32bit':
        minus_size = 4
    else:
        minus_size = 8
    # Replace long zero runs with 0xff filler (keeping a trailing
    # pointer-sized zero) so the regex below cannot match inside them.
    check_bufs = []
    replace_bufs = []
    check_nums = [
        3000, 2800, 2700, 2500, 2300, 2100, 2000, 1500, 1300, 1200,
        1024, 512, 256, 128, 96, 64, 48, 32, 24,
    ]
    for num in check_nums:
        check_bufs.append(b"\x00" * num)
        replace_bufs.append(
            (b"\xff" * (num - minus_size)) + b"\x00" * minus_size)
    for page in range(min_addr, max_addr, 4096):
        to_append = allfs
        tmp = addr_space.read(page, 4096)
        if tmp:
            non_zero = False
            # NOTE(review): iterating a Py2 str yields 1-byte strings; under
            # Python 3 this would yield ints and the comparison would always
            # be true — written for Python 2.
            for t in tmp:
                if t != b"\x00":
                    non_zero = True
                    break
            if non_zero:
                for i in range(len(check_nums)):
                    tmp = tmp.replace(check_bufs[i], replace_bufs[i])
                to_append = tmp
        scan_buf = scan_buf + to_append
    # Lookahead regex: candidate module states 0/1/2 as little-endian ints,
    # matched at every offset (zero-width assertions overlap).
    for cur_addr in re.finditer(
            rb"(?=(\x00\x00\x00\x00|\x01\x00\x00\x00|\x02\x00\x00\x00))",
            scan_buf,
    ):
        mod_addr = min_addr + cur_addr.start()
        if mod_addr in list_mods:
            continue
        m = obj.Object("module", offset=mod_addr, vm=addr_space)
        if m.is_valid():
            yield m
def calculate(self):
    """Locate Ursnif-injected memory with YARA and extract its config.

    Tries the joined-data format first, then falls back to parsing a
    static configuration embedded in the carved PE. Yields
    (task, vad_base, vad_end, yara_hit, memory_model, config_data).
    """
    if not has_yara:
        debug.error("Yara must be installed for this plugin")
    if not has_aplib:
        debug.error("Aplib must be installed for this plugin")
    addr_space = utils.load_as(self._config)
    os, memory_model = self.is_valid_profile(addr_space.profile)
    if not os:
        debug.error("This command does not support the selected profile.")
    rules = yara.compile(sources=ursnif_sig)
    for task in self.filter_tasks(tasks.pslist(addr_space)):
        scanner = malfind.VadYaraScanner(task=task, rules=rules)
        for hit, address in scanner.scan():
            vad_base_addr, end = self.get_vad_base(task, address)
            proc_addr_space = task.get_process_address_space()
            data = proc_addr_space.zread(vad_base_addr, end - vad_base_addr)
            config_data = []
            # Parse standard Ursnif
            config_data = self.parse_joinned_data(data)
            # Parse static configuration type Ursnif
            if not config_data:
                p_data = OrderedDict()
                data = self.pe_magic_check(data)
                try:
                    pe = pefile.PE(data=data)
                except:
                    continue
                imagebase = pe.NT_HEADERS.OPTIONAL_HEADER.ImageBase
                for pattern in CONFIG_PATTERNS:
                    m = re.search(pattern, data)
                    if m:
                        # 64-bit samples use RIP-relative offsets; 32-bit
                        # samples store absolute VAs rebased via imagebase.
                        if pe.FILE_HEADER.Machine in (
                                pefile.MACHINE_TYPE['IMAGE_FILE_MACHINE_IA64'],
                                pefile.MACHINE_TYPE['IMAGE_FILE_MACHINE_AMD64']):
                            c2_num = unpack("b", data[m.start(7) + 19])[0]
                        else:
                            c2_num = unpack("b", data[m.start(6)])[0]
                        # Implausible counts are clamped to a single server.
                        if c2_num >= 16:
                            c2_num = 1
                        for i in range(c2_num):
                            if pe.FILE_HEADER.Machine in (
                                    pefile.MACHINE_TYPE['IMAGE_FILE_MACHINE_IA64'],
                                    pefile.MACHINE_TYPE['IMAGE_FILE_MACHINE_AMD64']):
                                c2_addr = m.start(4) + unpack(
                                    "=I", data[m.start(3):m.start(3) + 4])[0]
                                c2_table_offset = unpack(
                                    "=Q",
                                    data[c2_addr + (8 * i):c2_addr + 8 + (8 * i)])[0] - imagebase
                            else:
                                c2_addr = unpack(
                                    "=I", data[m.start(4):m.start(4) + 4])[0] - imagebase
                                c2_table_offset = unpack(
                                    "=I",
                                    data[c2_addr + (4 * i):c2_addr + 4 + (4 * i)])[0] - imagebase
                            try:
                                c2 = self.decode_data(data, pe, c2_table_offset)
                            except:
                                c2 = "Decode fail"
                            p_data["Server " + str(i)] = c2
                        if pe.FILE_HEADER.Machine in (
                                pefile.MACHINE_TYPE['IMAGE_FILE_MACHINE_IA64'],
                                pefile.MACHINE_TYPE['IMAGE_FILE_MACHINE_AMD64']):
                            serpent_key_offset = m.start(8) + unpack(
                                "=I", data[m.start(7):m.start(7) + 4])[0]
                        else:
                            serpent_key_offset = unpack(
                                "=I", data[m.start(8):m.start(8) + 4])[0] - imagebase
                        try:
                            serpent_key = self.decode_data(
                                data, pe, serpent_key_offset)
                        except:
                            serpent_key = "Decode fail"
                        p_data["Serpent key"] = serpent_key
                for pattern in RSA_PATTERNS:
                    m = re.search(pattern, data)
                    if m:
                        if pe.FILE_HEADER.Machine in (
                                pefile.MACHINE_TYPE['IMAGE_FILE_MACHINE_IA64'],
                                pefile.MACHINE_TYPE['IMAGE_FILE_MACHINE_AMD64']):
                            rsa_key_offset = m.start(2) + unpack(
                                "=I", data[m.start(1):m.start(1) + 4])[0]
                            rsa_key = data[rsa_key_offset + 4:rsa_key_offset + 0x44]
                            rsa_mod = data[rsa_key_offset + 0x44:rsa_key_offset + 0x84]
                        else:
                            rsa_key_offset = unpack(
                                "=I", data[m.start(1):m.start(1) + 4])[0] - imagebase
                            rsa_key = data[rsa_key_offset:rsa_key_offset + 0x40]
                            mod_offset = unpack(
                                "=I", data[m.start(4):m.start(4) + 4])[0] - imagebase
                            rsa_mod = data[mod_offset:mod_offset + 0x40]
                        p_data["RSA key"] = rsa_key.encode("hex")
                        p_data["RSA modulus"] = rsa_mod.encode("hex")
                config_data.append(p_data)
            yield task, vad_base_addr, end, hit, memory_model, config_data
            # Only the first YARA hit per process is processed.
            break
def set_plugin_members(obj_ref):
    """Attach a loaded address space to *obj_ref* and validate its profile.

    Loads the address space from the plugin's config, stores it on
    obj_ref.addr_space, and aborts via debug.error when the plugin does
    not support the selected profile.
    """
    obj_ref.addr_space = utils.load_as(obj_ref._config)
    if obj_ref.is_valid_profile(obj_ref.addr_space.profile):
        return
    debug.error("This command does not support the selected profile.")
def render_text(self, outfd, data):
    """Render threadmap results as text, optionally dumping VAD regions.

    data yields (proc, thread_info, thread_points_to_proc_image,
    suspicious_thread_in_process) tuples from calculate(). Writes to
    outfd; dumps VADs into --dump-dir (-D) when given.
    """
    outfd.write("\n\nThread Map Information:\n\n")
    verbose = self._config.verbose
    dump_dir = self._config.DUMP_DIR
    # Check if -D (dump dir) is passed and directory exists
    if dump_dir and not os.path.isdir(dump_dir):
        debug.error("'{}' was not found".format(dump_dir))
    for proc, thread_info, thread_points_to_proc_image, \
            suspicious_thread_in_process in data:
        # A legitimate process won't be printed unless verbose is passed
        if not verbose and not suspicious_thread_in_process \
                and thread_points_to_proc_image == PROCESS_MESSAGES["Points to Process Image"]:
            continue
        proc_pid = proc.UniqueProcessId
        proc_ppid = proc.InheritedFromUniqueProcessId
        proc_name = proc.ImageFileName
        address_space = proc.get_process_address_space()
        # Skip printing out the system process (PID 4) if not verbosed
        if proc_pid == 4 and not verbose:
            continue
        outfd.write("Process: {} PID: {} PPID: {}\n\n".format(proc_name, proc_pid, proc_ppid))
        # Process-level findings banner.
        if not thread_points_to_proc_image:
            outfd.write("** {}\n".format(PROCESS_MESSAGES["No Image File"]))
        elif thread_points_to_proc_image == PROCESS_MESSAGES["Thread Suspended"]:
            outfd.write("** {}\n".format(PROCESS_MESSAGES["Thread Suspended"]))
        if suspicious_thread_in_process:
            outfd.write("** Found suspicious threads in process\n")
        outfd.write("\n")
        for thread in thread_info:
            thread_id = thread.struct.Cid.UniqueThread.v()
            # Print threads if they are suspected, verbose is enabled, or none
            # of the threads point to process's image file
            if verbose or thread.reason or not thread_points_to_proc_image:
                # Check if the thread is terminated
                if thread.struct.Terminated != 0:
                    outfd.write("Thread : {} (IS "\
                                "Terminated)\n\n".format(thread_id))
                else:
                    outfd.write("Thread ID: {} (ACTIVE)\n\n".format(thread_id))
                # Print out the thread's findings if there are some
                if thread.reason:
                    outfd.write("Reason:\n {}\n\n".format('\n'.join(thread.reason)))
                # Couldn't find a valid location for thread
                if thread.no_location:
                    outfd.write("Couldn't obtain thread's location in memory, "
                                "Might be unmapped\n")
                # Print out thread's mapped kernel space information
                elif thread.mapped_to_kernel:
                    outfd.write("Kernel space info:\n")
                    outfd.write("\tThread Entry Point: {0:#x}\n".format(thread.entry_point))
                    outfd.write("\tMapped to kernel at: {0:#x}\n".format(thread.module_start_address))
                    outfd.write("\tModule name: {}\n\n".format(thread.file_object_name))
                    outfd.write(thread.disassemble(address_space, thread.entry_point))
                # Print out thread's mapped vad information
                elif thread.mapped_to_vad:
                    outfd.write("Vad Info:\n")
                    outfd.write("\tThread Entry Point: {0:#x}\n".format(thread.entry_point))
                    outfd.write(thread.parse_vad(thread.vad_object))
                    outfd.write(thread.disassemble(address_space, thread.entry_point))
                    # Dump the vad data
                    if dump_dir:
                        filename = "Process.{0}.Thread.{1}.entrypoint.{2:#x}.dmp".format(proc_pid, thread_id, thread.entry_point)
                        full_path = os.path.join(dump_dir, filename)
                        self.dump_vad(full_path, thread.vad_object, address_space)
                # Print out thread's vad information from found JMP/CALL address
                if thread.jmp_data:
                    for type_of_memory, memory_object, jmp_address in thread.jmp_data:
                        if type_of_memory == "vad":
                            outfd.write("\n\tSuspicious JMP/CALL to: {0:#x}\n".format(jmp_address))
                            outfd.write(thread.parse_vad(memory_object))
                            outfd.write(thread.disassemble(address_space, jmp_address))
                            # Dump suspicious JMP/CALL vad data
                            if dump_dir:
                                filename = "Process.{0}.Thread.{1}.JMP_or_CALL_address.{2:#x}.dmp".format(proc_pid, thread_id, jmp_address)
                                full_path = os.path.join(dump_dir, filename)
                                self.dump_vad(full_path, memory_object, address_space)
                        elif type_of_memory == "kernel":
                            # memory_object is a (module name, base) pair here.
                            module_name, module_start_address = memory_object
                            outfd.write("\n\tSuspicious JMP/CALL to: {0:#x}\n".format(jmp_address))
                            outfd.write("\tMapped to kernel at: {0:#x}\n".format(module_start_address))
                            outfd.write("\tModule name: {}\n\n".format(module_name))
                            outfd.write(thread.disassemble(address_space, jmp_address))
                        else:
                            # Target address is in neither a VAD nor a module.
                            outfd.write("\n\n\tSuspicious JMP/CALL to: {0:#x}\n".format(jmp_address))
                            outfd.write("\n\t** Couldn't read memory\n")
                # Per-thread separator.
                outfd.write("----------------------------------------------------------------------\n\n")
        # Per-process separator.
        outfd.write("----------------------------------------------------------------------\n\n")
def debug_error(msg):
    """Proxy a fatal error message to Volatility's debug facility."""
    debug.error(msg)
def calculate(self):
    """Compile the requested YARA rules and scan kernel or process memory.

    Yields (module_or_task, address, hit, preview) tuples where preview is
    a 1024-byte read at the hit address.
    """
    if not has_yara:
        debug.error("Please install Yara from code.google.com/p/yara-project")

    addr_space = utils.load_as(self._config)

    if self._config.YARA_RULES:
        s = self._config.YARA_RULES
        # Don't wrap hex or regex rules in quotes
        if s[0] not in ("{", "/"):
            s = '"' + s + '"'
        # Scan for unicode strings.
        # BUG FIX: the YARA string modifier must be whitespace-separated
        # from the string ('"foo" wide'); previously '"foo"wide' was built.
        if self._config.WIDE:
            s += " wide"
        rules = yara.compile(sources = {
            'n': 'rule r1 {strings: $a = ' + s + ' condition: $a}'
        })
    elif self._config.YARA_FILE:
        rules = yara.compile(self._config.YARA_FILE)
    else:
        debug.error("You must specify a string (-Y) or a rules file (-y)")

    if self._config.KERNEL:
        # Find KDBG so we know where kernel memory begins. Do not assume
        # the starting range is 0x80000000 because we may be dealing with
        # an image with the /3GB boot switch.
        kdbg = tasks.get_kdbg(addr_space)
        # FIXME: Addresses should be truncated to 48 bits. Currently
        # we do that in Pointer.__eq__ but not in Pointer.v(). This prevents
        # module lookups in yarascan's --kernel mode on x64 from working
        # properly because win32.tasks.find_module cannot match the truncated
        # address with non-truncated mod.DllBase.v(). Changing Pointer.v()
        # could have wide spread effects, so this yarascan issue is fixed
        # temporarily by giving YaraScan a private version of find_module
        # that performs the pointer truncation. After fixing this globally,
        # we can dereference MmSystemRangeStart as a Pointer instead of an
        # address and then remove the manual bitmask below.
        start = kdbg.MmSystemRangeStart.dereference_as("address")
        start = start & 0xffffffffffff
        # Modules so we can map addresses to owners
        mods = dict((mod.DllBase & 0xffffffffffff, mod)
                    for mod in modules.lsmod(addr_space))
        mod_addrs = sorted(mods.keys())
        # There are multiple views (GUI sessions) of kernel memory.
        # Since we're scanning virtual memory and not physical,
        # all sessions must be scanned for full coverage. This
        # really only has a positive effect if the data you're
        # searching for is in GUI memory.
        sessions = []
        for proc in tasks.pslist(addr_space):
            sid = proc.SessionId
            # Skip sessions we've already seen
            if sid == None or sid in sessions:
                continue
            session_space = proc.get_process_address_space()
            if session_space == None:
                continue
            sessions.append(sid)
            scanner = DiscontigYaraScanner(address_space = session_space,
                                           rules = rules)
            for hit, address in scanner.scan(start_offset = start):
                module = self.find_module(mods, mod_addrs, address)
                yield (module, address, hit,
                       session_space.zread(address, 1024))
    else:
        for task in self.filter_tasks(tasks.pslist(addr_space)):
            scanner = VadYaraScanner(task = task, rules = rules)
            for hit, address in scanner.scan():
                yield (task, address, hit,
                       scanner.address_space.zread(address, 1024))
def runconfig(self):
    """Check and set up configuration options upon initialization.

    Validates the dump directory and pcap options, opens the dpkt pcap
    writer when requested, and parses the -F packet-filter list into
    protocol and ethertype key lists. Returns True on success; fatal
    problems are reported via debug.error().
    """
    self.keylist = []
    self.protokeylist = []
    self.ethkeylist = []
    # Saving packets (pcap or raw) requires an existing dump directory.
    if any((self.config.SAVE_PCAP, self.config.SAVE_RAW)):
        if self.config.DUMP_DIR == None:
            debug.error(
                "Please specify a dump directory (--dump-dir)\nExample: -C out.bal --dump-dir outdir"
            )
        if not os.path.isdir(self.config.DUMP_DIR):
            debug.error(self.config.DUMP_DIR + " is not a directory")
    # Set up the dpkt pcap writer when pcap output was requested.
    if self.config.SAVE_PCAP != None:
        if not has_dpkt:
            debug.error("Install dpkt http://code.google.com/p/dpkt/")
        else:
            self.pcapfile = self.config.SAVE_PCAP
            # Ensure the output file name ends in (p)cap.
            if self.pcapfile[-3:].lower() != "cap":
                self.pcapfile = self.pcapfile + ".pcap"
            # NOTE: self.pcapfile is rebound from the name string to the
            # open file object, matching the original behavior.
            self.pcapfile = open(
                os.path.join(self.config.DUMP_DIR, self.pcapfile), 'wb')
            self.pcw = dpkt.pcap.Writer(self.pcapfile)
    if self.config.FILTER_PACKET:
        temp_list = self.config.FILTER_PACKET.replace(" ", "")
        temp_list = temp_list.split(',')
        for filtername in temp_list:
            # SECURITY FIX: these values come straight from the command
            # line and were previously passed to eval(), allowing
            # arbitrary code execution. int(x, 0) accepts the same
            # numeric literals (0x.../decimal/...) without executing
            # anything; malformed input raises ValueError instead.
            refval = int(str(filtername), 0)
            protoitem = self.protocols.get(refval)
            etheritem = self.ethertypes.get(refval)
            if protoitem:
                self.protokeylist.append(str(refval))
            if etheritem:
                self.ethkeylist.append(str(refval))
        # Filter selection matched neither list: show valid options.
        if len(self.ethkeylist or self.protokeylist) == 0:
            estr = "Ethernet Types:\n"
            pstr = "Protocols:\n"
            pitems = self.protocols.items()
            ethItems = self.ethertypes.items()
            for p in pitems:
                pstr += '0x%04X' % p[0] + " : " + str(p[1]) + "\n"
            pstr += "\n"
            for e in ethItems:
                estr += '0x%04X' % e[0] + " : " + str(e[1]) + "\n"
            dbgstr = "Invalid filter type or format. Available options are:\n%s\n\nExample: \"-F 0x0800,0x86DD\"" % (
                pstr + estr)
            debug.error('%s' % dbgstr)
        else:
            self.filterset = 1
    return True
def __init__(self, renderers_func, config):
    """Load the XML renderer definition file.

    renderers_func is accepted for interface compatibility (not used
    here); config is the plugin configuration object. Fails fast when
    the XML parsing libraries are unavailable.
    """
    if not can_parse_xml_files:
        debug.error("You do not have the required libraries for xml files\nYou have to install lxml")
    self._config = config
    self._tree = ElementTree.parse('xml_renderer.xml')
    # BUG FIX: previously read the undefined name 'tree' (NameError at
    # runtime); the parsed tree is stored on self._tree.
    self._root = self._tree.getroot()
def calculate(self):
    """Scan processes for TSCookie/loader signatures and extract config.

    Yields (task, vad_base_addr, end, hit, memory_model, config_data)
    for the first YARA hit in each matching process.
    """
    if not has_yara:
        debug.error("Yara must be installed for this plugin")
    addr_space = utils.load_as(self._config)
    os, memory_model = self.is_valid_profile(addr_space.profile)
    if not os:
        debug.error("This command does not support the selected profile.")
    rules = yara.compile(sources=tscookie_sig)
    for task in self.filter_tasks(tasks.pslist(addr_space)):
        scanner = malfind.VadYaraScanner(task=task, rules=rules)
        for hit, address in scanner.scan():
            vad_base_addr, end = self.get_vad_base(task, address)
            proc_addr_space = task.get_process_address_space()
            data = proc_addr_space.zread(vad_base_addr, end - vad_base_addr)
            config_data = []
            # Locate the embedded payload DLL inside the VAD.
            # BUG FIX: str.find() returns -1 when the MZ header is absent;
            # the old truthiness test ('if dll_index:') treated -1 as a
            # valid index and handed one byte of garbage to pefile.
            # An MZ header at offset 0 is still skipped, as before
            # (TODO confirm that skip was intentional).
            dll_index = data.find(MZ_HEADER)
            if dll_index > 0:
                dll_data = data[dll_index:]
                dll = pefile.PE(data=dll_data)
            else:
                continue
            # TSCookie proper stores the config RVA two bytes further in
            # than the loader variant.
            if "TSCookie" in str(hit):
                d = 2
            else:
                d = 0
            # BUG FIX: config_addr was left unbound (NameError) when no
            # pattern matched or every unpack failed; initialize and
            # skip the hit cleanly in that case.
            config_addr = None
            for pattern in CONFIG_PATTERNS:
                mc = re.search(pattern, dll_data)
                if mc:
                    try:
                        (config_rva, ) = unpack(
                            "=I",
                            dll_data[mc.start() + d + 1:mc.start() + d + 5])
                        config_addr = dll.get_physical_by_rva(
                            config_rva - dll.NT_HEADERS.OPTIONAL_HEADER.ImageBase)
                        break
                    except:
                        print("[!] Not found config data.\n")
            if config_addr is None:
                continue
            # The config blob runs until three consecutive NUL bytes.
            config_size = 0
            enc = []
            while not (
                    dll_data[config_addr + config_size] == "\x00"
                    and dll_data[config_addr + config_size + 1] == "\x00"
                    and dll_data[config_addr + config_size + 2] == "\x00"):
                enc.append(dll_data[config_addr + config_size])
                config_size += 1
            enc_config_data = "".join(enc)
            # 0x8D4-byte blobs use a 4-byte RC4 key; otherwise 0x80 bytes.
            if config_size == 0x8D4:
                rc4key_length = 4
            else:
                rc4key_length = 0x80
            try:
                enc_config = enc_config_data[rc4key_length:]
                rc4key = enc_config_data[:rc4key_length]
                config = self.rc4(enc_config, rc4key)
                if len(config) > 0:
                    if "TSCookie" in str(hit):
                        config_data.append(self.parse_config(config))
                    else:
                        config_data.append(
                            self.parse_loader_config(config))
            except:
                print("[!] Not found config data.\n")
            yield task, vad_base_addr, end, hit, memory_model, config_data
            # Report only the first hit per process.
            break
# Standard library.
import re
import os
import struct
from collections import namedtuple

# Volatility framework modules.
import volatility.plugins.registry.lsadump as lsadump
import volatility.debug as debug
import volatility.cache as cache
import volatility.utils as utils
import volatility.plugins.common as common
import volatility.plugins.registry.registryapi as registryapi
import volatility.plugins.filescan as filescan
import volatility.plugins.dumpfiles as dumpfiles
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Bytes

# DPAPIck is a hard requirement; bail out early when it is missing.
try:
    from DPAPI.Core import *
except:
    debug.error('Please install DPAPIck library: ' + \
                'https://bitbucket.org/jmichel/dpapick')

from Crypto.PublicKey import RSA
from Crypto.Util.number import bytes_to_long

# Ensures debug messages are only displayed when the user
# enables Verbose information.
VERBOSE = False


def debug_error(msg):
    """Forward *msg* to Volatility's fatal error handler."""
    debug.error(msg)


def debug_info(msg):
    """Emit *msg* as an informational message, but only in verbose mode."""
    if VERBOSE:
        debug.info(msg)
def calculate(self):
    """Walk the Linux IDT and flag entries whose handlers are not kernel symbols.

    Yields (index, entry, handler_address, symbol_name, hooked) for the
    hardware vectors 0-19 plus the legacy system-call vector 0x80, where
    hooked is 1 when the handler address is not a known kernel symbol.
    """
    linux_common.set_plugin_members(self)

    if self.profile.metadata['arch'] not in ["x64", "x86"]:
        debug.error(
            "This plugin is only supported on Intel-based memory captures"
        )

    known_addrs = self.profile.get_all_addresses()

    # hw handlers + system call
    vectors = list(range(0, 20)) + [128]

    # Pick the IDT entry type this kernel's profile actually defines;
    # the last candidate is the fallback used without a has_type check.
    if self.profile.metadata.get('memory_model', '32bit') == "32bit":
        candidates = ["gate_struct", "desc_struct"]
    else:
        candidates = ["gate_struct64", "gate_struct", "idt_desc"]
    entry_type = candidates[-1]
    for cand in candidates[:-1]:
        if self.profile.has_type(cand):
            entry_type = cand
            break

    # Written as a loop because there are supposedly kernels with
    # per-CPU IDTs, though none has been observed yet.
    table_bases = [self.addr_space.profile.get_symbol("idt_table")]
    for table_base in table_bases:
        idt = obj.Object(
            theType='Array',
            offset=table_base,
            vm=self.addr_space,
            targetType=entry_type,
            count=256,
        )
        for vec in vectors:
            entry = idt[vec]
            if not entry:
                continue

            # Some entry types expose the handler directly; others split
            # it across low/middle/high offset fields.
            if hasattr(entry, "Address"):
                handler = entry.Address
            else:
                hi = entry.offset_high if hasattr(entry, "offset_high") else 0
                handler = (hi << 32) | (entry.offset_middle << 16) | entry.offset_low

            if handler == 0:
                continue
            if handler in known_addrs:
                sym = self.profile.get_symbol_by_address("kernel", handler)
                yield (vec, entry, handler, sym, 0)
            else:
                yield (vec, entry, handler, "HOOKED", 1)
def calculate(self):
    """Build timeline events from the requested artifact TYPEs.

    Generator yielding getoutput()-formatted lines for image date, IE
    history, processes, registry-key handles and DLL PE timestamps.
    OUTPUT selects text/body/xlsx modes; HIGHLIGHT enables suspicious-
    process detection (malfind + psxview) for xlsx coloring.
    """
    # Validate output-format options up front.
    if self._config.OUTPUT == "xlsx" and not has_openpyxl:
        debug.error(
            "You must install OpenPyxl for xlsx format:\n\thttps://bitbucket.org/ericgazoni/openpyxl/wiki/Home"
        )
    elif self._config.OUTPUT == "xlsx" and not self._config.OUTPUT_FILE:
        debug.error(
            "You must specify an output *.xlsx file!\n\t(Example: --output-file=OUTPUT.xlsx)"
        )
    if (self._config.HIVE or self._config.USER) and "Registry" not in self._config.TYPE:
        debug.error(
            "You must use --registry in conjuction with -H/--hive and/or -U/--user"
        )
    # Reject unknown artifact types early.
    if self._config.TYPE != None:
        for t in self._config.TYPE.split(","):
            if t.strip() not in self.types and t.strip() != "Registry":
                debug.error(
                    "You have entered an incorrect type: {0}".format(t))
    addr_space = utils.load_as(self._config)
    version = (addr_space.profile.metadata.get('major', 0),
               addr_space.profile.metadata.get('minor', 0))
    pids = {}  #dictionary of process IDs/ImageFileName
    body = False
    if self._config.OUTPUT == "body":
        body = True
    if self._config.MACHINE != "":
        # Pad the machine name so it separates cleanly in output lines.
        self._config.update("MACHINE", "{0} ".format(self._config.MACHINE))
    detections = False
    if self._config.OUTPUT == "xlsx" and self._config.HIGHLIGHT != None:
        detections = True
    if "ImageDate" in self._config.TYPE:
        im = imageinfo.ImageInfo(self._config).get_image_time(addr_space)
        yield self.getoutput("[{0}LIVE RESPONSE]{1} (System time)".format(
            self._config.MACHINE, "" if body else "|"),
                             im['ImageDatetime'],
                             body=body)
    # IE history records only exist on Windows <= 6.1 (Win7/2008R2 and older).
    if version <= (6, 1) and "IEHistory" in self._config.TYPE:
        self._config.update("LEAK", True)
        data = iehistory.IEHistory(self._config).calculate()
        for process, record in data:
            ## Extended fields are available for these records
            if record.obj_name == "_URL_RECORD":
                line = "[{6}IEHISTORY]{0} {1}->{5}{0} PID: {2}/Cache type \"{3}\" at {4:#x}".format(
                    "" if body else "|", process.ImageFileName,
                    process.UniqueProcessId, record.Signature,
                    record.obj_offset, record.Url, self._config.MACHINE)
                yield self.getoutput(line,
                                     record.LastModified,
                                     end=record.LastAccessed,
                                     body=body)
        self._config.remove_option("REDR")
        self._config.remove_option("LEAK")
    psx = []
    # NOTE(review): self._config.Type (mixed case) vs self._config.TYPE is
    # used inconsistently below — confirm the config object resolves option
    # names case-insensitively.
    if "Process" in self._config.Type or "TimeDateStamp" in self._config.Type or \
            "LoadTime" in self._config.Type or "_CM_KEY_BODY" in self._config.Type:
        psx = psxview.PsXview(self._config).calculate()
    for offset, eprocess, ps_sources in psx:
        pids[eprocess.UniqueProcessId.v()] = eprocess.ImageFileName
        if "Process" in self._config.TYPE:
            line = "[{5}PROCESS]{0} {1}{0} PID: {2}/PPID: {3}/POffset: 0x{4:08x}".format(
                "" if body else "|", eprocess.ImageFileName,
                eprocess.UniqueProcessId,
                eprocess.InheritedFromUniqueProcessId, offset,
                self._config.MACHINE)
            yield self.getoutput(line,
                                 eprocess.CreateTime,
                                 end=eprocess.ExitTime,
                                 body=body)
        # psxview returns physical-layer objects; rebuild a virtual-space
        # EPROCESS when the object has no vtop.
        if not hasattr(eprocess.obj_vm, "vtop"):
            eprocess = taskmods.DllList(
                self._config).virtual_process_from_physical_offset(
                    addr_space, eprocess.obj_offset)
            if eprocess == None:
                continue
        else:
            ps_ad = eprocess.get_process_address_space()
            if ps_ad == None:
                continue
        # XP/2003 only: working-set trim timestamp.
        if version[0] == 5 and "Process" in self._config.TYPE:
            line = "[{5}PROCESS LastTrimTime]{0} {1}{0} PID: {2}/PPID: {3}/POffset: 0x{4:08x}".format(
                "" if body else "|", eprocess.ImageFileName,
                eprocess.UniqueProcessId,
                eprocess.InheritedFromUniqueProcessId, offset,
                self._config.MACHINE)
            yield self.getoutput(line, eprocess.Vm.LastTrimTime, body=body)
        if eprocess.ObjectTable.HandleTableList and "_CM_KEY_BODY" in self._config.TYPE:
            # Registry-key handles: timeline the key's last write time.
            for handle in eprocess.ObjectTable.handles():
                if not handle.is_valid():
                    continue
                name = ""
                object_type = handle.get_object_type()
                if object_type == "Key":
                    key_obj = handle.dereference_as("_CM_KEY_BODY")
                    name = key_obj.full_key_name()
                    line = "[{6}Handle (Key)]{0} {1}{0} {2} PID: {3}/PPID: {4}/POffset: 0x{5:08x}".format(
                        "" if body else "|", name, eprocess.ImageFileName,
                        eprocess.UniqueProcessId,
                        eprocess.InheritedFromUniqueProcessId, offset,
                        self._config.MACHINE)
                    yield self.getoutput(
                        line,
                        key_obj.KeyControlBlock.KcbLastWriteTime,
                        body=body)
        if detections and "Process" in self._config.TYPE:
            # Flag processes with injected memory (malfind) or psxview
            # visibility anomalies for xlsx highlighting.
            injected = False
            for vad, address_space in eprocess.get_vads(
                    vad_filter=eprocess._injection_filter):
                if malfind.Malfind(self._config)._is_vad_empty(
                        vad, address_space):
                    continue
                line = "PID: {0}/PPID: {1}/POffset: 0x{2:08x}".format(
                    eprocess.UniqueProcessId,
                    eprocess.InheritedFromUniqueProcessId, offset)
                self.suspicious[line.strip()] = {
                    "reason": "MALFIND",
                    "color": "RED"
                }
                self.suspiciouspids[eprocess.UniqueProcessId.v()] = {
                    "reason": "MALFIND",
                    "color": "RED"
                }
                injected = True
            proc_okay = False
            # Exited processes and well-known system processes are expected
            # to be missing from some psxview sources.
            if not injected and (eprocess.ExitTime.v() > 0
                                 or str(eprocess.ImageFileName).lower()
                                 in ["system", "smss.exe", "csrss.exe"]):
                proc_okay = True
            elif not injected and ps_sources['psscan'].has_key(offset) and (not ps_sources['pslist'].has_key(offset) or not \
                ps_sources['thrdproc'].has_key(offset) or not ps_sources['pspcid'].has_key(offset) or not \
                ps_sources['csrss'].has_key(offset) or not ps_sources['session'].has_key(offset) or not \
                ps_sources['deskthrd'].has_key(offset)):
                proc_okay = True
            if not proc_okay and (not ps_sources['pslist'].has_key(offset) or not ps_sources['psscan'].has_key(offset) or not ps_sources['thrdproc'].has_key(offset) \
                or not ps_sources['pspcid'].has_key(offset) or not ps_sources['csrss'].has_key(offset) or not ps_sources['session'].has_key(offset) \
                or not ps_sources['deskthrd'].has_key(offset)):
                # NOTE(review): 'line' may still hold a value set by an
                # earlier branch here — confirm the intended key.
                if self.suspicious.get(line.strip(), {}).get("reason", None) == None:
                    self.suspicious[line.strip()] = {
                        "reason": "PSXVIEW",
                        "color": "RED"
                    }
                    self.suspiciouspids[eprocess.UniqueProcessId.v()] = {
                        "reason": "PSXVIEW",
                        "color": "RED"
                    }
                else:
                    self.suspicious[line.strip()] = {
                        "reason": "MALFIND and PSXVIEW",
                        "color": "RED"
                    }
                    self.suspiciouspids[eprocess.UniqueProcessId.v()] = {
                        "reason": "MALFIND and PSXVIEW",
                        "color": "RED"
                    }
        if eprocess.Peb == None or eprocess.Peb.ImageBaseAddress == None:
            continue
        # Get DLL PE timestamps for Wow64 processes (excluding 64-bit ones)
        if eprocess.IsWow64 and "TimeDateStamp" in self._config.TYPE:
            for vad, address_space in eprocess.get_vads(
                    vad_filter=eprocess._mapped_file_filter):
                if vad.FileObject.FileName:
                    name = str(vad.FileObject.FileName).lower()
                    basename = ntpath.basename(name)
                    # Only 32-bit DLLs; skip the Wow64 plumbing modules.
                    if not basename.endswith("dll") or basename in [
                            "wow64cpu.dll", "ntdll.dll", "wow64.dll",
                            "wow64win.dll"
                    ]:
                        continue
                    data = ps_ad.zread(vad.Start, vad.Length)
                    bufferas = addrspace.BufferAddressSpace(self._config,
                                                            data=data)
                    try:
                        pe_file = obj.Object("_IMAGE_DOS_HEADER",
                                             offset=0,
                                             vm=bufferas)
                        header = pe_file.get_nt_header()
                    except ValueError, ve:
                        continue
                    line = "[{7}PE HEADER 32-bit (dll)]{0} {4}{0} Process: {1}/PID: {2}/PPID: {3}/Process POffset: 0x{5:08x}/DLL Base: 0x{6:08x}".format(
                        "" if body else "|", eprocess.ImageFileName,
                        eprocess.UniqueProcessId,
                        eprocess.InheritedFromUniqueProcessId, basename,
                        offset, vad.Start, self._config.MACHINE)
                    yield self.getoutput(line,
                                         header.FileHeader.TimeDateStamp,
                                         body=body)
        # get DLL PE timestamps
        mods = dict()
        if "TimeDateStamp" in self._config.TYPE or "LoadTime" in self._config.TYPE:
            mods = dict((mod.DllBase.v(), mod)
                        for mod in eprocess.get_load_modules())
        for mod in mods.values():
            basename = str(mod.BaseDllName or "")
            # The process's own image gets an (exe) tag; everything else (dll).
            if basename == str(eprocess.ImageFileName):
                line = "[{7}PE HEADER (exe)]{0} {4}{0} Process: {1}/PID: {2}/PPID: {3}/Process POffset: 0x{5:08x}/DLL Base: 0x{6:08x}".format(
                    "" if body else "|", eprocess.ImageFileName,
                    eprocess.UniqueProcessId,
                    eprocess.InheritedFromUniqueProcessId, basename,
                    offset, mod.DllBase.v(), self._config.MACHINE)
            else:
                line = "[{7}PE HEADER (dll)]{0} {4}{0} Process: {1}/PID: {2}/PPID: {3}/Process POffset: 0x{5:08x}/DLL Base: 0x{6:08x}".format(
                    "" if body else "|", eprocess.ImageFileName,
                    eprocess.UniqueProcessId,
                    eprocess.InheritedFromUniqueProcessId, basename,
                    offset, mod.DllBase.v(), self._config.MACHINE)
            if "TimeDateStamp" in self._config.TYPE:
                yield self.getoutput(line, mod.TimeDateStamp, body=body)
                line2 = "[{7}PE DEBUG]{0} {4}{0} Process: {1}/PID: {2}/PPID: {3}/Process POffset: 0x{5:08x}/DLL Base: 0x{6:08x}".format(
                    "" if body else "|", eprocess.ImageFileName,
                    eprocess.UniqueProcessId,
                    eprocess.InheritedFromUniqueProcessId, basename,
                    offset, mod.DllBase.v(), self._config.MACHINE)
                yield self.getoutput(
                    line2,
                    mod.get_debug_directory().TimeDateStamp,
                    body=body)
            if hasattr(mod, "LoadTime") and "LoadTime" in self._config.TYPE:
                temp = line.replace(
                    "[{0}PE HEADER ".format(self._config.MACHINE),
                    "[{0}DLL LOADTIME ".format(self._config.MACHINE))
                yield self.getoutput(temp,
                                     mod.TimeDateStamp,
                                     end=mod.LoadTime,
                                     body=body)
            if detections and eprocess.UniqueProcessId.v(
            ) in self.suspiciouspids.keys():
                # Propagate the process-level flag onto each of its modules.
                suspiciousline = "Process: {0}/PID: {1}/PPID: {2}/Process POffset: 0x{3:08x}/DLL Base: 0x{4:08x}".format(
                    eprocess.ImageFileName, eprocess.UniqueProcessId,
                    eprocess.InheritedFromUniqueProcessId, offset,
                    mod.DllBase.v())
                self.suspicious[suspiciousline] = {
                    "reason":
                    "Process flagged: " + self.suspiciouspids[
                        eprocess.UniqueProcessId.v()]["reason"],
                    "color": "BLUE"
                }
def calculate(self):
    """Timeline generator emitting pipe-delimited or bodyfile lines.

    Covers processes, sockets (XP/2003) or network connections (Vista+),
    event logs, threads and loaded-module PE timestamps. The 'body'
    output mode produces Sleuth Kit bodyfile-style lines.
    """
    # Validate output-format options up front.
    if self._config.OUTPUT == "xlsx" and not has_openpyxl:
        debug.error(
            "You must install OpenPyxl for xlsx format:\n\thttps://bitbucket.org/ericgazoni/openpyxl/wiki/Home"
        )
    elif self._config.OUTPUT == "xlsx" and not self._config.OUTPUT_FILE:
        debug.error(
            "You must specify an output *.xlsx file!\n\t(Example: --output-file=OUTPUT.xlsx)"
        )
    if (self._config.HIVE or self._config.USER) and not (self._config.REGISTRY):
        debug.error(
            "You must use -R/--registry in conjuction with -H/--hive and/or -U/--user"
        )
    addr_space = utils.load_as(self._config)
    version = (addr_space.profile.metadata.get('major', 0),
               addr_space.profile.metadata.get('minor', 0))
    pids = {}  #dictionary of process IDs/ImageFileName
    offsets = []  #process offsets
    im = imageinfo.ImageInfo(self._config).get_image_time(addr_space)
    body = False
    if self._config.OUTPUT == "body":
        body = True
    # Anchor event marking the image acquisition time.
    if not body:
        event = "{0}|[END LIVE RESPONSE]\n".format(im['ImageDatetime'])
    else:
        event = "0|[END LIVE RESPONSE]|0|---------------|0|0|0|{0}|{0}|{0}|{0}\n".format(
            im['ImageDatetime'].v())
    yield event
    # Get EPROCESS
    psscan = filescan.PSScan(self._config).calculate()
    for eprocess in psscan:
        if eprocess.obj_offset not in offsets:
            offsets.append(eprocess.obj_offset)
        if not body:
            line = "{0}|{1}|{2}|{3}|{4}|{5}|0x{6:08x}||\n".format(
                eprocess.CreateTime or '-1', "[PROCESS]",
                eprocess.ImageFileName, eprocess.UniqueProcessId,
                eprocess.InheritedFromUniqueProcessId, eprocess.ExitTime
                or '', eprocess.obj_offset)
        else:
            line = "0|[PROCESS] {2}/PID: {3}/PPID: {4}/POffset: 0x{5:08x}|0|---------------|0|0|0|{0}|{1}|{0}|{0}\n".format(
                eprocess.CreateTime.v(), eprocess.ExitTime.v(),
                eprocess.ImageFileName, eprocess.UniqueProcessId,
                eprocess.InheritedFromUniqueProcessId,
                eprocess.obj_offset)
        # Remember PID -> image name for the thread section below.
        pids[eprocess.UniqueProcessId.v()] = eprocess.ImageFileName
        yield line
    # Get Sockets and Evtlogs XP/2k3 only
    if addr_space.profile.metadata.get('major', 0) == 5:
        socks = sockets.Sockets(self._config).calculate()
        # socks = sockscan.SockScan(self._config).calculate()  # you can use sockscan instead if you uncomment
        for sock in socks:
            la = "{0}:{1}".format(sock.LocalIpAddress, sock.LocalPort)
            if not body:
                line = "{0}|[SOCKET]|{1}|{2}|Protocol: {3} ({4})|{5:#010x}|||\n".format(
                    sock.CreateTime, sock.Pid, la, sock.Protocol,
                    protos.protos.get(sock.Protocol.v(), "-"),
                    sock.obj_offset)
            else:
                line = "0|[SOCKET] PID: {1}/LocalIP: {2}/Protocol: {3}({4})/POffset: 0x{5:#010x}|0|---------------|0|0|0|{0}|{0}|{0}|{0}\n".format(
                    sock.CreateTime.v(), sock.Pid, la, sock.Protocol,
                    protos.protos.get(sock.Protocol.v(), "-"),
                    sock.obj_offset)
            yield line
        stuff = evtlogs.EvtLogs.calculate(self)
        for name, buf in stuff:
            for fields in self.parse_evt_info(name, buf, rawtime=True):
                if not body:
                    line = '{0} |[EVT LOG]|{1}|{2}|{3}|{4}|{5}|{6}|{7}\n'.format(
                        fields[0], fields[1], fields[2], fields[3],
                        fields[4], fields[5], fields[6], fields[7])
                else:
                    line = "0|[EVT LOG] {1}/{2}/{3}/{4}/{5}/{6}/{7}|0|---------------|0|0|0|{0}|{0}|{0}|{0}\n".format(
                        fields[0].v(), fields[1], fields[2], fields[3],
                        fields[4], fields[5], fields[6], fields[7])
                yield line
    else:
        # Vista+
        nets = netscan.Netscan(self._config).calculate()
        for net_object, proto, laddr, lport, raddr, rport, state in nets:
            conn = "{0}:{1} -> {2}:{3}".format(laddr, lport, raddr, rport)
            if not body:
                line = "{0}|[NETWORK CONNECTION]|{1}|{2}|{3}|{4}|{5:<#10x}||\n".format(
                    str(net_object.CreateTime or "-1"),
                    net_object.Owner.UniqueProcessId, conn, proto, state,
                    net_object.obj_offset)
            else:
                line = "0|[NETWORK CONNECTION] {1}/{2}/{3}/{4}/{5:<#10x}|0|---------------|0|0|0|{0}|{0}|{0}|{0}\n".format(
                    net_object.CreateTime.v(),
                    net_object.Owner.UniqueProcessId, conn, proto, state,
                    net_object.obj_offset)
            yield line
    # Get threads
    threads = modscan.ThrdScan(self._config).calculate()
    for thread in threads:
        # Resolve owning process name from the PID map built above.
        image = pids.get(thread.Cid.UniqueProcess.v(), "UNKNOWN")
        if not body:
            line = "{0}|[THREAD]|{1}|{2}|{3}|{4}|||\n".format(
                thread.CreateTime or '-1',
                image,
                thread.Cid.UniqueProcess,
                thread.Cid.UniqueThread,
                thread.ExitTime or '',
            )
        else:
            line = "0|[THREAD] {2}/PID: {3}/TID: {4}|0|---------------|0|0|0|{0}|{1}|{0}|{0}\n".format(
                thread.CreateTime.v(),
                thread.ExitTime.v(),
                image,
                thread.Cid.UniqueProcess,
                thread.Cid.UniqueThread,
            )
        yield line
    # now we get to the PE part. All PE's are dumped in case you want to inspect them later
    data = moddump.ModDump(self._config).calculate()
    for addr_space, procs, mod_base, mod_name in data:
        space = tasks.find_space(addr_space, procs, mod_base)
        if space != None:
            try:
                header = procdump.ProcExeDump(self._config).get_nt_header(
                    space, mod_base)
            except ValueError, ve:
                # No parsable NT header at this base; skip the module.
                continue
            try:
                if not body:
                    line = "{0}|[PE Timestamp (module)]|{1}||{2:#010x}|||||\n".format(
                        header.FileHeader.TimeDateStamp or '-1', mod_name,
                        mod_base)
                else:
                    line = "0|[PE Timestamp (module)] {1}/Base: {2:#010x}|0|---------------|0|0|0|{0}|{0}|{0}|{0}\n".format(
                        header.FileHeader.TimeDateStamp.v(), mod_name,
                        mod_base)
            except ValueError, ve:
                # Timestamp unreadable: emit a placeholder record instead.
                if not body:
                    line = "-1|[PE Timestamp (module)]|{0}||{1}|||||\n".format(
                        mod_name, mod_base)
                else:
                    line = "0|[PE Timestamp (module)] {0}/Base: {1:#010x}|0|---------------|0|0|0|0|0|0|0\n".format(
                        mod_name, mod_base)
            yield line
def calculate(self):
    """Scan every artifact source requested via --type and yield timeline rows.

    For each enabled category (processes, DLL/module PE timestamps, sockets,
    event logs, threads, symlinks, userassist, shimcache, registry hives/keys,
    kernel timers, ...) the corresponding plugin is invoked and every hit is
    rendered through self.getoutput(), which emits either pipe-delimited text
    or body-file (mactime) format depending on --output.

    Yields:
        str: one formatted timeline line per artifact timestamp.
    """
    # --hive/--user only make sense when registry keys are being timelined.
    if (self._config.HIVE or self._config.USER) and "Registry" not in self._config.TYPE:
        debug.error(
            "You must use --registry in conjunction with -H/--hive and/or -U/--user")

    if self._config.TYPE != None:
        for t in self._config.TYPE.split(","):
            if t.strip() not in self.types and t.strip() != "Registry":
                debug.error("You have entered an incorrect type: {0}".format(t))

    addr_space = utils.load_as(self._config)
    version = (addr_space.profile.metadata.get('major', 0),
               addr_space.profile.metadata.get('minor', 0))

    pids = {}  # dictionary of process IDs/ImageFileName

    body = False
    if self._config.OUTPUT == "body":
        body = True

    if self._config.MACHINE != "":
        # Append a trailing space so the machine tag reads cleanly in output.
        self._config.update("MACHINE", "{0} ".format(self._config.MACHINE))

    if "ImageDate" in self._config.TYPE:
        im = imageinfo.ImageInfo(self._config).get_image_time(addr_space)
        yield self.getoutput(
            "[{0}LIVE RESPONSE]{1} (System time){1}".format(
                self._config.MACHINE, "" if body else "|"),
            im['ImageDatetime'], body = body)

    if version <= (6, 1) and "IEHistory" in self._config.TYPE:
        self._config.update("LEAK", True)
        data = iehistory.IEHistory(self._config).calculate()
        for process, record in data:
            ## Extended fields are available for these records
            if record.obj_name == "_URL_RECORD":
                line = "[{6}IEHISTORY]{0} {1}->{5}{0} PID: {2}/Cache type \"{3}\" at {4:#x}".format(
                    "" if body else "|",
                    process.ImageFileName,
                    process.UniqueProcessId,
                    record.Signature,
                    record.obj_offset,
                    record.Url,
                    self._config.MACHINE)
                yield self.getoutput(line, record.LastModified,
                                     end = record.LastAccessed, body = body)
        self._config.remove_option("REDR")
        self._config.remove_option("LEAK")

    psx = []
    # FIX: these membership tests previously used self._config.Type (wrong
    # case); every other access in this method uses self._config.TYPE.
    if ("Process" in self._config.TYPE
            or "TimeDateStamp" in self._config.TYPE
            or "LoadTime" in self._config.TYPE
            or "_CM_KEY_BODY" in self._config.TYPE):
        psx = psxview.PsXview(self._config).calculate()
    for offset, eprocess, ps_sources in psx:
        pids[eprocess.UniqueProcessId.v()] = eprocess.ImageFileName
        if "Process" in self._config.TYPE:
            line = "[{5}PROCESS]{0} {1}{0} PID: {2}/PPID: {3}/POffset: 0x{4:08x}".format(
                "" if body else "|",
                eprocess.ImageFileName,
                eprocess.UniqueProcessId,
                eprocess.InheritedFromUniqueProcessId,
                offset,
                self._config.MACHINE)
            yield self.getoutput(line, eprocess.CreateTime,
                                 end = eprocess.ExitTime, body = body)

        # psxview hits can be physical-offset objects; map them back into a
        # virtual address space before dereferencing pointers.
        if not hasattr(eprocess.obj_vm, "vtop"):
            eprocess = taskmods.DllList(
                self._config).virtual_process_from_physical_offset(
                    addr_space, eprocess.obj_offset)
            # NOTE: == None (not "is None") on purpose: invalid volatility
            # objects are NoneObject instances that compare equal to None.
            if eprocess == None:
                continue
        else:
            ps_ad = eprocess.get_process_address_space()
            if ps_ad == None:
                continue

        if version[0] == 5 and "Process" in self._config.TYPE:
            line = "[{5}PROCESS LastTrimTime]{0} {1}{0} PID: {2}/PPID: {3}/POffset: 0x{4:08x}".format(
                "" if body else "|",
                eprocess.ImageFileName,
                eprocess.UniqueProcessId,
                eprocess.InheritedFromUniqueProcessId,
                offset,
                self._config.MACHINE)
            yield self.getoutput(line, eprocess.Vm.LastTrimTime, body = body)

        if (eprocess.ObjectTable.HandleTableList
                and "_CM_KEY_BODY" in self._config.TYPE):
            for handle in eprocess.ObjectTable.handles():
                if not handle.is_valid():
                    continue
                name = ""
                object_type = handle.get_object_type()
                if object_type == "Key":
                    key_obj = handle.dereference_as("_CM_KEY_BODY")
                    name = key_obj.full_key_name()
                    line = "[{6}Handle (Key)]{0} {1}{0} {2} PID: {3}/PPID: {4}/POffset: 0x{5:08x}".format(
                        "" if body else "|",
                        name,
                        eprocess.ImageFileName,
                        eprocess.UniqueProcessId,
                        eprocess.InheritedFromUniqueProcessId,
                        offset,
                        self._config.MACHINE)
                    yield self.getoutput(
                        line,
                        key_obj.KeyControlBlock.KcbLastWriteTime,
                        body = body)

        if eprocess.Peb == None or eprocess.Peb.ImageBaseAddress == None:
            continue

        # Get DLL PE timestamps for Wow64 processes (excluding 64-bit ones)
        if eprocess.IsWow64 and "TimeDateStamp" in self._config.TYPE:
            for vad, address_space in eprocess.get_vads(
                    vad_filter = eprocess._mapped_file_filter):
                if vad.FileObject.FileName:
                    name = str(vad.FileObject.FileName).lower()
                    basename = ntpath.basename(name)
                    # Skip non-DLL mappings and the Wow64 plumbing DLLs.
                    if not basename.endswith("dll") or basename in [
                            "wow64cpu.dll", "ntdll.dll",
                            "wow64.dll", "wow64win.dll"]:
                        continue
                    data = ps_ad.zread(vad.Start, vad.Length)
                    bufferas = addrspace.BufferAddressSpace(self._config, data = data)
                    try:
                        pe_file = obj.Object("_IMAGE_DOS_HEADER",
                                             offset = 0, vm = bufferas)
                        header = pe_file.get_nt_header()
                    except ValueError:
                        continue
                    line = "[{7}PE HEADER 32-bit (dll)]{0} {4}{0} Process: {1}/PID: {2}/PPID: {3}/Process POffset: 0x{5:08x}/DLL Base: 0x{6:08x}".format(
                        "" if body else "|",
                        eprocess.ImageFileName,
                        eprocess.UniqueProcessId,
                        eprocess.InheritedFromUniqueProcessId,
                        basename,
                        offset,
                        vad.Start,
                        self._config.MACHINE)
                    yield self.getoutput(line, header.FileHeader.TimeDateStamp,
                                         body = body)

        # get DLL PE timestamps
        mods = dict()
        if ("TimeDateStamp" in self._config.TYPE
                or "LoadTime" in self._config.TYPE):
            mods = dict((mod.DllBase.v(), mod)
                        for mod in eprocess.get_load_modules())
        for mod in list(mods.values()):
            basename = str(mod.BaseDllName or "")
            if basename == str(eprocess.ImageFileName):
                line = "[{7}PE HEADER (exe)]{0} {4}{0} Process: {1}/PID: {2}/PPID: {3}/Process POffset: 0x{5:08x}/DLL Base: 0x{6:08x}".format(
                    "" if body else "|",
                    eprocess.ImageFileName,
                    eprocess.UniqueProcessId,
                    eprocess.InheritedFromUniqueProcessId,
                    basename,
                    offset,
                    mod.DllBase.v(),
                    self._config.MACHINE)
            else:
                line = "[{7}PE HEADER (dll)]{0} {4}{0} Process: {1}/PID: {2}/PPID: {3}/Process POffset: 0x{5:08x}/DLL Base: 0x{6:08x}".format(
                    "" if body else "|",
                    eprocess.ImageFileName,
                    eprocess.UniqueProcessId,
                    eprocess.InheritedFromUniqueProcessId,
                    basename,
                    offset,
                    mod.DllBase.v(),
                    self._config.MACHINE)
            if "TimeDateStamp" in self._config.TYPE:
                yield self.getoutput(line, mod.TimeDateStamp, body = body)
                line2 = "[{7}PE DEBUG]{0} {4}{0} Process: {1}/PID: {2}/PPID: {3}/Process POffset: 0x{5:08x}/DLL Base: 0x{6:08x}".format(
                    "" if body else "|",
                    eprocess.ImageFileName,
                    eprocess.UniqueProcessId,
                    eprocess.InheritedFromUniqueProcessId,
                    basename,
                    offset,
                    mod.DllBase.v(),
                    self._config.MACHINE)
                yield self.getoutput(line2,
                                     mod.get_debug_directory().TimeDateStamp,
                                     body = body)
            if (hasattr(mod, "LoadTime")
                    and "LoadTime" in self._config.TYPE):
                # Reuse the PE HEADER line with a swapped tag.
                temp = line.replace(
                    "[{0}PE HEADER ".format(self._config.MACHINE),
                    "[{0}DLL LOADTIME ".format(self._config.MACHINE))
                yield self.getoutput(temp, mod.LoadTime, body = body)

    # Get Sockets and Evtlogs XP/2k3 only
    if version[0] == 5:
        # socks = sockets.Sockets(self._config).calculate()
        socks = []
        if "Socket" in self._config.TYPE:
            socks = sockscan.SockScan(self._config).calculate(
            )  # you can use sockscan instead if you uncomment
        for sock in socks:
            la = "{0}:{1}".format(sock.LocalIpAddress, sock.LocalPort)
            # FIX: literal "0x" removed before {5:#010x} — the '#' flag
            # already supplies the 0x prefix (output was "0x0x...").
            line = "[{6}SOCKET]{0} LocalIP: {2}/Protocol: {3}({4}){0} PID: {1}/POffset: {5:#010x}".format(
                "" if body else "|",
                sock.Pid,
                la,
                sock.Protocol,
                protos.protos.get(sock.Protocol.v(), "-"),
                sock.obj_offset,
                self._config.MACHINE)
            yield self.getoutput(line, sock.CreateTime, body = body)

        stuff = []
        if "EvtLog" in self._config.TYPE:
            evt = evtlogs.EvtLogs(self._config)
            stuff = evt.calculate()
        for name, buf in stuff:
            for fields in evt.parse_evt_info(name, buf, rawtime = True):
                line = "[{8}EVT LOG]{0} {1}{0} {2}/{3}/{4}/{5}/{6}/{7}".format(
                    "" if body else "|",
                    fields[1], fields[2], fields[3], fields[4],
                    fields[5], fields[6], fields[7],
                    self._config.MACHINE)
                yield self.getoutput(line, fields[0], body = body)
    elif version <= (6, 1):
        # Vista+
        nets = []
        if "Socket" in self._config.TYPE:
            nets = netscan.Netscan(self._config).calculate()
        for net_object, proto, laddr, lport, raddr, rport, state in nets:
            if net_object.CreateTime.v() == 0:
                continue
            if raddr == "*" and rport == "*":
                conn = "{0}:{1}".format(laddr, lport)
                socket_type = "SOCKET"
            else:
                conn = "{0}:{1} -> {2}:{3}".format(laddr, lport, raddr, rport)
                # FIX: was "socket_TYPE", so connections were never labeled
                # "CONNECTION" (and the first record could hit an unbound name).
                socket_type = "CONNECTION"
            line = "[{6}NETWORK {7}]{0} {2}{0} {1}/{3}/{4}/{5:<#10x}".format(
                "" if body else "|",
                net_object.Owner.UniqueProcessId,
                conn,
                proto,
                state,
                net_object.obj_offset,
                self._config.MACHINE,
                socket_type)
            yield self.getoutput(line, net_object.CreateTime, body = body)

    # Get threads
    threads = []
    if "Thread" in self._config.TYPE:
        threads = modscan.ThrdScan(self._config).calculate()
    for thread in threads:
        image = pids.get(thread.Cid.UniqueProcess.v(), "UNKNOWN")
        line = "[{4}THREAD]{0} {1}{0} PID: {2}/TID: {3}".format(
            "" if body else "|",
            image,
            thread.Cid.UniqueProcess,
            thread.Cid.UniqueThread,
            self._config.MACHINE)
        yield self.getoutput(line, thread.CreateTime,
                             end = thread.ExitTime, body = body)

    data = []
    if "Symlink" in self._config.TYPE:
        data = filescan.SymLinkScan(self._config).calculate()
    for link in data:
        objct = link.get_object_header()
        line = "[{6}SYMLINK]{0} {1}->{2}{0} POffset: {3}/Ptr: {4}/Hnd: {5}".format(
            "" if body else "|",
            str(objct.NameInfo.Name or ''),
            str(link.LinkTarget or ''),
            link.obj_offset,
            objct.PointerCount,
            objct.HandleCount,
            self._config.MACHINE)
        yield self.getoutput(line, link.CreationTime, body = body)

    data = []
    if "TimeDateStamp" in self._config.TYPE:
        data = moddump.ModDump(self._config).calculate()
    for aspace, procs, mod_base, mod_name in data:
        mod_name = str(mod_name or '')
        space = tasks.find_space(aspace, procs, mod_base)
        if space != None:
            try:
                pe_file = obj.Object("_IMAGE_DOS_HEADER",
                                     offset = mod_base, vm = space)
                header = pe_file.get_nt_header()
            except ValueError:
                continue
            line = "[{3}PE HEADER (module)]{0} {1}{0} Base: {2:#010x}".format(
                "" if body else "|",
                mod_name,
                mod_base,
                self._config.MACHINE)
            yield self.getoutput(line, header.FileHeader.TimeDateStamp,
                                 body = body)

    uastuff = []
    if "Userassist" in self._config.TYPE:
        uastuff = userassist.UserAssist(self._config).calculate()
    for win7, reg, key in uastuff:
        for v in rawreg.values(key):
            tp, dat = rawreg.value_data(v)
            subname = v.Name
            if tp == 'REG_BINARY':
                dat_raw = dat
                # Value names are ROT13-encoded in the registry.
                try:
                    subname = codecs.encode(subname, 'rot_13')
                except UnicodeDecodeError:
                    pass
                if win7:
                    guid = subname.split("\\")[0]
                    if guid in userassist.folder_guids:
                        subname = subname.replace(
                            guid, userassist.folder_guids[guid])
                bufferas = addrspace.BufferAddressSpace(self._config,
                                                        data = dat_raw)
                uadata = obj.Object("_VOLUSER_ASSIST_TYPES",
                                    offset = 0, vm = bufferas)
                ID = "N/A"
                count = "N/A"
                fc = "N/A"
                tf = "N/A"
                if (len(dat_raw) < bufferas.profile.get_obj_size(
                        '_VOLUSER_ASSIST_TYPES') or uadata == None):
                    continue
                else:
                    if hasattr(uadata, "ID"):
                        ID = "{0}".format(uadata.ID)
                    if hasattr(uadata, "Count"):
                        count = "{0}".format(uadata.Count)
                    else:
                        # Pre-Win7 counts start at 5; normalize to zero-based.
                        count = "{0}".format(
                            uadata.CountStartingAtFive
                            if uadata.CountStartingAtFive < 5
                            else uadata.CountStartingAtFive - 5)
                    if hasattr(uadata, "FocusCount"):
                        seconds = (uadata.FocusTime + 500) / 1000.0
                        focus_time = (datetime.timedelta(seconds = seconds)
                                      if seconds > 0 else uadata.FocusTime)
                        fc = "{0}".format(uadata.FocusCount)
                        tf = "{0}".format(focus_time)
                # Escape the field separator used by the output format.
                subname = subname.replace("|", "%7c")
                line = "[{7}USER ASSIST]{0} {2}{0} Registry: {1}/ID: {3}/Count: {4}/FocusCount: {5}/TimeFocused: {6}".format(
                    "" if body else "|",
                    reg,
                    subname,
                    ID,
                    count,
                    fc,
                    tf,
                    self._config.MACHINE)
                yield self.getoutput(line, uadata.LastUpdated, body = body)

    shimdata = []
    if "Shimcache" in self._config.TYPE:
        shimdata = shimcache.ShimCache(self._config).calculate()
    for path, lm, lu in shimdata:
        line = "[{2}SHIMCACHE]{0} {1}{0} ".format(
            "" if body else "|", path, self._config.MACHINE)
        if lu:
            yield self.getoutput(line, lm, end = lu, body = body)
        else:
            yield self.getoutput(line, lm, body = body)

    if ("_HBASE_BLOCK" in self._config.TYPE
            or "_CMHIVE" in self._config.TYPE
            or "Registry" in self._config.TYPE):
        regapi = registryapi.RegistryApi(self._config)
        for o in regapi.all_offsets:
            if "_HBASE_BLOCK" in self._config.TYPE:
                line = "[{2}_HBASE_BLOCK TimeStamp]{0} {1}{0} ".format(
                    "" if body else "|",
                    regapi.all_offsets[o],
                    self._config.MACHINE)
                h = obj.Object("_HHIVE", o, addr_space)
                yield self.getoutput(line, h.BaseBlock.TimeStamp, body = body)
            # _CMHIVE.LastWriteTime only exists on Win7 SP1+ (build 7601)
            if ("_CMHIVE" in self._config.TYPE and version[0] == 6
                    and addr_space.profile.metadata.get('build', 0) >= 7601):
                line = "[{2}_CMHIVE LastWriteTime]{0} {1}{0} ".format(
                    "" if body else "|",
                    regapi.all_offsets[o],
                    self._config.MACHINE)
                cmhive = obj.Object("_CMHIVE", o, addr_space)
                yield self.getoutput(line, cmhive.LastWriteTime, body = body)

        if "Registry" in self._config.TYPE:
            regapi.reset_current()
            regdata = regapi.reg_get_all_keys(self._config.HIVE,
                                              self._config.USER,
                                              reg = True, rawtime = True)
            for lwtime, reg, item in regdata:
                item = item.replace("|", "%7c")
                line = "[{3}REGISTRY]{0} {2}{0} Registry: {1}".format(
                    "" if body else "|", reg, item, self._config.MACHINE)
                yield self.getoutput(line, lwtime, body = body)

    if "Timer" in self._config.TYPE:
        volmagic = obj.VolMagic(addr_space)
        KUSER_SHARED_DATA = obj.Object(
            "_KUSER_SHARED_DATA",
            offset = volmagic.KUSER_SHARED_DATA.v(),
            vm = addr_space)
        interrupt = (KUSER_SHARED_DATA.InterruptTime.High1Time << 32) | \
            KUSER_SHARED_DATA.InterruptTime.LowPart
        now = KUSER_SHARED_DATA.SystemTime.as_windows_timestamp()
        data = timers.Timers(self._config).calculate()
        for timer, module in data:
            signaled = "-"
            if timer.Header.SignalState.v():
                signaled = "Yes"
            module_name = "UNKNOWN"
            if module:
                module_name = str(module.BaseDllName or '')
            try:
                # human readable time taken from http://computer.forensikblog.de/en/2011/10/timers-and-times.html
                bufferas = addrspace.BufferAddressSpace(
                    self._config,
                    data = struct.pack(
                        '<Q', timer.DueTime.QuadPart - interrupt + now))
                due_time = obj.Object("WinTimeStamp", is_utc = True,
                                      offset = 0, vm = bufferas)
            except TypeError:
                due_time = 0
            line = "[{6}TIMER]{0} {1}{0} Signaled: {2}/Routine: 0x{3:x}/Period(ms): {4}/Offset: 0x{5:x}".format(
                "" if body else "|",
                module_name,
                signaled,
                timer.Dpc.DeferredRoutine,
                timer.Period,
                timer.obj_offset,
                self._config.MACHINE)
            yield self.getoutput(line, due_time, body = body)