def calculate(self):
    """Yield (offset, MFT_FILE_RECORD, raw buffer, attributes) for MFT entries.

    Entries come either from explicit --offset values (comma-separated hex)
    or from a physical-space pool scan for the 'FILE'/'BAAD' signatures.
    """
    if self._config.MACHINE != "":
        # Append a separator so the machine name prefixes output cleanly.
        self._config.update("MACHINE", f"{self._config.MACHINE} ")
    offsets = []
    address_space = utils.load_as(self._config, astype='physical')
    if self._config.OFFSET != None:
        # Explicit offsets supplied: parse each as hex and read the entry directly.
        items = [int(o, 16) for o in self._config.OFFSET.split(',')]
        for offset in items:
            mft_buff = address_space.read(offset, self._config.ENTRYSIZE)
            bufferas = addrspace.BufferAddressSpace(self._config, data=mft_buff)
            mft_entry = obj.Object('MFT_FILE_RECORD', vm=bufferas, offset=0)
            offsets.append((offset, mft_entry, mft_buff))
    else:
        # Scan the whole physical space for MFT record signatures.
        scanner = poolscan.MultiPoolScanner(needles=[b'FILE', b'BAAD'])
        print(
            "Scanning for MFT entries and building directory, this can take a while"
        )
        seen = []  # (record number, file name) pairs already collected
        for _, offset in scanner.scan(address_space):
            mft_buff = address_space.read(offset, self._config.ENTRYSIZE)
            bufferas = addrspace.BufferAddressSpace(self._config, data=mft_buff)
            name = ""
            try:
                mft_entry = obj.Object('MFT_FILE_RECORD', vm=bufferas, offset=0)
                # Advance past the STANDARD_INFORMATION content to find the
                # FILE_NAME attribute for this record.
                temp = mft_entry.advance_one(
                    mft_entry.ResidentAttributes.STDInfo.obj_offset +
                    mft_entry.ResidentAttributes.ContentSize,
                    mft_buff,
                    self._config.ENTRYSIZE,
                )
                if temp == None:
                    continue
                mft_entry.add_path(temp.FileName)
                name = temp.FileName.get_name()
            except struct.error:
                # Malformed entry: skip (optionally report under --debugout).
                if self._config.DEBUGOUT:
                    print(f"Problem entry at offset: {hex(offset)}")
                continue
            # Deduplicate by (record number, name).
            if (int(mft_entry.RecordNumber), name) in seen:
                continue
            else:
                seen.append((int(mft_entry.RecordNumber), name))
                offsets.append((offset, mft_entry, mft_buff))
    for offset, mft_entry, mft_buff in offsets:
        if self._config.DEBUGOUT:
            print(f"Processing MFT Entry at offset: {hex(offset)}")
        attributes = mft_entry.parse_attributes(mft_buff,
                                                not self._config.NOCHECK,
                                                self._config.ENTRYSIZE)
        yield offset, mft_entry, attributes
def calculate(self):
    """Carve NTFS INDX (directory index) pages and yield their entries.

    Yields (physical offset, INDX_HEADER, [[INDX_ENTRY_HEADER, FILE_NAME], ...])
    for each page whose header validates.
    """
    address_space = utils.load_as(self._config, astype='physical')
    scanner = INDXScanner(needles=['INDX\('])
    # Carve INDX Headers
    for offset in scanner.scan(address_space):
        indx_entries = []
        indx_header_buff = address_space.zread(offset, 0x40)
        bufferas = addrspace.BufferAddressSpace(self._config,
                                                data=indx_header_buff)
        indx_header = obj.Object('INDX_HEADER', vm=bufferas, offset=0)
        # Check the headers
        if indx_header.isValid:
            # NOTE(review): the header above is read as 0x40 (64) bytes, but
            # the entries buffer starts at offset + 40 (decimal) — confirm
            # whether 0x40 was intended here.
            indx_buff = address_space.zread(offset + 40, 4096)
            indx_bufferas = addrspace.BufferAddressSpace(self._config,
                                                         data=indx_buff)
            o = 0
            # Iterate through the entries
            while o < indx_header.AllocatedSizeOfEntries:
                indx_entry_header = obj.Object('INDX_ENTRY_HEADER',
                                               vm=indx_bufferas,
                                               offset=o)
                # Check the entry headers
                if indx_entry_header.isValid:
                    # Only parse the FILE_NAME when the full stream is in the buffer.
                    if len(indx_bufferas.
                           data[o + 16:o + 16 + indx_entry_header.StreamLength]
                           ) == indx_entry_header.StreamLength:
                        indx_entry = obj.Object('FILE_NAME',
                                                vm=indx_bufferas,
                                                offset=o + 16)
                        if indx_entry.is_valid():
                            # Sanity-check all four timestamps against a
                            # plausible window to filter false positives.
                            null_date = datetime.datetime(
                                1990, 1, 1, 0, 0, 0).replace(tzinfo=timefmt.UTC())
                            future_date = datetime.datetime(
                                2025, 1, 1, 0, 0, 0).replace(tzinfo=timefmt.UTC())
                            if null_date <= indx_entry.FileAccessedTime.as_datetime() <= future_date and \
                               null_date <= indx_entry.ModifiedTime.as_datetime() <= future_date and \
                               null_date <= indx_entry.MFTAlteredTime.as_datetime() <= future_date and \
                               null_date <= indx_entry.CreationTime.as_datetime() <= future_date:
                                indx_entries.append(
                                    [indx_entry_header, indx_entry])
                    # Jump to the next entry using the declared length.
                    o = o + indx_entry_header.EntryLength
                else:
                    # Invalid header: slide forward one byte and retry.
                    o += 1
            yield offset, indx_header, indx_entries
def parse_key(self, regapi, reg, thekey, given_root = None):
    """Parse one shellbag registry key into a dict of shellbag objects.

    @param regapi: RegistryApi instance used to enumerate values.
    @param reg: registry (hive) name, used to build the path index key.
    @param thekey: key path under the hive to parse.
    @param given_root: optional pre-resolved root key for regapi.
    @returns: dict mapping value name -> list of shell item objects
              (plus an optional "MruListEx" entry mapping id -> position).
    """
    items = {}  # a dictionary of shellbag objects indexed by value name
    for value, data in regapi.reg_yield_values(None, thekey, thetype = 'REG_BINARY', given_root = given_root):
        # Skip SID subkeys, LastKnownState values and cmi-create keys.
        # (== None is kept deliberately: Volatility NoneObjects compare
        # equal to None but are not the None singleton.)
        if data == None or thekey.find("S-") != -1 or str(value).startswith("LastKnownState") or thekey.lower().find("cmi-create") != -1:
            continue
        if str(value).startswith("ItemPos"):
            # Fixed-stride list of ITEMPOS records starting at offset 0x18.
            items[str(value)] = []
            bufferas = addrspace.BufferAddressSpace(self._config, data = data)
            i = 0x18
            while i < len(data) - 0x10:
                item = obj.Object("ITEMPOS", offset = i, vm = bufferas)
                if item != None and item.Size >= 0x15:
                    items[str(value)].append(item)
                i += item.Size + 0x8
        elif str(value).lower().startswith("mrulistex"):
            # MRUListEx: array of 32-bit ids; map id -> position in the list.
            mru_list = {}  # renamed from "list": don't shadow the builtin
            bufferas = addrspace.BufferAddressSpace(self._config, data = data)
            i = 0
            while i < len(data) - 4:
                # // 4 (integer division): position index must be an int,
                # "/" would yield a float under Python 3.
                mru_list[obj.Object("int", offset = i, vm = bufferas).v()] = (i // 4)
                i += 4
            items["MruListEx"] = mru_list
        elif len(data) >= 0x10:
            bufferas = addrspace.BufferAddressSpace(self._config, data = data)
            item = obj.Object("SHELLITEM", offset = 0, vm = bufferas)
            thetype = SHELL_ITEM_TYPES.get(int(item.Type), None)
            if thetype != None:
                if thetype == "UNKNOWN_00" and len(data) == bufferas.profile.get_obj_size("_VOLUSER_ASSIST_TYPES"):
                    # this is UserAssist Data; value names are ROT13-encoded
                    item = obj.Object("_VOLUSER_ASSIST_TYPES", offset = 0, vm = bufferas)
                    try:
                        value = value.encode('rot_13')
                    except UnicodeDecodeError:
                        pass
                else:
                    # Don't parse a structure larger than the available data.
                    if bufferas.profile.get_obj_size(thetype) > len(data):
                        continue
                    item = obj.Object(thetype, offset = 0, vm = bufferas)
                    if hasattr(item, "DataSize") and item.DataSize <= 0:
                        continue
                if thetype in self.supported:
                    # Record the display name for path reconstruction.
                    temp = ""
                    if hasattr(item, "Attributes"):
                        temp = str(item.Attributes.UnicodeFilename)
                    elif hasattr(item, "Name"):
                        temp = str(item.Name)
                    self.paths[reg + ":" + thekey + ":" + str(value)] = temp
                    items[str(value)] = []
                    items[str(value)].append(item)
    return items
def calculate(self):
    """Scan physical memory for Prefetch file headers.

    @returns: list of (offset, PF_HEADER) tuples that pass sanity checks.
    """
    address_space = utils.load_as(self._config, astype = 'physical')
    if not self.is_valid_profile(address_space.profile):
        debug.error("This command does not support the selected profile.")
    scanner = PrefetchScanner(needles = ['SCCA'])
    pf_files = []
    # print() as a function: the Python 2 "print ..." statement used here
    # previously is a syntax error under the Python 3 features (f-strings)
    # this file already relies on.
    print("Scanning for Prefetch files, this can take a while.............")
    for offset in scanner.scan(address_space):
        # The 'SCCA' magic sits 4 bytes into the header, so back up 4 bytes.
        pf_buff = address_space.read(offset - 4, 256)
        bufferas = addrspace.BufferAddressSpace(self._config, data = pf_buff)
        pf_header = obj.Object('PF_HEADER', vm = bufferas, offset = 0)
        # Sanity-check header fields to weed out false positives.
        if pf_header.Version not in (17, 23):
            continue
        if pf_header.Version2 not in (15, 17):
            continue
        if pf_header.NtosBoot not in (0, 1):
            continue
        if pf_header.Length < 1 or pf_header.Length > 99999999:
            continue
        if not ('%X' % pf_header.Hash).isalnum():
            continue
        if pf_header.LastExecTime == 0:
            continue
        if pf_header.TimesExecuted > 99999999:
            continue
        pf_files.append((offset, pf_header))
    return pf_files
def calculate(self):
    """Yield (raw value data, parsed AuditPolData*) for the PolAdtEv policy.

    The structure variant is chosen from the profile's OS version.
    """
    addr_space = utils.load_as(self._config)
    regapi = registryapi.RegistryApi(self._config)
    regapi.reset_current()
    version = (addr_space.profile.metadata.get('major', 0),
               addr_space.profile.metadata.get('minor', 0))
    for value, data_raw in regapi.reg_yield_values('security',
                                                   'Policy\\PolAdtEv',
                                                   thetype = 'REG_NONE'):
        bufferas = addrspace.BufferAddressSpace(self._config, data = data_raw)
        # Pick the audit-policy layout matching the OS version.
        if version <= (5, 1):
            struct_name = "AuditPolDataXP"
        elif version <= (6, 0):
            struct_name = "AuditPolDataVista"
        elif version == (6, 1):
            struct_name = "AuditPolData7"
        elif version in ((6, 2), (6, 3)):
            struct_name = "AuditPolData8"
        else:
            struct_name = "AuditPolData10"
        ap = obj.Object(struct_name, offset = 0, vm = bufferas)
        # == None matches Volatility NoneObjects as well as None itself.
        if ap == None:
            debug.error("No AuditPol data found")
        yield data_raw, ap
def __init__(self, window_size=8):
    """Set up scanner state with a zero-filled 1 KB dummy buffer space."""
    self.window_size = window_size
    self.constraints = []
    self.error_count = 0
    # Backing store: 1 KB of zero bytes under a dummy configuration.
    self.buffer = addrspace.BufferAddressSpace(conf.DummyConfig(),
                                               data='\x00' * 1024)
def calculate(self):
    """Determines the address space.

    Builds the KDBG header needle for every Windows profile, scans for
    them, and yields (profile name, _KDDEBUGGER_DATA64) per hit.
    """
    candidate_profiles = [
        klass.__name__
        for klass in registry.get_plugin_classes(obj.Profile).values()
    ]
    headers = {}      # profile name -> expected KDBG header bytes
    longest = 0       # length of the longest header needle
    saved_profile = self._config.PROFILE
    for profname in candidate_profiles:
        self._config.update('PROFILE', profname)
        buf = addrspace.BufferAddressSpace(self._config)
        if buf.profile.metadata.get('os', 'unknown') != 'windows':
            continue
        headers[profname] = str(obj.VolMagic(buf).KDBGHeader)
        longest = max(longest, len(headers[profname]))
    # Restore whatever profile the user originally selected.
    self._config.update('PROFILE', saved_profile)
    scanner = KDBGScanner(needles=headers.values())
    aspace = utils.load_as(self._config, astype='any')
    for offset in scanner.scan(aspace):
        chunk = aspace.read(offset, longest + 0x10)
        for profname in headers:
            if chunk.find(headers[profname]) >= 0:
                kdbg = obj.Object("_KDDEBUGGER_DATA64",
                                  offset=offset,
                                  vm=aspace)
                yield profname, kdbg
def parse_data(self, dat_raw):
    """Format a raw UserAssist value buffer as a human-readable string.

    @param dat_raw: raw registry value bytes.
    @returns: formatted multi-line string, or None if the buffer is too
              short or cannot be parsed.
    """
    bufferas = addrspace.BufferAddressSpace(self._config, data=dat_raw)
    uadata = obj.Object("_VOLUSER_ASSIST_TYPES", offset=0, vm=bufferas)
    required = bufferas.profile.get_obj_size('_VOLUSER_ASSIST_TYPES')
    # == None also matches Volatility NoneObjects.
    if len(dat_raw) < required or uadata == None:
        return None
    output = ""
    if hasattr(uadata, "ID"):
        output = "\n{0:15} {1}".format("ID:", uadata.ID)
    # Older profiles expose Count directly; newer ones bias it by five.
    if hasattr(uadata, "Count"):
        count = uadata.Count
    else:
        count = (uadata.CountStartingAtFive
                 if uadata.CountStartingAtFive < 5
                 else uadata.CountStartingAtFive - 5)
    output += "\n{0:15} {1}".format("Count:", count)
    if hasattr(uadata, "FocusCount"):
        # FocusTime is milliseconds; round to the nearest second.
        seconds = (uadata.FocusTime + 500) / 1000.0
        focused = (datetime.timedelta(seconds=seconds)
                   if seconds > 0 else uadata.FocusTime)
        output += "\n{0:15} {1}\n{2:15} {3}".format(
            "Focus Count:", uadata.FocusCount, "Time Focused:", focused)
    output += "\n{0:15} {1}\n".format("Last updated:", uadata.LastUpdated)
    return output
def get_sid_string(self, data):
    """Take a buffer of data from the event record and parse it as a SID.

    @param data: buffer of data from SidOffset of the event record to
    SidOffset + SidLength.

    @returns: sid string, with a friendly name appended when known.
    """
    bufferas = addrspace.BufferAddressSpace(self._config, data = data)
    sid = obj.Object("_SID", offset = 0, vm = bufferas)
    # Walk the identifier-authority bytes; the last one read wins.
    id_auth = None
    for byte in sid.IdentifierAuthority.Value:
        id_auth = byte
    parts = (sid.Revision, id_auth) + tuple(sid.SubAuthority)
    sid_string = "S-" + "-".join(str(p) for p in parts)
    # Resolve a display name: well-known table, then regex table,
    # then any extra SIDs collected earlier.
    if sid_string in getsids.well_known_sids:
        sid_name = " ({0})".format(getsids.well_known_sids[sid_string])
    else:
        re_hit = getsids.find_sid_re(sid_string, getsids.well_known_sid_re)
        if re_hit:
            sid_name = " ({0})".format(re_hit)
        else:
            sid_name = self.extrasids.get(sid_string, "")
    return sid_string + sid_name
def time_object(self):
    """Wrap the integer time value in a UTC UnixTimeStamp object."""
    # Pack the seconds as a little-endian unsigned 32-bit value and
    # parse it back through a throwaway buffer address space.
    packed = struct.pack("<I", self.time_as_integer)
    space = addrspace.BufferAddressSpace(self.obj_vm.get_config(),
                                         data = packed)
    return obj.Object("UnixTimeStamp", offset = 0, vm = space,
                      is_utc = True)
def parse_data_dict(self, dat_raw):
    """Parse a raw UserAssist value buffer into a dict of fields.

    @param dat_raw: raw registry value bytes.
    @returns: dict with ID/count/focus/time/lastupdate keys, or None if
              the buffer is too short or unparsable.
    """
    item = {"ID": -1, "focus": -1, "time": "N/A"}
    bufferas = addrspace.BufferAddressSpace(self._config, data=dat_raw)
    uadata = obj.Object("_VOLUSER_ASSIST_TYPES", offset=0, vm=bufferas)
    required = bufferas.profile.get_obj_size('_VOLUSER_ASSIST_TYPES')
    # == None also matches Volatility NoneObjects.
    if len(dat_raw) < required or uadata == None:
        return None
    if hasattr(uadata, "ID"):
        item["ID"] = int(uadata.ID)
    # Older profiles expose Count directly; newer ones bias it by five.
    if hasattr(uadata, "Count"):
        item["count"] = int(uadata.Count)
    else:
        biased = uadata.CountStartingAtFive
        item["count"] = int(biased if biased < 5 else biased - 5)
    if hasattr(uadata, "FocusCount"):
        # FocusTime is milliseconds; round to the nearest second.
        seconds = (uadata.FocusTime + 500) / 1000.0
        item["focus"] = int(uadata.FocusCount)
        item["time"] = str(datetime.timedelta(seconds=seconds)
                           if seconds > 0 else uadata.FocusTime)
    item["lastupdate"] = str(uadata.LastUpdated)
    return item
def get_full(self, full):
    """Format the record's four timestamps plus the given full path.

    @param full: full path string to append to the timestamp columns.
    @returns: formatted row string, or None if formatting fails.
    """
    # NOTE(review): the sibling __str__ implementation passes a bytes
    # literal (b"...") here while this uses a str — confirm which the
    # BufferAddressSpace expects and make them consistent.
    bufferas = addrspace.BufferAddressSpace(
        self.obj_vm._config, data="\x00\x00\x00\x00\x00\x00\x00\x00")
    # Null timestamp used as a fallback for malformed time fields.
    nulltime = obj.Object("WinTimeStamp", vm=bufferas, offset=0, is_utc=True)
    try:
        modified = str(self.ModifiedTime)
    except struct.error:
        modified = nulltime
    try:
        mftaltered = str(self.MFTAlteredTime)
    except struct.error:
        mftaltered = nulltime
    try:
        creation = str(self.CreationTime)
    except struct.error:
        creation = nulltime
    try:
        accessed = str(self.FileAccessedTime)
    except struct.error:
        accessed = nulltime
    try:
        return "{0:20} {1:30} {2:30} {3:30} {4}".format(
            creation, modified, mftaltered, accessed,
            self.remove_unprintable(full))
    except struct.error:
        return None
def table_header(self, outfd, title_format_list = None):
    """Table header renders the title row of a table

    This also stores the header types to ensure
    everything is formatted appropriately.
    It must be a list of tuples rather than a dict for ordering purposes.
    """
    titles = []
    rules = []
    self._formatlist = []
    profile = addrspace.BufferAddressSpace(self._config).profile
    for heading, fmt in title_format_list:
        spec = fmtspec.FormatSpec(self._formatlookup(profile, fmt))
        # minwidth == -1 marks an unbounded column; otherwise widen the
        # column so the heading fits.
        if spec.minwidth != -1:
            spec.minwidth = max(spec.minwidth, len(heading))
        # Title spec mirrors the field's alignment (default left).
        titlespec = fmtspec.FormatSpec(formtype = 's',
                                       minwidth = max(spec.minwidth,
                                                      len(heading)))
        titlespec.align = spec.align if spec.align in "<>^" else "<"
        titles.append(("{0:" + titlespec.to_string() + "}").format(heading))
        rules.append("-" * titlespec.minwidth)
        self._formatlist.append(spec)
    # Emit the heading row and a rule line beneath it.
    if outfd:
        outfd.write(self.tablesep.join(titles) + "\n")
        outfd.write(self.tablesep.join(rules) + "\n")
def calculate(self):
    """Yield (path, last_modified, last_update) ShimCache entries.

    Reads the AppCompatCache value from the current control set; the key
    name and entry layout differ between XP and later versions.
    """
    addr_space = utils.load_as(self._config)
    regapi = registryapi.RegistryApi(self._config)
    regapi.reset_current()
    currentcs = regapi.reg_get_currentcontrolset()
    if currentcs == None:
        currentcs = "ControlSet001"
    version = (addr_space.profile.metadata.get('major', 0),
               addr_space.profile.metadata.get('minor', 0))
    xp = version <= (5, 1)
    if xp:
        key = currentcs + '\\' + "Control\\Session Manager\\AppCompatibility"
    else:
        key = currentcs + '\\' + "Control\\Session Manager\\AppCompatCache"
    data_raw = regapi.reg_get_value('system', key, "AppCompatCache")
    if data_raw == None or len(data_raw) < 0x1c:
        debug.error("No ShimCache data found")
    bufferas = addrspace.BufferAddressSpace(self._config, data = data_raw)
    shimdata = obj.Object("ShimRecords", offset = 0, vm = bufferas)
    if shimdata == None:
        debug.error("No ShimCache data found")
    for entry in shimdata.Entries:
        if xp:
            # XP stores the path inline as a character array.
            path = str(''.join([str(c) for c in entry.Path]))
            yield self.remove_unprintable(path), entry.LastModified, entry.LastUpdate
        else:
            # Later versions store an offset/length pair into the blob.
            path = bufferas.read(int(entry.PathOffset), int(entry.Length))
            yield self.remove_unprintable(path), entry.LastModified, None
def calculate(self):
    """Yield a dict describing the SYSTEM hive's ShutdownTime value."""
    addr_space = utils.load_as(self._config)
    self.regapi = registryapi.RegistryApi(self._config)
    # Either use the SYSTEM hive by name, or register a user-supplied
    # hive offset under its discovered name.
    if not self._config.HIVE_OFFSET:
        self.regapi.set_current("SYSTEM")
    else:
        name = obj.Object("_CMHIVE", vm = addr_space,
                          offset = self._config.HIVE_OFFSET).get_name()
        self.regapi.all_offsets[self._config.HIVE_OFFSET] = name
        self.regapi.current_offsets[self._config.HIVE_OFFSET] = name
        self.regapi.reset_current()
    currentcs = self.regapi.reg_get_currentcontrolset()
    if currentcs == None:
        currentcs = "ControlSet001"
    shutdownkey = currentcs + "\\Control\\Windows"
    key = self.regapi.reg_get_key("system", shutdownkey)
    value = self.regapi.reg_get_value("system", shutdownkey,
                                      "ShutdownTime", given_root = key)
    result = {
        "key": key,
        "hive": "SYSTEM",
        "valuename": "ShutdownTime",
        "value": value,
        "timestamp": "",
    }
    if value != None:
        # Best-effort: leave timestamp as "" if the value can't be parsed.
        try:
            bufferas = addrspace.BufferAddressSpace(self._config, data = value)
            result["timestamp"] = obj.Object("WinTimeStamp", vm = bufferas,
                                             offset = 0, is_utc = True)
        except (struct.error, TypeError):
            pass
    yield result
def __str__(self):
    """Render creation/modified/MFT-altered/accessed times and the name."""
    space = addrspace.BufferAddressSpace(
        self.obj_vm._config, data=b"\x00\x00\x00\x00\x00\x00\x00\x00")
    # Null timestamp used whenever a raw time field is malformed.
    nulltime = obj.Object("WinTimeStamp", vm=space, offset=0, is_utc=True)

    def stamp_or_null(attr_name):
        # Fall back to the null timestamp on malformed raw bytes.
        try:
            return str(getattr(self, attr_name))
        except struct.error:
            return nulltime

    creation = stamp_or_null("CreationTime")
    modified = stamp_or_null("ModifiedTime")
    mftaltered = stamp_or_null("MFTAlteredTime")
    accessed = stamp_or_null("FileAccessedTime")
    return f"{creation:20} {modified:30} {mftaltered:30} {accessed:30} {self.remove_unprintable(self.get_name())}"
def get_entries(addr_space, regapi):
    """Yield (path, last_modified, last_update) ShimCache entries.

    @param addr_space: kernel address space (provides profile metadata).
    @param regapi: RegistryApi instance for value lookups.

    Ends the generator with a plain ``return`` when no data is found:
    raising StopIteration inside a generator is a RuntimeError on
    Python 3.7+ (PEP 479).
    """
    regapi.reset_current()
    currentcs = regapi.reg_get_currentcontrolset()
    if currentcs == None:
        currentcs = "ControlSet001"
    version = (addr_space.profile.metadata.get('major', 0),
               addr_space.profile.metadata.get('minor', 0))
    xp = False
    if version <= (5, 1):
        key = currentcs + "\\Control\\Session Manager\\AppCompatibility"
        xp = True
    else:
        key = currentcs + "\\Control\\Session Manager\\AppCompatCache"
    data_raw = regapi.reg_get_value('system', key, "AppCompatCache")
    if data_raw == None or len(data_raw) < 0x1c:
        debug.warning("No ShimCache data found")
        return
    bufferas = addrspace.BufferAddressSpace(addr_space.get_config(),
                                            data = data_raw)
    shimdata = obj.Object("ShimRecords", offset = 0, vm = bufferas)
    if shimdata == None:
        debug.warning("No ShimCache data found")
        return
    for e in shimdata.Entries:
        if xp:
            # XP stores the path inline.
            yield e.Path, e.LastModified, e.LastUpdate
        else:
            # Later versions store an offset/length pair into the blob.
            yield ShimCache.remove_unprintable(
                bufferas.read(int(e.PathOffset), int(e.Length))), \
                e.LastModified, None
def _section_chunks(self, sec_name):
    """Get the win32k.sys section as an array of 32-bit unsigned longs.

    @param sec_name: name of the PE section in win32k.sys to search for.

    @returns all chunks on a 4-byte boundary.
    """
    dos_header = obj.Object("_IMAGE_DOS_HEADER",
                            offset=self.Win32KBase,
                            vm=self.obj_vm)
    if dos_header:
        try:
            nt_header = dos_header.get_nt_header()
            sections = [
                sec for sec in nt_header.get_sections()
                if str(sec.Name) == sec_name
            ]
            # There should be exactly one section
            if sections:
                desired_section = sections[0]
                # // 4: the count must be an integer — "/" would produce
                # a float under Python 3.
                return obj.Object("Array",
                                  targetType="unsigned long",
                                  offset=desired_section.VirtualAddress +
                                  dos_header.obj_offset,
                                  count=desired_section.Misc.VirtualSize // 4,
                                  vm=self.obj_vm)
        except ValueError:
            ## This catches PE header parsing exceptions
            pass
    ## Don't try to read an address that doesn't exist
    if not self.Win32KBase:
        return []
    ## In the rare case when win32k.sys PE header is paged or corrupted
    ## thus preventing us from parsing the sections, use the fallback
    ## mechanism of just reading 5 MB (max size of win32k.sys) from the
    ## base of the kernel module.
    data = self.obj_vm.zread(self.Win32KBase, 0x500000)
    ## Fill a Buffer AS with the zread data and set its base to win32k.sys
    ## so we can still instantiate an Array and have each chunk at the
    ## correct offset in virtual memory.
    buffer_as = addrspace.BufferAddressSpace(conf.ConfObject(),
                                             data=data,
                                             base_offset=self.Win32KBase)
    return obj.Object("Array",
                      targetType="unsigned long",
                      offset=self.Win32KBase,
                      count=len(data) // 4,
                      vm=buffer_as)
def get_time(self):
    """Return the boot calendar time as a UnixTimeStamp, or "N/A"."""
    if not hasattr(self, "base_calendartime"):
        return "N/A"
    # Pack as little-endian unsigned 32-bit seconds, then re-parse it
    # through a throwaway buffer address space.
    packed = struct.pack("<I", self.base_calendartime)
    space = addrspace.BufferAddressSpace(self.obj_vm.get_config(),
                                         data = packed)
    return obj.Object("UnixTimeStamp", offset = 0, vm = space,
                      is_utc = True)
def as_timestamp(self):
    """Return tv_sec wrapped as a UTC UnixTimeStamp object."""
    # Round-trip the seconds through a little-endian 32-bit buffer so the
    # standard timestamp object can parse it.
    packed = struct.pack("<I", self.tv_sec)
    space = addrspace.BufferAddressSpace(self.obj_vm.get_config(),
                                         data=packed)
    return obj.Object("UnixTimeStamp", offset=0, vm=space, is_utc=True)
def render_text(self, outfd, data):
    """Render the plugin's default text output"""
    for task, vad, params in data:
        # Get a magic object from the buffer
        buffer_space = addrspace.BufferAddressSpace(
            config=self._config, data=params['decoded_magic'])
        magic_obj = obj.Object(self.magic_struct, offset=0, vm=buffer_space)
        outfd.write("*" * 50 + "\n")
        outfd.write("{0:<30} : {1}\n".format("Process", task.ImageFileName))
        outfd.write("{0:<30} : {1}\n".format("Pid", task.UniqueProcessId))
        outfd.write("{0:<30} : {1}\n".format("Address", vad.Start))
        # grab the URLs from the decoded buffer
        decoded_config = params['decoded_config']
        urls = []
        # Each URL runs from an "http" marker to the next NUL terminator.
        while "http" in decoded_config:
            url = decoded_config[decoded_config.find("http"):]
            urls.append(url[:url.find('\x00')])
            decoded_config = url[url.find('\x00'):]
        for i, url in enumerate(urls):
            outfd.write("{0:<30} : {1}\n".format("URL {0}".format(i), url))
        # Identifier is a NUL-padded character array; drop the padding.
        outfd.write("{0:<30} : {1}\n".format(
            "Identifier",
            ''.join([chr(c) for c in magic_obj.guid if c != 0])))
        outfd.write("{0:<30} : {1}\n".format("Mutant key",
                                             magic_obj.guid_xor_key))
        outfd.write("{0:<30} : {1}\n".format("XOR key", magic_obj.xorkey))
        outfd.write("{0:<30} : {1}\n".format(
            "Registry",
            "HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\{0}".format(
                magic_obj.keyname)))
        outfd.write("{0:<30} : {1}\n".format(" Value 1", magic_obj.value1))
        outfd.write("{0:<30} : {1}\n".format(" Value 2", magic_obj.value2))
        outfd.write("{0:<30} : {1}\n".format(" Value 3", magic_obj.value3))
        outfd.write("{0:<30} : {1}\n".format("Executable",
                                             magic_obj.exefile))
        outfd.write("{0:<30} : {1}\n".format("Data file", magic_obj.datfile))
        # Hexdump of the RC4 config key, rebased to the VAD start address.
        outfd.write("{0:<30} : \n{1}\n".format(
            "Config RC4 key", "\n".join([
                "{0:#010x} {1:<48} {2}".format(vad.Start + o, h, ''.join(c))
                for o, h, c in utils.Hexdump(params['config_key'])
            ])))
        # Subclass hook for variant-specific extra fields.
        self.render_extra(outfd, task, vad, params)
def calculate(self):
    """Carve NTFS $LogFile RCRD pages and yield their LSN records.

    Yields (page offset, offset within page, LSN_RECORD_HEADER,
    record bytes, record buffer space) per valid record.
    """
    address_space = utils.load_as(self._config, astype='physical')
    scanner = RCRDScanner(needles=['RCRD\('])
    # Carve RCRD Headers
    for offset in scanner.scan(address_space):
        lsn_records = []
        lsn_page_header_buff = address_space.zread(offset, 0x28)
        lsn_page_bufferas = addrspace.BufferAddressSpace(
            self._config, data=lsn_page_header_buff)
        rcrd_header = obj.Object('RCRD_HEADER',
                                 vm=lsn_page_bufferas,
                                 offset=0)
        if rcrd_header.isValid:
            # Read the full 4 KB log page containing the records.
            lsn_page_buff = address_space.zread(offset, 4096)
            o = 0x28  # skip the RCRD header
            # Stop early enough to fit at least one 0x58-byte record header.
            while o < (4096 - 0x58):
                # Read in the data containing that might contain possible LSN record
                lsn_header_buff = lsn_page_buff[o:o + 0x58]
                lsn_header_bufferas = addrspace.BufferAddressSpace(
                    self._config, data=lsn_header_buff)
                # Create the LSN record header object
                lsn_record_header = obj.Object('LSN_RECORD_HEADER',
                                               vm=lsn_header_bufferas,
                                               offset=0)
                # Check if it's valid
                if lsn_record_header != None and \
                        lsn_record_header.isValid:
                    # read in the LSN record data
                    lsn_record_buff = lsn_page_buff[
                        o + 0x58:o + 0x58 + lsn_record_header.RedoLength +
                        lsn_record_header.UndoLength]
                    lsn_record_bufferas = addrspace.BufferAddressSpace(
                        self._config, data=lsn_record_buff)
                    # yield the results
                    yield offset, o, lsn_record_header, lsn_record_buff, lsn_record_bufferas
                    # Increase the offset within the page buffer
                    o = o + 0x58 + lsn_record_header.RedoLength + \
                        lsn_record_header.UndoLength
                else:
                    # Not a record: slide forward by the 8-byte alignment.
                    o += 8
def calculate(self):
    """Determines the address space"""
    profilelist = [
        p.__name__
        for p in registry.get_plugin_classes(obj.Profile).values()
    ]
    # 64-bit Win8+/2012+ profiles encrypt the KDBG block; they need the
    # alternative VolMagic-based discovery path below.
    encrypted_kdbg_profiles = []
    proflens = {}  # profile name -> expected KDBG header bytes
    maxlen = 0
    origprofile = self._config.PROFILE
    for p in profilelist:
        self._config.update('PROFILE', p)
        buf = addrspace.BufferAddressSpace(self._config)
        if buf.profile.metadata.get('os', 'unknown') == 'windows':
            proflens[p] = str(obj.VolMagic(buf).KDBGHeader)
            maxlen = max(maxlen, len(proflens[p]))
            if (buf.profile.metadata.get('memory_model', '64bit') == '64bit'
                    and (buf.profile.metadata.get('major', 0),
                         buf.profile.metadata.get('minor', 0)) >= (6, 2)):
                encrypted_kdbg_profiles.append(p)
    # Restore the user's originally selected profile.
    self._config.update('PROFILE', origprofile)
    # keep track of the number of potential KDBGs we find
    count = 0
    if origprofile not in encrypted_kdbg_profiles:
        scanner = KDBGScanner(needles=proflens.values())
        aspace = utils.load_as(self._config, astype='any')
        for offset in scanner.scan(aspace):
            val = aspace.read(offset, maxlen + 0x10)
            for l in proflens:
                if val.find(proflens[l]) >= 0:
                    kdbg = obj.Object("_KDDEBUGGER_DATA64",
                                      offset=offset,
                                      vm=aspace)
                    yield l, kdbg
                    count += 1
    # only perform the special win8/2012 scan if we didn't find
    # any others and if a virtual x64 address space is available
    if count == 0:
        if origprofile in encrypted_kdbg_profiles:
            encrypted_kdbg_profiles = [origprofile]
        for profile in encrypted_kdbg_profiles:
            self._config.update('PROFILE', profile)
            aspace = utils.load_as(self._config, astype='any')
            # vtop implies a virtual address space (required by this path).
            if hasattr(aspace, 'vtop'):
                for kdbg in obj.VolMagic(
                        aspace).KDBG.generate_suggestions():
                    yield profile, kdbg
def flags(self):
    """Returns the file's flags"""
    # Mask the raw flags, pack them natively, and re-parse them through a
    # buffer space so the Flags object can decode the bitmap.
    masked = struct.pack('=I', self.FileFlags & self.FileFlagsMask)
    space = addrspace.BufferAddressSpace(self.obj_vm.get_config(), 0, masked)
    bitmap = {
        'Debug': 0,
        'Prerelease': 1,
        'Patched': 2,
        'Private Build': 3,
        'Info Inferred': 4,
        'Special Build': 5,
    }
    return obj.Object('Flags', offset = 0, vm = space, bitmap = bitmap)
def convertUnixTime(self, nsec):
    """ Convert unix epoch time in nanoseconds to a date string

    @param nsec: Unix timestamp in nanoseconds.
    @returns: a UnixTimeStamp object, or None when the value cannot be
              packed/parsed (e.g. outside the unsigned 32-bit range).
    """
    try:
        # Nanoseconds -> whole seconds, packed little-endian 32-bit.
        time_val = struct.pack("<I", nsec // 1000000000)
        time_buf = addrspace.BufferAddressSpace(self._config, data=time_val)
        return obj.Object("UnixTimeStamp",
                          offset=0,
                          vm=time_buf,
                          is_utc=True)
    except Exception:
        # Deliberately broad best-effort fallback (kept from the original);
        # the unused "as e" binding was dropped.
        return None
def render_text(self, outfd, data):
    """Render one text report per decoded Zeus configuration.

    Also dumps the config/credential RC4 keys to --dump-dir when set.
    """
    for p, start, url, config_key, creds_key, decoded_config, decoded_magic in data:
        # get a magic object from the buffer
        buffer_space = addrspace.BufferAddressSpace(config=self._config,
                                                    data=decoded_magic)
        buffer_space.profile.add_types(zeus_types)
        magic_obj = obj.Object('_ZEUS_MAGIC', offset=0, vm=buffer_space)
        # This simulates how Zeus uses PathRenameExtensionW
        datfile = self.parse_string(magic_obj.datfile)[0:-4] + ".dat"
        # Assemble the whole report as one string before writing it out.
        syntax = "-" * 50 + '\n' + \
            "Process: {0}\n".format(p.ImageFileName) + \
            "Pid: {0}\n".format(p.UniqueProcessId) + \
            "Address: 0x{0:X}\n".format(start) + \
            "URL: {0}\n".format(url) + \
            "Identifier: {0}\n".format(''.join([chr(c) for c in magic_obj.guid if c != 0])) + \
            "Mutant key: 0x{0:X}\n".format(magic_obj.guid_xor_key) + \
            "XOR key: 0x{0:X}\n".format(magic_obj.xorkey) + \
            "Registry: HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\{0}\n".format(self.parse_string(magic_obj.keyname)) + \
            " Value 1: {0}\n".format(self.parse_string(magic_obj.value1)) + \
            " Value 2: {0}\n".format(self.parse_string(magic_obj.value2)) + \
            " Value 3: {0}\n".format(self.parse_string(magic_obj.value3)) + \
            "Executable: {0}\n".format(self.parse_string(magic_obj.exefile)) + \
            "Data file: {0}\n".format(datfile) + "\n" + \
            "Config RC4 Key:\n{0}\n".format(self.get_hex(config_key)) + \
            "Credential RC4 Key:\n{0}\n".format(self.get_hex(creds_key))
        if self._config.DUMP_DIR:
            # Persist both RC4 keys, named by pid and injection address.
            fname_conf = "{0}.{1:#x}.conf.key".format(
                p.UniqueProcessId, start)
            fname_cred = "{0}.{1:#x}.cred.key".format(
                p.UniqueProcessId, start)
            f = open(os.path.join(self._config.DUMP_DIR, fname_conf), "wb")
            if f:
                f.write(config_key)
                f.close()
            f = open(os.path.join(self._config.DUMP_DIR, fname_cred), "wb")
            if f:
                f.write(creds_key)
                f.close()
        outfd.write(syntax)
def calculate(self):
    """Carve USN_RECORD_V2 entries out of the physical address space."""
    address_space = utils.load_as(self._config, astype='physical')
    # Regex needle matching the fixed fields of a version-2 USN record.
    scanner = USNScanner(needles=[
        '.{2}\x00\x00\x02\x00\x00\x00.{31}\x01.{17}[\x00\x01]\x3C\x00'
    ])
    #print "Scanning for USNJRNL entries"
    for hit in scanner.scan(address_space):
        # Record header (0x3c bytes) plus room for the file name.
        usn_buff = address_space.zread(hit, 0x3c + 1024)
        bufferas = addrspace.BufferAddressSpace(self._config, data=usn_buff)
        usn_entry = obj.Object('USN_RECORD_V2', vm=bufferas, offset=0)
        if usn_entry.isValid:
            yield hit, usn_entry
def advance_one(self, next_off, mft_buff, end):
    """Slide forward from next_off looking for the next known attribute.

    @param next_off: starting offset within the record buffer.
    @param mft_buff: raw MFT entry bytes.
    @param end: maximum number of byte positions to try.
    @returns: the RESIDENT_ATTRIBUTE at the first offset whose type id is
              recognised (also returned on a struct error, or as the last
              candidate when the scan exhausts); None if no step was taken.
    """
    candidate = None
    for step in range(end + 1):
        space = addrspace.BufferAddressSpace(self._config, data=mft_buff)
        candidate = obj.Object('RESIDENT_ATTRIBUTE',
                               vm=space,
                               offset=next_off + step)
        try:
            # != None (not "is not None"): mirrors the original NoneObject
            # friendly comparison semantics.
            if ATTRIBUTE_TYPE_ID.get(int(candidate.Header.Type), None) != None:
                return candidate
        except struct.error:
            return candidate
    return candidate
def get_task_start_time(self):
    """Return the task's wall-clock start time as a UnixTimeStamp.

    Returns "" when the computed seconds value cannot be packed into an
    unsigned 32-bit integer.
    """
    start_time = self.start_time
    # NOTE(review): dividing tv_nsec by nsecs_per and then by 100 is
    # unusual — confirm against linux_common.nsecs_per whether the extra
    # /100 factor is intended.
    start_secs = start_time.tv_sec + (start_time.tv_nsec / linux_common.nsecs_per / 100)
    # Monotonic start offset + boot time = wall-clock seconds.
    sec = self.get_boot_time() + start_secs
    # convert the integer as little endian
    try:
        data = struct.pack("<I", sec)
    except struct.error:
        # in case we exceed 0 <= number <= 4294967295
        return ""
    bufferas = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = data)
    dt = obj.Object("UnixTimeStamp", offset = 0, vm = bufferas, is_utc = True)
    return dt
def start_time(self):
    """Return the process start time as a UTC UnixTimeStamp.

    Returns "" when the seconds value cannot be packed into an unsigned
    32-bit integer (dead processes can carry garbage timestamps).
    """
    # tv_usec holds microseconds, hence the 1e6 divisor
    # (the original local was misleadingly named nsecs_per).
    usecs_per_sec = 1000000
    started = self.p_start
    seconds = started.tv_sec + (started.tv_usec / usecs_per_sec)
    # convert the integer as little endian. struct.error fires when the
    # value is negative or exceeds the 32-bit "I" range — e.g. for
    # processes found via mac_dead_procs whose timestamps are invalid.
    try:
        packed = struct.pack("<I", seconds)
    except struct.error:
        return ""
    space = addrspace.BufferAddressSpace(self.obj_vm.get_config(),
                                         data = packed)
    return obj.Object("UnixTimeStamp", offset = 0, vm = space,
                      is_utc = True)