def collect(self):
    """Render output."""
    seen_hits = 0
    for run in self.generate_memory_ranges():
        for rule, address in self.generate_hits(run):
            seen_hits += 1
            if seen_hits >= self.plugin_args.hits:
                break

            # A hit on the physical address space gets a PFN based
            # context; other hits are resolved symbolically.
            if run.data.get("type") == "PhysicalAS":
                context = pfn.PhysicalAddressContext(self.session, address)
            else:
                context = utils.FormattedAddress(
                    self.session.address_resolver, address,
                    max_distance=2**64)

            dump_start = address - self.plugin_args.pre_context
            dump_length = (self.plugin_args.context +
                           self.plugin_args.pre_context)
            yield dict(
                Owner=run.data.get("task") or run.data.get("type"),
                Rule=rule,
                Offset=address,
                hexdump=utils.HexDumpedString(
                    run.address_space.read(dump_start, dump_length)),
                Context=context,
                # Provide the address space where the hit is reported.
                address_space=run.address_space,
                run=run)
def collect(self):
    """Render output."""
    matches = 0
    for run in self.generate_memory_ranges():
        for rule, address, _, _ in self.generate_hits(run):
            matches += 1
            if matches >= self.plugin_args.hits:
                break

            # Physical memory hits are summarised via the rammap plugin;
            # other hits go through the address resolver.
            if run.data.get("type") == "PhysicalAS":
                rammap_plugin = self.session.plugins.rammap(
                    start=address, end=address + 1)
                symbol = rammap_plugin.summary()[0]
            else:
                symbol = self.session.address_resolver.format_address(
                    address)

            owner = run.data.get("task") or run.data.get("type")
            surrounding = run.address_space.read(
                address - self.plugin_args.pre_context,
                self.plugin_args.context + self.plugin_args.pre_context)
            yield (owner, rule, address,
                   utils.HexDumpedString(surrounding), symbol)
def render_low_frag_info(self, heap, renderer):
    """Displays information about the low fragmentation front end."""
    renderer.format("Low Fragmentation Front End Information:\n")
    renderer.table_header([
        dict(name="Entry", style="address"),
        ("Alloc", "allocation_length", "4"),
        ("Length", "length", ">4"),
        dict(name="Data"),
    ])

    # Bucket the LFH allocations by allocation size so they can be shown
    # in increasing size order, then by offset within each bucket.
    size_buckets = {}
    for entry, allocation_length in self.enumerate_lfh_heap_allocations(
            heap):
        size_buckets.setdefault(allocation_length, []).append(entry)

    for allocation_length, bucket in sorted(size_buckets.iteritems()):
        bucket.sort(key=lambda e: e.obj_offset)
        for entry in bucket:
            renderer.table_row(
                entry,
                allocation_length,
                entry.length,
                utils.HexDumpedString(entry.v()[:64]))
def collect(self):
    """Render output.

    Scans the physical address space between plugin_args.start and
    start + limit, yielding one dict per yara string hit together with
    some surrounding hexdump context. Stops once plugin_args.hits
    matches have been seen.
    """
    count = 0
    address_space = self.session.physical_address_space
    for buffer_as in scan.BufferASGenerator(
            self.session, address_space,
            self.plugin_args.start,
            self.plugin_args.start + self.plugin_args.limit):
        self.session.report_progress(
            "Scanning buffer %#x->%#x (%#x)",
            buffer_as.base_offset, buffer_as.end(),
            buffer_as.end() - buffer_as.base_offset)

        for match in self.rules.match(data=buffer_as.data):
            for buffer_offset, _, _ in match.strings:
                # Translate the buffer-relative offset to a physical one.
                hit_offset = buffer_offset + buffer_as.base_offset
                count += 1
                if count >= self.plugin_args.hits:
                    # BUG FIX: the original `break` only left the
                    # innermost strings loop, so every remaining buffer
                    # was still generated and scanned while yielding
                    # nothing. Returning terminates the whole scan as
                    # soon as the hit limit is reached; the yielded
                    # results are unchanged.
                    return

                yield dict(
                    Rule=match.rule,
                    Offset=hit_offset,
                    hexdump=utils.HexDumpedString(
                        self.session.physical_address_space.read(
                            hit_offset - self.plugin_args.pre_context,
                            self.plugin_args.context +
                            self.plugin_args.pre_context)))
def collect(self):
    """Hexdump the start of every file produced by the parent plugin."""
    for hit in super(IRDump, self).collect():
        path = hit.get("path")
        if not path:
            continue

        fd = path.open()
        if not fd:
            continue

        yield dict(divider=path.filename)

        width = self.plugin_args.width
        to_read = min(self.plugin_args.length,
                      width * self.plugin_args.rows)
        for offset in utils.xrange(self.plugin_args.start,
                                   self.plugin_args.start + to_read,
                                   width):
            fd.seek(offset)
            data = fd.read(width)
            # A short/empty read means we ran off the end of the file.
            if not data:
                break

            yield dict(offset=offset,
                       FileSpec=path.filename,
                       hexdump=utils.HexDumpedString(data),
                       nowrap=True,
                       hex_width=width)
def collect(self):
    """Yield hexdump rows, resuming from self.offset on each call."""
    # Clamp the amount to read to the end of the address space and to
    # the optional length limit.
    to_read = min(
        self.width * self.rows,
        self.plugin_args.address_space.end() - self.plugin_args.offset)
    if self.plugin_args.length is not None:
        to_read = min(to_read, self.plugin_args.length)

    # Annotate every byte which resolves to a nearby symbol.
    resolver = self.session.address_resolver
    for addr in range(self.offset, self.offset + to_read):
        names = resolver.format_address(addr, max_distance=0)
        if names:
            self.address_map.AddRange(addr, addr + 1, ",".join(names))

    # Keep a fallback in case the row loop below never runs.
    row_start = self.offset
    for row_start in range(self.offset, self.offset + to_read,
                           self.width):
        # Add a symbol name for the start of each row.
        hex_data = utils.HexDumpedString(
            self.plugin_args.address_space.read(row_start, self.width),
            highlights=self.address_map.HighlightRange(
                row_start, row_start + self.width, relative=True))
        yield dict(offset=row_start,
                   hexdump=hex_data,
                   comment=self.address_map.GetComment(
                       row_start, row_start + self.width),
                   nowrap=True,
                   hex_width=self.width)

    # Advance the offset so we can continue from this offset next time we
    # get called.
    self.offset = row_start
def collect_scan_physical(self):
    """This method scans the physical memory."""
    for rule, address, _, _ in self.generate_hits(
            self.physical_address_space):
        # A hit past the requested end terminates the scan.
        if address > self.end:
            return

        surrounding = self.physical_address_space.read(
            address - self.pre_context,
            self.context + self.pre_context)
        yield (None, rule, address, utils.HexDumpedString(surrounding))
def collect_kernel_scan(self):
    """Scan the default (kernel) address space for rule hits."""
    kernel_as = self.session.default_address_space
    for rule, address, _, _ in self.generate_hits(kernel_as):
        # A hit past the requested end terminates the scan.
        if address > self.end:
            return

        symbol = self.session.address_resolver.format_address(address)
        surrounding = kernel_as.read(address - self.pre_context,
                                     self.context + self.pre_context)
        yield (None, rule, address,
               utils.HexDumpedString(surrounding), symbol)
def collect(self):
    """Hexdump every known allocation offset of the selected zone."""
    zone = self.session.plugins.search(
        "(select zone from zones() where zone.name == {zone_name}).zone",
        query_parameters=dict(zone_name=self.zone_name),
        silent=True).first_result

    # Without a matching zone there is nothing to dump.
    if not zone:
        raise ValueError("No such zone %r." % self.zone_name)

    elem_size = zone.elem_size
    for offset in zone.known_offsets:
        raw = zone.obj_vm.read(offset, elem_size)
        yield dict(offset=offset, data=utils.HexDumpedString(raw))
def render_process_heap_info(self, heap, renderer):
    """Render backend (and optionally LFH) info for one process heap.

    Skips the heap entirely when the user restricted output to specific
    heap indexes and this heap is not among them.
    """
    if (self.plugin_args.heaps and
            heap.ProcessHeapsListIndex not in self.plugin_args.heaps):
        return

    # Only render heaps with a plausible list index (1..64) —
    # NOTE(review): presumably a sanity guard against corrupt heap
    # structures; confirm the upper bound against the profile.
    if 1 <= heap.ProcessHeapsListIndex <= 64:
        renderer.format("Heap {0}: {1:#x} ({2})\nBackend Info:\n\n",
                        heap.ProcessHeapsListIndex,
                        heap.BaseAddress,
                        heap.FrontEndHeapType)

        renderer.table_header([
            dict(name="Segment", type="TreeNode", width=18,
                 child=dict(style="address")),
            ("End", "segment_end", "[addr]"),
            ("Length", "length", "8"),
            dict(name="Data"),
        ])

        for seg in heap.Segments:
            seg_start = seg.FirstEntry.obj_offset
            seg_end = seg.LastValidEntry.v()

            # Segment summary row (depth 1); its entries nest below at
            # depth 2.
            renderer.table_row(
                seg_start, seg_end, seg_end - seg_start, depth=1)

            for entry in seg.FirstEntry.walk_list("NextEntry", True):
                # If this is the last entry it goes until the end of the
                # segment.
                # Data starts 0x10 bytes past the entry — assumes a
                # 0x10-byte entry header; TODO confirm against profile.
                start = entry.obj_offset + 0x10
                if start > seg_end:
                    break

                if entry.Flags.LAST_ENTRY:
                    end = seg.LastValidEntry.v()
                else:
                    # Size is in 16-byte allocation units.
                    end = entry.obj_offset + entry.Size * 16

                # Show at most 16 bytes of the allocation's data.
                data = heap.obj_vm.read(start, min(16, end-start))

                renderer.table_row(
                    entry, end, end - start,
                    utils.HexDumpedString(data),
                    depth=2)

    # Render low fragmentation heap details when the front end uses it.
    if heap.FrontEndHeapType.LOW_FRAG:
        self.render_low_frag_info(heap, renderer)
def collect_task_scan(self, task):
    """Scan a task's address space."""
    # Never scan past the top of user mode.
    scan_end = min(self.session.GetParameter("highest_usermode_address"),
                   self.end)
    task_as = task.get_process_address_space()

    for rule, address, _, _ in self.generate_hits(task_as, end=scan_end):
        if address > self.end:
            return

        symbol = self.session.address_resolver.format_address(address)
        surrounding = task_as.read(address - self.pre_context,
                                   self.context + self.pre_context)
        yield (task, rule, address,
               utils.HexDumpedString(surrounding), symbol)
def collect(self):
    """Collect hits in two passes: scan physical memory with the unified
    rule, then re-match the original rules inside each owning context."""
    address_space = self.session.physical_address_space

    # Pass 1: record every unified-rule string hit against its context.
    for buffer_as in scan.BufferASGenerator(
            self.session, address_space,
            self.plugin_args.start,
            self.plugin_args.start + self.plugin_args.limit):
        self.session.report_progress(
            "Scanning buffer %#x->%#x (%#x)",
            buffer_as.base_offset, buffer_as.end(),
            buffer_as.end() - buffer_as.base_offset)

        for match in self.unified_rule.match(data=buffer_as.data):
            for buffer_offset, string_name, value in sorted(match.strings):
                self.context_buffer.add_hit(
                    string_name,
                    buffer_offset + buffer_as.base_offset,
                    value)

    # Pass 2: re-run the original expression on all unique contexts.
    for context, original_offset_map, pseudo_data in (
            self.context_buffer.get_combined_context_buffers()):
        reported = set()
        self.session.report_progress(
            "Scanning pseudo buffer of length %d" % len(pseudo_data))

        for match in self.rules.match(data=pseudo_data):
            self.session.report_progress()

            # Only report a single hit of the same rule on the same
            # context.
            dedup_key = (match.rule, context)
            if dedup_key in reported:
                continue
            reported.add(dedup_key)

            for buffer_offset, _, value in match.strings:
                hit_offset = original_offset_map.get(buffer_offset)
                if hit_offset is None:
                    continue

                # Integer contexts are fed to the _EPROCESS constructor.
                if isinstance(context, int):
                    owner = self.session.profile._EPROCESS(context)
                else:
                    owner = context

                yield dict(
                    Owner=owner,
                    Rule=match.rule,
                    Offset=hit_offset,
                    HexDump=utils.HexDumpedString(
                        address_space.read(
                            hit_offset - self.plugin_args.pre_context,
                            self.plugin_args.context +
                            self.plugin_args.pre_context)),
                    Context=pfn.PhysicalAddressContext(
                        self.session, hit_offset))
def collect(self):
    """Render output."""
    scanner = BaseYaraASScanner(
        session=self.session,
        address_space=self.session.physical_address_space,
        rules=self.rules)

    reported = 0
    for rule, address, _, _ in scanner.scan(
            offset=self.plugin_args.start,
            maxlen=self.plugin_args.limit):
        reported += 1
        if reported >= self.plugin_args.hits:
            break

        surrounding = self.session.physical_address_space.read(
            address - self.plugin_args.pre_context,
            self.plugin_args.context + self.plugin_args.pre_context)
        yield (rule, address, utils.HexDumpedString(surrounding))
def render(self, renderer):
    """Render a hexdump table of the address space from self.offset."""
    # NOTE(review): deliberately `== None`, not `is None` — the very
    # next line reads self.offset.reason, so self.offset is presumably a
    # NoneObject-style wrapper that compares equal to None while still
    # carrying an error reason. Confirm before "fixing" this comparison.
    if self.offset == None:
        renderer.format("Error: {0}\n", self.offset.reason)
        return

    # Clamp the read size to the end of the address space and to the
    # optional length limit.
    to_read = min(self.width * self.rows,
                  self.address_space.end() - self.offset)
    if self.length is not None:
        to_read = min(to_read, self.length)

    renderer.table_header(
        [("Offset", "offset", "[addr]"),
         dict(name="Data", style="hexdump", hex_width=self.width),
         ("Comment", "comment", "40")],
        suppress_headers=self.suppress_headers)

    # Pre-annotate each byte which resolves to a known symbol so rows
    # can be highlighted and commented below.
    resolver = self.session.address_resolver
    for offset in range(self.offset, self.offset + to_read):
        comment = resolver.format_address(offset, max_distance=0)
        if comment:
            self.address_map.AddRange(offset, offset + 1,
                                      ",".join(comment))

    # Fallback in case the row loop below never runs (to_read == 0).
    offset = self.offset
    for offset in range(self.offset, self.offset + to_read, self.width):
        # Add a symbol name for the start of each row.
        hex_data = utils.HexDumpedString(
            self.address_space.read(offset, self.width),
            highlights=self.address_map.HighlightRange(
                offset, offset + self.width, relative=True))

        comment = self.address_map.GetComment(offset,
                                              offset + self.width)
        renderer.table_row(offset, hex_data, comment, nowrap=True)

    # Advance the offset so we can continue from this offset next time we
    # get called.
    self.offset = offset
def collect(self):
    """Scan each requested file, yielding rule hits with hexdump context."""
    matches = 0
    for path in self.plugin_args.paths:
        file_info = common.FileFactory(path, session=self.session)

        # Wrap the file in a Run so the generic hit generator can scan it.
        file_as = standard.FDAddressSpace(session=self.session,
                                          fhandle=file_info.open())
        run = addrspace.Run(start=0, end=file_info.st_size,
                            file_offset=0, address_space=file_as)

        for rule, address, _, _ in self.generate_hits(run):
            matches += 1
            if matches >= self.plugin_args.hits:
                break

            surrounding = run.address_space.read(
                address - self.plugin_args.pre_context,
                self.plugin_args.context + self.plugin_args.pre_context)
            yield (file_info, rule, address,
                   utils.HexDumpedString(surrounding), None)
def collect_task_scan(self, task):
    """Scan a task's address space.

    In Windows pagetable entries outside the VAD might be uninitialized
    and this will lead to scanning massive regions of mostly unmapped
    memory. When asked to scan process memory we only scan memory inside
    the VAD.
    """
    # We have to change process context so the address resolver and
    # task_as line up.
    task_as = self.session.default_address_space
    scanner = yarascanner.BaseYaraASScanner(
        profile=self.profile, session=self.session,
        address_space=task_as, rules=self.rules)

    total_hits = 0
    for vad in sorted(task.RealVadRoot.traverse(),
                      key=lambda v: v.Start):
        self.session.report_progress(
            "Scanning VAD %s from %#0x (%#0x)", task.name,
            vad.Start, vad.Length)

        # Only scan the VAD region.
        for hit in scanner.scan(vad.Start, vad.Length):
            total_hits += 1
            rule, address = hit[0], hit[1]
            symbol = self.session.address_resolver.format_address(address)
            yield (task, rule, address,
                   utils.HexDumpedString(task_as.read(address, 0x40)),
                   symbol)

            # If we exceed the total hit count we are done.
            if total_hits > self.hits:
                return
def collect(self):
    """Two-pass scan: bucket unified-rule hits per physical page (PFN),
    group them by owning context, then re-run the original rules on a
    per-context pseudo buffer and report de-duplicated hits."""
    pfn_hits = {}       # pfn id -> set of (hit_offset, matched value)
    hits = {}           # context -> {hit_offset: matched value}
    pfn_context = {}    # pfn id -> context strings for that page
    address_space = self.session.physical_address_space

    # Pass 1: scan physical memory with the unified rule and bucket each
    # string hit by its physical page.
    for buffer_as in scan.BufferASGenerator(
            self.session, address_space,
            self.plugin_args.start,
            self.plugin_args.start + self.plugin_args.limit):
        self.session.report_progress(
            "Scanning buffer %#x->%#x (%#x)",
            buffer_as.base_offset, buffer_as.end(),
            buffer_as.end() - buffer_as.base_offset)

        for match in self.unified_rule.match(data=buffer_as.data):
            for buffer_offset, _, value in sorted(match.strings):
                hit_offset = buffer_offset + buffer_as.base_offset
                pfn_id = hit_offset >> 12  # 4096-byte pages.
                # Resolve the owning context(s) for this page only once.
                if pfn_id not in pfn_context:
                    context_strings = self.get_contexts(
                        address_space, pfn_id << 12)
                    if context_strings:
                        pfn_context[pfn_id] = context_strings
                    else:
                        self.session.logging.debug(
                            "No process context for %#x", hit_offset)

                pfn_hits.setdefault(pfn_id, set()).add(
                    (hit_offset, value))

    # Regroup the per-page hits by owning context.
    for pfn_hit, hit_offsets in pfn_hits.iteritems():
        if pfn_hit in pfn_context:
            contexts = pfn_context[pfn_hit]
            for hit_offset, value in hit_offsets:
                for context in contexts:
                    if context:
                        hits.setdefault(context, {})[hit_offset] = value
                    else:
                        self.session.logging.debug(
                            "Context for %#x invalid", hit_offset)

    # Now re-run the original expression on all unique contexts.
    pad = "\xFF" * 10  # Separator between concatenated signature hits.
    for context, context_data in hits.iteritems():
        data = []
        data_len = 0

        # Map the original offset to the dummy buffer offset.
        omap = {}
        for hit_offset, value in context_data.iteritems():
            omap[data_len] = hit_offset

            # Some padding separates out the sigs.
            data.append(value)
            data.append(pad)
            data_len += len(value) + len(pad)

        pseudo_data = "".join(data)
        seen = set()

        # Report any hits of the original sig on this context.
        for match in self.rules.match(data=pseudo_data):
            # Only report a single hit of the same rule on the same
            # context.
            dedup_key = (match.rule, context)
            if dedup_key in seen:
                continue
            seen.add(dedup_key)

            for buffer_offset, _, value in match.strings:
                # Translate the pseudo-buffer offset back to the original
                # physical offset; misses are padding/partial overlaps.
                hit_offset = omap.get(buffer_offset)
                if hit_offset is not None:
                    # Integer contexts are passed to the _EPROCESS
                    # constructor — presumably process offsets; TODO
                    # confirm against get_contexts().
                    if isinstance(context, int):
                        owner = self.session.profile._EPROCESS(context)
                    else:
                        owner = context

                    yield dict(
                        Owner=owner,
                        Rule=match.rule,
                        Offset=hit_offset,
                        HexDump=utils.HexDumpedString(
                            address_space.read(
                                hit_offset -
                                self.plugin_args.pre_context,
                                self.plugin_args.context +
                                self.plugin_args.pre_context)),
                        Context=pfn.PhysicalAddressContext(
                            self.session, hit_offset))
def column_types(self):
    """Describe the types of the columns this plugin produces."""
    empty_hexdump = utils.HexDumpedString("")
    empty_comment = utils.AttributedString("")
    return dict(offset=int,
                hexdump=empty_hexdump,
                comment=empty_comment)