def collect(self): """Render output.""" count = 0 address_space = self.session.physical_address_space for buffer_as in scan.BufferASGenerator( self.session, address_space, self.plugin_args.start, self.plugin_args.start + self.plugin_args.limit): self.session.report_progress( "Scanning buffer %#x->%#x (%#x)", buffer_as.base_offset, buffer_as.end(), buffer_as.end() - buffer_as.base_offset) for match in self.rules.match(data=buffer_as.data): for buffer_offset, _, _ in match.strings: hit_offset = buffer_offset + buffer_as.base_offset count += 1 if count >= self.plugin_args.hits: break yield dict( Match=match, Rule=match.rule, Offset=hit_offset, hexdump=utils.HexDumpedString( self.session.physical_address_space.read( hit_offset - self.plugin_args.pre_context, self.plugin_args.context + self.plugin_args.pre_context)))
def render_low_frag_info(self, heap, renderer):
    """Displays information about the low fragmentation front end."""
    renderer.format("Low Fragmentation Front End Information:\n")
    renderer.table_header([
        dict(name="Entry", style="address"),
        ("Alloc", "allocation_length", "4"),
        ("Length", "length", ">4"),
        dict(name="Data"),
    ])

    # Render the LFH allocations in increasing allocation sizes. Collect
    # them first, then display sorted by allocation size, then by offset.
    entries_by_size = {}
    for entry, allocation_length in self.enumerate_lfh_heap_allocations(
            heap):
        entries_by_size.setdefault(allocation_length, []).append(entry)

    for allocation_length, entries in sorted(entries_by_size.items()):
        for entry in sorted(entries, key=lambda x: x.obj_offset):
            data = entry.v()[:64]

            renderer.table_row(
                entry,
                allocation_length,
                entry.length,
                utils.HexDumpedString(data),
            )
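# The grouping logic above is plain Python: bucket items under a key with
# dict.setdefault, then walk the buckets in sorted key order and sort within
# each bucket. A standalone sketch with assumed sample data:
allocations = [(0x2010, 32), (0x1000, 16), (0x3050, 32), (0x1200, 16)]

entries_by_size = {}
for offset, size in allocations:
    entries_by_size.setdefault(size, []).append(offset)

for size, offsets in sorted(entries_by_size.items()):
    for offset in sorted(offsets):
        print("size %d at %#x" % (size, offset))
# Prints the 16-byte entries (0x1000, 0x1200) before the 32-byte ones.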
def collect(self):
    for hit in super(IRDump, self).collect():
        path = hit.get("path")
        if path:
            fd = path.open()
            if fd:
                yield dict(divider=path.filename)

                to_read = min(
                    self.plugin_args.length,
                    self.plugin_args.width * self.plugin_args.rows)
                for offset in utils.xrange(
                        self.plugin_args.start,
                        self.plugin_args.start + to_read,
                        self.plugin_args.width):

                    fd.seek(offset)
                    data = fd.read(self.plugin_args.width)
                    if not data:
                        break

                    yield dict(
                        offset=offset,
                        FileSpec=path.filename,
                        hexdump=utils.HexDumpedString(data),
                        nowrap=True,
                        hex_width=self.plugin_args.width)
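# The seek/read loop above is the standard way to page through a file in
# fixed-width rows, stopping at EOF when read() returns an empty string. A
# minimal stdlib-only sketch (binascii.hexlify stands in for Rekall's
# utils.HexDumpedString):
import binascii

def dump_file(path, start=0, width=16, rows=4):
    with open(path, "rb") as fd:
        for offset in range(start, start + width * rows, width):
            fd.seek(offset)
            data = fd.read(width)
            if not data:  # Past EOF - stop rather than emit empty rows.
                break
            print("%#010x  %s" % (offset, binascii.hexlify(data).decode()))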
def collect(self): """Render output.""" count = 0 for run in self.generate_memory_ranges(): for match, address in self.generate_hits(run): count += 1 if count >= self.plugin_args.hits: break # Result hit the physical memory - Get some context on this hit. if run.data.get("type") == "PhysicalAS": symbol = pfn.PhysicalAddressContext(self.session, address) else: symbol = utils.FormattedAddress( self.session.address_resolver, address, max_distance=2**64) yield dict( Owner=run.data.get("task") or run.data.get("type"), Match=match, Rule=match.rule, Offset=address, hexdump=utils.HexDumpedString( run.address_space.read( address - self.plugin_args.pre_context, self.plugin_args.context + self.plugin_args.pre_context)), Context=symbol, # Provide the address space where the hit is reported. address_space=run.address_space, run=run)
def collect(self):
    to_read = min(
        self.width * self.rows,
        self.plugin_args.address_space.end() - self.plugin_args.offset)

    if self.plugin_args.length is not None:
        to_read = min(to_read, self.plugin_args.length)

    resolver = self.session.address_resolver
    for offset in range(self.offset, self.offset + to_read):
        comment = resolver.format_address(offset, max_distance=0)
        if comment:
            self.address_map.AddRange(offset, offset + 1,
                                      ",".join(comment))

    offset = self.offset
    for offset in range(self.offset, self.offset + to_read, self.width):
        # Add a symbol name for the start of each row.
        hex_data = utils.HexDumpedString(
            self.plugin_args.address_space.read(offset, self.width),
            highlights=self.address_map.HighlightRange(
                offset, offset + self.width, relative=True))

        comment = self.address_map.GetComment(offset, offset + self.width)

        yield dict(offset=offset,
                   hexdump=hex_data,
                   comment=comment,
                   nowrap=True,
                   hex_width=self.width)

    # Advance the offset so we can continue from this offset next time we
    # get called.
    self.offset = offset
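# Saving the last row offset back onto the plugin (self.offset) is what lets
# an interactive session re-invoke collect() and continue where the previous
# screen ended. A self-contained sketch of the same resumable-cursor pattern
# over a byte buffer (assumed names, not the Rekall plugin API):
class ResumableDump(object):
    def __init__(self, data, width=16, rows=4):
        self.data, self.width, self.rows = data, width, rows
        self.offset = 0

    def collect(self):
        end = min(self.offset + self.width * self.rows, len(self.data))
        offset = self.offset
        for offset in range(self.offset, end, self.width):
            yield offset, self.data[offset:offset + self.width]
        # Remember where to continue on the next call.
        self.offset = offset + self.width

dumper = ResumableDump(bytes(range(256)))
list(dumper.collect())   # Rows 0x00-0x3f.
list(dumper.collect())   # Continues at 0x40.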
def collect(self):
    zone = self.session.plugins.search(
        "(select zone from zones() where zone.name == {zone_name}).zone",
        query_parameters=dict(zone_name=self.zone_name),
        silent=True).first_result

    if not zone:
        raise ValueError("No such zone %r." % self.zone_name)

    for offset in zone.known_offsets:
        yield dict(offset=offset,
                   data=utils.HexDumpedString(
                       zone.obj_vm.read(offset, zone.elem_size)))
def render_process_heap_info(self, heap, renderer):
    if (self.plugin_args.heaps and
            heap.ProcessHeapsListIndex not in self.plugin_args.heaps):
        return

    if 1 <= heap.ProcessHeapsListIndex <= 64:
        renderer.format("Heap {0}: {1:#x} ({2})\nBackend Info:\n\n",
                        heap.ProcessHeapsListIndex,
                        heap.BaseAddress,
                        heap.FrontEndHeapType)

        renderer.table_header([
            dict(name="Segment", type="TreeNode", width=18,
                 child=dict(style="address")),
            ("End", "segment_end", "[addr]"),
            ("Length", "length", "8"),
            dict(name="Data"),
        ])

        for seg in heap.Segments:
            seg_start = seg.FirstEntry.obj_offset
            seg_end = seg.LastValidEntry.v()

            renderer.table_row(seg_start, seg_end, seg_end - seg_start,
                               depth=1)

            for entry in seg.FirstEntry.walk_list("NextEntry", True):
                # If this is the last entry it goes until the end of the
                # segment.
                start = entry.obj_offset + 0x10
                if start > seg_end:
                    break

                if entry.Flags.LAST_ENTRY:
                    end = seg.LastValidEntry.v()
                else:
                    end = entry.obj_offset + entry.Size * 16

                data = heap.obj_vm.read(start, min(16, end - start))

                renderer.table_row(
                    entry, end, end - start,
                    utils.HexDumpedString(data),
                    depth=2)

    if heap.FrontEndHeapType.LOW_FRAG:
        self.render_low_frag_info(heap, renderer)
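# The walk above assumes _HEAP_ENTRY.Size is counted in allocation granules
# (16 bytes on 64-bit Windows, which is what the `* 16` encodes) and that
# the first 0x10 bytes of each entry are the header itself - hence
# `obj_offset + 0x10` for the start of user data. A worked sketch of that
# arithmetic with assumed values:
entry_offset = 0x200000
entry_size_in_granules = 3            # The _HEAP_ENTRY.Size field.

data_start = entry_offset + 0x10      # Skip the 16-byte entry header.
entry_end = entry_offset + entry_size_in_granules * 16

print("user data: %#x-%#x (%d bytes)" % (
    data_start, entry_end, entry_end - data_start))
# user data: 0x200010-0x200030 (32 bytes)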
def collect(self):
    address_space = self.session.physical_address_space

    for buffer_as in scan.BufferASGenerator(
            self.session, address_space,
            self.plugin_args.start,
            self.plugin_args.start + self.plugin_args.limit):
        self.session.report_progress(
            "Scanning buffer %#x->%#x (%#x)",
            buffer_as.base_offset, buffer_as.end(),
            buffer_as.end() - buffer_as.base_offset)

        for match in self.unified_rule.match(data=buffer_as.data):
            for buffer_offset, string_name, value in sorted(match.strings):
                hit_offset = buffer_offset + buffer_as.base_offset
                self.context_buffer.add_hit(string_name, hit_offset, value)

    # Now re-run the original expression on all unique contexts.
    it = self.context_buffer.get_combined_context_buffers()
    for context, original_offset_map, pseudo_data in it:
        seen = set()
        self.session.report_progress(
            "Scanning pseudo buffer of length %d" % len(pseudo_data))

        # Report any hits of the original sig on this context.
        for match in self.rules.match(data=pseudo_data):
            self.session.report_progress()

            # Only report a single hit of the same rule on the same
            # context.
            dedup_key = (match.rule, context)
            if dedup_key in seen:
                continue
            seen.add(dedup_key)

            for buffer_offset, _, value in match.strings:
                hit_offset = original_offset_map.get(buffer_offset)
                if hit_offset is not None:
                    if isinstance(context, int):
                        owner = self.session.profile._EPROCESS(context)
                    else:
                        owner = context

                    yield dict(
                        Owner=owner,
                        Rule=match.rule,
                        Offset=hit_offset,
                        HexDump=utils.HexDumpedString(
                            address_space.read(
                                hit_offset - self.plugin_args.pre_context,
                                self.plugin_args.context +
                                self.plugin_args.pre_context)),
                        Context=pfn.PhysicalAddressContext(
                            self.session, hit_offset))
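# The second pass above stitches each hit's surrounding bytes into a pseudo
# buffer, rescans it with the original rules, and maps pseudo offsets back
# to real ones via original_offset_map. The dedup step is worth isolating:
# one (rule, context) pair is reported at most once, however many strings
# matched. A minimal sketch of that pattern with assumed sample hits:
hits = [("rule_a", "proc_1", 0x1000),
        ("rule_a", "proc_1", 0x1040),   # Same rule + context: suppressed.
        ("rule_a", "proc_2", 0x2000),
        ("rule_b", "proc_1", 0x1000)]

seen = set()
for rule, context, offset in hits:
    dedup_key = (rule, context)
    if dedup_key in seen:
        continue
    seen.add(dedup_key)
    print("%s in %s at %#x" % (rule, context, offset))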
def collect(self):
    count = 0
    for path in self.plugin_args.paths:
        self.session.logging.debug("File yara scanning %s", path)

        file_info = common.FileFactory(path, session=self.session)
        run = addrspace.Run(start=0, end=file_info.st_size,
                            file_offset=0,
                            address_space=standard.FDAddressSpace(
                                session=self.session,
                                fhandle=file_info.open()))

        for rule, address in self.generate_hits(run):
            count += 1
            if count >= self.plugin_args.hits:
                break

            yield (file_info, rule, address,
                   utils.HexDumpedString(
                       run.address_space.read(
                           address - self.plugin_args.pre_context,
                           self.plugin_args.context +
                           self.plugin_args.pre_context)),
                   None)
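# Reading `address - pre_context` can underrun offset 0 when a hit lands
# near the start of a file. A small sketch of a clamped context window
# (assumed names; Rekall's address spaces tolerate out-of-range reads
# themselves):
def context_window(read, size, address, pre_context, context):
    start = max(0, address - pre_context)
    end = min(size, address + context)
    return start, read(start, end - start)

data = b"HEADERpayload-with-hit"
read = lambda off, length: data[off:off + length]
print(context_window(read, len(data), 2, pre_context=8, context=8))
# (0, b'HEADERpayl') - the window is clamped at the start of the buffer.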
def column_types(self):
    return dict(offset=int,
                hexdump=utils.HexDumpedString(""),
                comment=utils.AttributedString(""))