def filter_processes(self):
    """Filters eprocess list using pids lists."""
    for proc in self.list_process():
        # No filter active: pass everything through.
        if not self.filtering_requested:
            yield proc
            continue

        # Otherwise match on pid or on the process-name regex.
        if int(proc.pid) in self.plugin_args.pids:
            yield proc
        elif (self.plugin_args.proc_regex and
              self.plugin_args.proc_regex.match(
                  utils.SmartUnicode(proc.name))):
            yield proc
def __str__(self):
    """Print the contents somewhat concisely."""
    entries = []
    for key, raw in self.items():
        if isinstance(raw, obj.BaseObject):
            raw = repr(raw)

        rendered = u"\n ".join(utils.SmartUnicode(raw).splitlines())
        # Keep the output short by clipping long values.
        if len(rendered) > 100:
            rendered = u"%s ..." % rendered[:100]

        entries.append(u" %s = %s" % (key, rendered))

    return u"{\n" + u"\n".join(sorted(entries)) + u"\n}"
def _run_flow(self, flow_obj):
    """Execute a single flow, reporting progress via its ticket.

    Sends a ticket checkpoint before each action; on failure the ticket
    is marked "Error" with the backtrace, on success "Done".
    """
    # Flow has a condition - we only run the flow if the condition matches.
    if flow_obj.condition:
        try:
            if not list(self.session.plugins.search(flow_obj.condition)):
                self.session.logging.debug(
                    "Ignoring flow %s because condition %s is not true.",
                    flow_obj.flow_id, flow_obj.condition)
                return

        # If the query failed to run we must ignore this flow.
        except Exception as e:
            self.session.logging.exception(e)
            return

    # Prepare the session specified by this flow.
    rekall_session = self._get_session(flow_obj.session)

    for action in flow_obj.actions:
        # Make a progress ticket for this action if required.
        flow_obj.ticket.status = "Started"
        flow_obj.ticket.client_id = self._config.client.writeback.client_id
        flow_obj.ticket.current_action = action
        flow_obj.ticket.timestamp = time.time()

        try:
            # Write the ticket to set a checkpoint.
            flow_obj.ticket.send_message()

            # Run the action with the new session, and report the produced
            # collections. Note that the ticket contains all collections for
            # all actions cumulatively.
            action_to_run = action.from_primitive(action.to_primitive(),
                                                  session=rekall_session)

            for collection in (action_to_run.run(flow_obj=flow_obj) or []):
                collection.location = collection.location.get_canonical()
                flow_obj.ticket.collections.append(collection)

        except Exception as e:
            # Report the failure (with backtrace) and abort remaining actions.
            flow_obj.ticket.status = "Error"
            flow_obj.ticket.error = utils.SmartUnicode(e)
            flow_obj.ticket.backtrace = traceback.format_exc()
            flow_obj.ticket.send_message()
            self.session.logging.exception(e)
            return

    flow_obj.ticket.status = "Done"
    flow_obj.ticket.current_action = None
    flow_obj.ticket.send_message()
def generate_hits(self):
    """Generates _CONSOLE_INFORMATION objects."""
    architecture = self.profile.metadata("arch")

    # The process we select is conhost on Win7 or csrss for others
    # Only select those processes we care about:
    for task in self.session.plugins.pslist(
            proc_regex="(conhost.exe|csrss.exe)").filter_processes():
        # Pick the helper profile matching the hosting process and bitness.
        if utils.SmartUnicode(task.ImageFileName).lower() == "conhost.exe":
            if architecture == "AMD64":
                process_profile = ConHost64(session=self.session)
            else:
                process_profile = ConHost86(session=self.session)
        elif utils.SmartUnicode(task.ImageFileName).lower() == "csrss.exe":
            if architecture == "AMD64":
                process_profile = WinSrv64(session=self.session)
            else:
                process_profile = WinSrv86(session=self.session)
        else:
            continue

        scanner = ConsoleScanner(
            task=task,
            process_profile=process_profile,
            session=self.session,
            max_history=self.plugin_args.max_history,
            history_buffers=self.plugin_args.history_buffers)

        # Needle: max_history as a little-endian 16-bit value
        # (the byte followed by NUL).
        # NOTE(review): bytes((x, 0)) assumes Python 3 semantics; on
        # Python 2 this would stringify the tuple — confirm target runtime.
        pattern = bytes((self.plugin_args.max_history, 0))
        scanner.checks = [
            ("StringCheck", dict(needle=pattern))
        ]
        scanner.build_constraints()

        for console in scanner.scan():
            yield task, console
def _EnsureInitialized(self):
    """Lazily build the profile and section modules for this PE (runs once)."""
    if self._initialized:
        return

    symbols = {}
    self.pe_profile = None

    # Get a usable profile.
    if "Symbol" in self.session.GetParameter("name_resolution_strategies"):
        # Load the profile for this binary.
        self.pe_profile = self.session.LoadProfile("%s/GUID/%s" % (
            self.NormalizeModuleName(self.pe_helper.RSDS.Filename),
            self.pe_helper.RSDS.GUID_AGE))

    # NOTE: "== None" (not "is None") is deliberate — LoadProfile may
    # return a NoneObject which compares equal to None but is not None.
    if self.pe_profile == None:
        self.pe_profile = pe_vtypes.BasicPEProfile(session=self.session)

    if "Export" in self.session.GetParameter("name_resolution_strategies"):
        # Extract all exported symbols into the profile's symbol table.
        for _, func, name, _ in self.pe_helper.ExportDirectory():
            func_address = func.v()
            name = utils.SmartUnicode(name)
            # Constants are stored relative to the image base.
            symbols[name] = func_address - self.image_base

    self.pe_profile.image_base = self.image_base
    self.pe_profile.add_constants(constants_are_addresses=True,
                                  constants=symbols)

    # A section for the header.
    self.AddModule(
        PESectionModule(start=self.image_base,
                        end=self.image_base + 0x1000,
                        name="pe",
                        profile=self.pe_profile,
                        session=self.session))

    # Find the highest address covered in this executable image.
    for _, name, virtual_address, length in self.pe_helper.Sections():
        if length > 0:
            virtual_address += self.image_base
            self.AddModule(
                PESectionModule(start=virtual_address,
                                end=virtual_address + length,
                                name=self.NormalizeModuleName(name),
                                profile=self.pe_profile,
                                session=self.session))

    self._initialized = True
def Convert(self):
    """Convert a Darwin (OSX) profile package into a Rekall profile."""
    # Check for an OSX profile.
    raw_map = self.SelectFile("dsymutil$")
    if not raw_map:
        raise RuntimeError("Unknown profile format.")

    # Parse the system map file.
    system_map = self.ParseSystemMap(utils.SmartUnicode(raw_map))

    vtype_file = self.SelectFile(r"\.vtypes$")
    if not vtype_file:
        raise RuntimeError("Unknown profile format.")

    # Some maps have keys that are a large integer followed by 'L',
    # which is invalid Python syntax, so get rid of them
    cleaned_source = re.sub(r"(\d+)L", "\\1",
                            utils.SmartUnicode(vtype_file))

    self.session.logging.info(
        "Converting Darwin profile with vtypes dump output")

    # The dwarfdump module returns python code so we must exec it.
    # SECURITY NOTE: exec() runs arbitrary code from the profile package —
    # only convert profiles obtained from trusted sources.
    namespace = {}
    exec(cleaned_source, {}, namespace)

    return self.BuildProfile(system_map, namespace["mac_types"])
def FindEVTFiles(self):
    """Search for event log files in memory.

    We search for processes called 'services.exe' with a vad to and open
    file ending with '.evt'.
    """
    ps_plugin = self.get_plugin("pslist", proc_regex="services.exe")

    for task in ps_plugin.filter_processes():
        for vad in task.RealVadRoot.traverse():
            try:
                # Mapped-file VADs expose the backing file name here;
                # other VAD types raise AttributeError and are skipped.
                name = vad.ControlArea.FilePointer.FileName
                if utils.SmartUnicode(name).lower().endswith(".evt"):
                    yield task, vad
            except AttributeError:
                pass
def convert_row(self, row):
    """Render the row into the output collection."""
    converted = {}
    for column, value in six.iteritems(row):
        # Render both text and data export for each object.
        text_form = utils.SmartUnicode(
            self._text_renderer.get_object_renderer(
                target=value).render_row(value))

        data_form = self._data_renderer.get_object_renderer(
            target=value).EncodeToJsonSafe(value)

        self.handle_special_objects(value)

        converted[column] = dict(text=text_form, data=data_form)

    return converted
def __init__(self, filename, filesystem=u"API", path_sep=None):
    """Build a FileSpec from a string or by copying another FileSpec."""
    super(FileSpec, self).__init__()

    if isinstance(filename, FileSpec):
        # Copy the other file spec.
        self.name = filename.name
        self.filesystem = filename.filesystem
        self.path_sep = filename.path_sep
        return

    if isinstance(filename, basestring):
        self.name = utils.SmartUnicode(filename)
        self.filesystem = filesystem
        self.path_sep = path_sep or self.default_path_sep
        return

    raise TypeError("Filename must be a string or file spec.")
def render_full(self, target, **options):
    """Render `target` (a table with optional heat coloring) as stacked cells."""
    column_headers = []
    row_headers = []

    for row_header in target.row_headers or ():
        row_headers.append(text.Cell(row_header, align="r", padding=1))

    # If we're prepending row headers we need an extra column on the left.
    if row_headers:
        column_headers.append(text.Cell(target.caption or "-", padding=1))

    for column_header in target.column_headers:
        column_headers.append(
            text.Cell(column_header, align="c", padding=1))

    rows = [text.JoinedCell(*column_headers, tablesep="")]

    for idx, row in enumerate(target.rows):
        cells = []
        if row_headers:
            cells.append(row_headers[idx])

        for cell in row:
            fg = cell.get("fg")
            bg = cell.get("bg")
            heat = cell.get("heat")
            # Derive a background from the heat value unless one was given.
            if heat and not bg:
                bg = colors.HeatToRGB(heat, greyscale=target.greyscale)

            bg = colors.RGBToXTerm(*bg) if bg else None
            # Pick a readable foreground for the chosen background.
            if bg and not fg:
                fg = colors.XTermTextForBackground(bg)

            cells.append(
                text.Cell(value=utils.SmartUnicode(cell.get("value", "-")),
                          highlights=[dict(bg=bg, fg=fg,
                                           start=0, end=-1, bold=True)],
                          colorizer=self.renderer.colorizer,
                          padding=1))

        rows.append(text.JoinedCell(*cells, tablesep="", align="l"))

    return text.StackedCell(*rows, align="l")
def populate_drivers(self, parent): """Populate the tree view with drivers""" # Build a list of all address spaces address_spaces = [session.kernel_address_space] for task in session.plugins.pslist().filter_processes(): address_spaces.append(task.get_process_address_space()) # Determine architecture specifics if session.profile.metadata("arch") == "AMD64": ptr_mask = 0xffffffffffffffff ptr_width = 16 else: ptr_mask = 0xffffffff ptr_width = 8 for module in session.plugins.modules().lsmod(): # Ensure the driver's image base is non-null image_base = int(module.DllBase) if int(module.DllBase) else 0 if image_base == 0: continue # Locate the address space address_space = None for a in address_spaces: if a.is_valid_address(image_base): address_space = a if not address_space: continue # Create internal filename base_name = os.path.basename(utils.SmartUnicode(module.BaseDllName)) filename = "{} - 0x{:0x}".format(utils.EscapeForFilesystem(base_name), int(module.DllBase)) # Create driver tree view item driver_item = QtGui.QStandardItem("{}".format(utils.EscapeForFilesystem(base_name))) driver_item.setData({"filename": filename, "task": None, "address_space": address_space, "image_base": image_base}, QtCore.Qt.UserRole) driver_item.setCheckable(True) driver_item.setCheckState(QtCore.Qt.Checked if parent.checkState() == QtCore.Qt.Checked else QtCore.Qt.Unchecked) if parent.checkState() == QtCore.Qt.Checked: self.items.add(HashableQStandardItem(driver_item)) parent.appendRow([driver_item, QtGui.QStandardItem("N/A"), QtGui.QStandardItem("0x{:0{w}x}".format(image_base & ptr_mask, w=ptr_width))])
def __init__(self, filename=None, mountpoint=None, dentry=None,
             is_root=False, session=None):
    """Initialize a FileName from a path string, component list or nothing."""
    if isinstance(filename, (basestring, basic.String)):
        components = utils.SmartUnicode(filename).split("/")
    elif isinstance(filename, list):
        components = filename
    elif not filename:
        components = []
    else:
        raise TypeError("Invalid filename.")

    self.filename = components
    self.mountpoint = mountpoint or MountPoint()
    self.dentry = dentry
    self.is_root = is_root
    self.session = session
def render(self, outfd):
    """Write the UserAssist registry keys and decoded records to `outfd`."""
    for reg, key in self.find_count_keys():
        if not key:
            continue

        outfd.write("----------------------------\n")
        outfd.write("Registry: {0}\n".format(reg.Name))
        outfd.write("Key path: {0}\n".format(key.Path))
        outfd.write("Last updated: {0}\n".format(key.LastWriteTime))
        outfd.write("\n")
        outfd.write("Subkeys:\n")
        for subkey in key.subkeys():
            outfd.write(" {0}\n".format(subkey.Name))

        outfd.write("\n")
        outfd.write("Values:\n")
        for value in list(key.values()):
            # In windows 7, folder names are replaced by guids.
            # Value names are ROT13 obfuscated in the registry.
            value_name = codecs.getdecoder("rot13")(utils.SmartUnicode(
                value.Name))[0]
            value_name = self._resolve_gui_folders(value_name)
            outfd.write("\n{0:13} {1:15} :\n".format(
                value.Type, value_name))

            # Decode the data
            if value.Type == "REG_BINARY":
                # Does this look like a userassist record?
                if value.DataLength == self.ua_profile.get_obj_size(
                        "_VOLUSER_ASSIST_TYPES"):
                    # Use the specialized profile to instantiate this object.
                    uadata = self.ua_profile.Object(
                        "_VOLUSER_ASSIST_TYPES",
                        offset=value.Data,
                        vm=value.obj_vm)
                    self._render_assist_type(outfd, uadata)

                # Show a hexdump of the value as well.
                utils.WriteHexdump(outfd, value.DecodedData)
def commandline(self):
    """Return the command line of this task (or "[comm]" for kernel threads)."""
    if not self.mm:
        # kernel thread
        return "[" + self.comm + "]"

    # The argv string is initialized inside the process's address space.
    proc_as = self.get_process_address_space()

    # read argv from userland
    argv = utils.SmartUnicode(
        proc_as.read(self.mm.arg_start,
                     self.mm.arg_end - self.mm.arg_start))

    if not argv:
        return ""

    # split the \x00 buffer into args
    return " ".join(argv.split("\x00"))
def _read_all_flows(self):
    """Fetch and deserialize all pending flows from the client job queues."""
    flows = []
    for job_location in self._config.client.get_jobs_queues():
        data = job_location.read_file(
            if_modified_since=self.writeback.last_flow_time.timestamp)
        if not data:
            continue

        try:
            job_file = serializer.unserialize(
                json.loads(utils.SmartUnicode(data)),
                session=self.session,
                strict_parsing=False)
            flows.extend(job_file.flows)
        except Exception as e:
            # Best effort: a corrupt queue entry is logged and skipped,
            # unless we are debugging.
            if self.session.GetParameter("debug"):
                raise
            self.session.logging.error("Error %r: %s", e, e)

    return flows
def filter_processes(self):
    """Filters eprocess list using pids lists."""
    # If eprocess are given specifically only use those.
    if self.plugin_args.task:
        for task in self.list_from_task_head():
            yield task
        return

    for proc in self.list_tasks():
        # No filter active: pass everything through.
        if not self.filtering_requested:
            yield proc
            continue

        # Otherwise match on pid or on the process-name regex.
        if int(proc.pid) in self.plugin_args.pids:
            yield proc
        elif (self.plugin_args.proc_regex and
              self.plugin_args.proc_regex.match(
                  utils.SmartUnicode(proc.name))):
            yield proc
def get_buffer(self, truncate=True):
    """Get the screen buffer.

    The screen buffer is comprised of the screen's Y coordinate which
    tells us the number of rows and the X coordinate which tells us the
    width of each row in characters. These together provide all of the
    input and output that users see when the console is displayed.

    @param truncate: True if the empty rows at the end (i.e. bottom) of
    the screen buffer should be supressed.
    """
    rows = []
    for _, row in enumerate(self.Rows.dereference()):
        if row.Chars.is_valid():
            # Clip each row to the visible screen width.
            rows.append(utils.SmartUnicode(
                row.Chars.dereference())[0:self.ScreenX])

    # To truncate empty rows at the end, walk the list
    # backwards and get the last non-empty row. Use that
    # row index to splice. An "empty" row isn't just ""
    # as one might assume. It is actually ScreenX number
    # of space characters
    if truncate:
        non_empty_index = 0
        for index, row in enumerate(reversed(rows)):
            ## It seems that when the buffer width is greater than 128
            ## characters, its truncated to 128 in memory.
            if row.count(" ") != min(self.ScreenX, 128):
                non_empty_index = index
                break

        # non_empty_index == 0 means either the very last row is
        # non-empty (nothing to trim) or every row was empty; in the
        # latter case the whole buffer is dropped.
        if non_empty_index == 0:
            rows = []
        else:
            rows = rows[0:len(rows) - non_empty_index]

    return rows
def FormatName(self, root_dentry):
    """Format this path, with special handling for pseudo filesystems."""
    # For sockets we need more info.
    if len(self.path_components) >= self.MAX_DEPTH:
        return obj.NoneObject(
            u"Depth exceeded at %s" % "/".join(self.path_components))

    mount = self.mount_point

    if mount == "socket:":
        return "{0}/{1}[{2}]".format(
            mount, root_dentry.d_name.name.deref(),
            self.start_dentry.d_inode.i_ino)

    if mount == "pipe:":
        return "{0}[{1}]".format(mount, self.start_dentry.d_inode.i_ino)

    if mount == "anon_inode:":
        return u"anon_inode:%s" % self.start_dentry.d_name.name.deref()

    # This is the normal condition for files.
    return re.sub(u"/+", u"/", utils.SmartUnicode(self))
def render(self, renderer):
    """Render all recovered event log records as a table."""
    if self.plugin_args.verbosity > 5:
        self.PrecacheSids()

    renderer.table_header([
        ("TimeWritten", "timestamp", ""),
        ("Filename", "filename", ""),
        ("Computer", "computer", ""),
        ("Sid", "sid", ""),
        ("Source", "source", ""),
        ("Event Id", "event_id", ""),
        ("Event Type", "event_type", ""),
        ("Message", "message", ""),
    ])

    for task, vad in self.FindEVTFiles():
        filename = ntpath.basename(
            utils.SmartUnicode(vad.ControlArea.FilePointer.FileName))

        for event in self.ScanEvents(vad,
                                     task.get_process_address_space()):
            renderer.table_row(
                event.TimeWritten, filename, event.Computer, event.Sid,
                event.Source, event.EventID, event.EventType,
                list(event.Data))
def render(self, renderer):
    """Render each listed registry key, its subkeys and decoded values."""
    renderer.format("Legend: (S) = Stable (V) = Volatile\n\n")
    for reg, key in self.list_keys():
        # Default-arg binding so the lambda captures this iteration's key.
        self.session.report_progress(
            "Printing %s", lambda key=key: key.Path)

        if key:
            renderer.format("----------------------------\n")
            renderer.format("Registry: {0}\n", reg.Name)
            renderer.format("Key name: {0} {1} @ {2:addrpad}\n",
                            key.Name,
                            self.voltext(key),
                            key.obj_vm.vtop(int(key)))
            renderer.format("Last updated: {0}\n", key.LastWriteTime)
            renderer.format("\n")
            renderer.format("Subkeys:\n")

            for subkey in key.subkeys():
                if not subkey.Name:
                    renderer.format(" Unknown subkey: {0}\n",
                                    subkey.Name.reason)
                else:
                    renderer.format(u" {1} {0}\n", subkey.Name,
                                    self.voltext(subkey))

            renderer.format("\n")
            renderer.format("Values:\n")
            for value in list(key.values()):
                renderer.format("{0:addrpad} ",
                                value.obj_vm.vtop(value))
                if value.Type == 'REG_BINARY':
                    data = value.DecodedData
                    # Binary values are hexdumped; non-string decoded
                    # binary data prints nothing beyond the address.
                    if isinstance(data, basestring):
                        renderer.format(
                            u"{0:width=13} {1:width=15} : {2}\n",
                            value.Type, value.Name, self.voltext(value))
                        utils.WriteHexdump(renderer, value.DecodedData)
                else:
                    renderer.format(
                        u"{0:width=13} {1:width=15} : {2} {3}\n",
                        value.Type, value.Name, self.voltext(value),
                        utils.SmartUnicode(value.DecodedData).strip())
def _get_scope(self):
    """Builds the scope for this query.

    We add some useful functions to be available to the query:

    timestamp(): Wrap an int or float in a UnixTimeStamp so it gets
    rendered properly.

    substr(): Allows a string to be substringed.

    file(): Marks a string as a file name. The Rekall Agent will then
    potentially upload this file.
    """
    scope = helpers.EFILTER_SCOPES.copy()

    # Note: `long` implies a Python 2 runtime (or a builtins shim).
    scope["timestamp"] = api.user_func(
        lambda x, **_: basic.UnixTimeStamp(value=x, session=self.session),
        arg_types=[float, int, long])

    # This function is used to indicate that the string represents
    # a filename. This will cause the agent to upload it if the
    # user requested uploading files.
    # > select file(path.filename.name).filename.name from glob("/*")
    scope["file"] = api.scalar_function(
        lambda x: common.FileInformation(session=self.session, filename=x),
        arg_types=(string.IString,))

    # substr(x, start, end): Python slice semantics, end-exclusive.
    scope["substr"] = api.scalar_function(
        lambda x, start, end: utils.SmartUnicode(x)[int(start):int(end)],
        arg_types=(string.IString, number.INumber, number.INumber))

    # hex(x): render an integer as a hex string.
    scope["hex"] = api.scalar_function(
        lambda x: hex(int(x)),
        arg_types=(number.INumber,))

    # deref(x): follow a Pointer object.
    scope["deref"] = api.scalar_function(
        lambda x: x.deref(),
        arg_types=(obj.Pointer,))

    return scope
def _StoreData(self, name, to_write, **options):
    """Write `to_write` under `name`, gzip-compressed unless `uncompressed`."""
    path = self.GetAbsolutePathName(name)
    self.EnsureDirectoryExists(os.path.dirname(path))

    # If we are asked to write uncompressed files we do.
    if options.get("uncompressed"):
        with open(path, "wt") as out_fd:
            out_fd.write(utils.SmartUnicode(to_write))
        self._dirty = True
        return

    # We need to update the file atomically in case someone else is trying
    # to open it right now. Since the files are expected to be fairly small
    # its ok to compress into memory and just write atomically.
    buffer_fd = io.BytesIO()
    with gzip.GzipFile(mode="wb", fileobj=buffer_fd) as gzip_fd:
        gzip_fd.write(utils.SmartStr(to_write))

    with open(path + ".gz", "wb") as out_fd:
        out_fd.write(buffer_fd.getvalue())

    self._dirty = True
def ParseConfigFile(self, config_file):
    """Parse the kernel .config file returning it as a dictionary.

    Args:
      config_file: The raw contents of the kernel .config file; each line
        is decoded with utils.SmartUnicode.

    Returns:
      A dict mapping CONFIG_* parameter names to their unquoted string
      values. Comment lines and lines without '=' are skipped.
    """
    config = {}
    for line in config_file.splitlines():
        line = utils.SmartUnicode(line)
        # Skip comment lines such as "# CONFIG_FOO is not set".
        if line.startswith("#"):
            continue
        try:
            # BUG FIX: split on the FIRST '=' only — previously a value
            # containing '=' (e.g. CONFIG_X="a=b") raised ValueError and
            # the whole line was silently dropped.
            (config_param, value) = line.strip().split("=", 1)

            # Remove leading and trailing spaces from the config_param.
            config_param = config_param.lstrip(" \t").rstrip(" \t")

            # Massage the value a bit so plugins trying to use them get more
            # useful values. This deals with config options like
            # CONFIG_DEFAULT_HOSTNAME="(none)" having a value of
            # str("(none)") instead of str("\"(none)\"").
            value = value.rstrip(" \t").lstrip(" \t")
            value = value.rstrip('"\'').lstrip('"\'')
            config[config_param] = value
        except ValueError:
            # Blank lines and lines without '=' are ignored.
            pass

    return config
def __d_path(self, path, root):
    """A literal copy of the __d_path function from kernel 2.6.26."""
    depth = 0
    dentry = path.dentry
    # NOTE: "== None" is deliberate — dentry may be a NoneObject which
    # compares equal to None but is not the None singleton.
    if dentry == None:
        return ""

    vfsmnt = path.mnt

    result = FileName(start_dentry=dentry)

    # An unhashed non-root dentry has been deleted.
    if not dentry.is_root and self.d_unhashed(dentry):
        result.deleted = True

    # Limit the recursion here to avoid getting stuck.
    # NOTE(review): `depth` is never incremented, so this bound never
    # triggers — termination relies on reaching a root or a self-parented
    # mount. FormatName() separately enforces MAX_DEPTH on components.
    while depth < result.MAX_DEPTH:
        # Reached the root we were asked to resolve against.
        if dentry == root.dentry and vfsmnt == root.mnt:
            break

        # At the root of a mounted filesystem: hop to the mount point in
        # the parent mount and keep walking there.
        if dentry == vfsmnt.mnt_root or dentry.is_root:
            if vfsmnt.mnt_parent == vfsmnt:
                break

            dentry = vfsmnt.mnt_mountpoint
            vfsmnt = vfsmnt.mnt_parent
            continue

        parent = dentry.d_parent
        result.PrependName(dentry.d_name.name.deref())
        dentry = parent

    # When we get here dentry is a root dentry and mnt is the mount point it
    # is mounted on. There are some special mount points we want to
    # highlight.
    result.mount_point = utils.SmartUnicode(
        vfsmnt.mnt_mountpoint.d_name.name.deref())

    return result.FormatName(dentry)
def collect_from_MiSystemVaType(self):
    """Yield coalesced system VA ranges from the MiSystemVaType table.

    Walks the per-page-table-span type array starting at
    MiSystemRangeStart and yields dicts with virt_start, virt_end and the
    range's _MI_SYSTEM_VA_TYPE name, merging adjacent spans of the same
    type. Yields nothing if the constant is absent from the profile.
    """
    system_va_table = self.profile.get_constant_object(
        "MiSystemVaType",
        target="Array",
        target_args=dict(
            target="Enumeration",
            target_args=dict(
                target="byte",
                enum_name="_MI_SYSTEM_VA_TYPE"
            ),
        )
    )

    # NOTE: "== None" is deliberate — get_constant_object may return a
    # NoneObject which compares equal to None but is not None.
    if system_va_table == None:
        return

    system_range_start = self.profile.get_constant_object(
        "MiSystemRangeStart", "unsigned int")

    # The size varies on PAE profiles.
    # BUG FIX: use floor division — on Python 3 "/" yields a float here,
    # and range() below raises TypeError for a non-integer step.
    va_table_size = 0x1000 * 0x1000 // self.profile.get_obj_size("_MMPTE")

    # Coalesce the ranges.
    range_type = range_start = range_length = 0

    for offset in range(system_range_start, 0xffffffff, va_table_size):
        table_index = old_div((offset - system_range_start), va_table_size)
        page_type = system_va_table[table_index]
        if page_type != range_type:
            # Close the previous run (range_type == 0 means "no run yet").
            if range_type:
                yield dict(virt_start=range_start,
                           virt_end=range_start + range_length,
                           type=utils.SmartUnicode(range_type))

            range_type = page_type
            range_start = offset
            range_length = va_table_size
        else:
            range_length += va_table_size
def enumerate_handles(self, task):
    """Yield (handle, object_type, name) for each handle in `task`'s table."""
    if task.ObjectTable.HandleTableList:
        for handle in task.ObjectTable.handles():
            name = u""
            object_type = handle.get_object_type(self.kernel_address_space)

            # NOTE: "== None" is deliberate — may be a NoneObject which
            # compares equal to None but is not None.
            if object_type == None:
                continue

            # Optional filter on the object type names requested by the
            # user; the elif chain below resolves a display name per type.
            if (self.plugin_args.object_types and
                    object_type not in self.plugin_args.object_types):
                continue

            elif object_type == "File":
                file_obj = handle.dereference_as("_FILE_OBJECT")
                name = file_obj.file_name_with_device()

            elif object_type == "Key":
                key_obj = handle.dereference_as("_CM_KEY_BODY")
                name = key_obj.full_key_name()

            elif object_type == "Process":
                proc_obj = handle.dereference_as("_EPROCESS")
                name = u"{0}({1})".format(
                    utils.SmartUnicode(proc_obj.ImageFileName),
                    proc_obj.UniqueProcessId)

            elif object_type == "Thread":
                thrd_obj = handle.dereference_as("_ETHREAD")
                name = u"TID {0} PID {1}".format(
                    thrd_obj.Cid.UniqueThread,
                    thrd_obj.Cid.UniqueProcess)

            elif handle.NameInfo.Name == None:
                name = u""

            else:
                name = handle.NameInfo.Name

            # Optionally suppress anonymous handles.
            if not name and self.plugin_args.named_only:
                continue

            yield handle, object_type, name
def render(self, renderer):
    """Render each recovered _COMMAND_HISTORY and its command slots."""
    for task, hist in self.generate_hits():
        renderer.section()
        renderer.format(u"CommandProcess: {0} Pid: {1}\n",
                        task.ImageFileName, task.UniqueProcessId)

        renderer.format(
            u"CommandHistory: {0:#x} Application: {1} Flags: {2}\n",
            hist.obj_offset, hist.Application.dereference(), hist.Flags)

        renderer.format(
            u"CommandCount: {0} LastAdded: {1} LastDisplayed: {2}\n",
            hist.CommandCount, hist.LastAdded, hist.LastDisplayed)

        renderer.format(u"FirstCommand: {0} CommandCountMax: {1}\n",
                        hist.FirstCommand, hist.CommandCountMax)

        renderer.format(u"ProcessHandle: {0:#x}\n", hist.ProcessHandle)

        renderer.table_header([("Cmd", "command", ">3"),
                               ("Address", "address", "[addrpad]"),
                               ("Text", "text", "")])

        # If the _COMMAND_HISTORY is in use, we would only take
        # hist.CommandCount but since we're brute forcing, try the
        # maximum and hope that some slots were not overwritten
        # or zero-ed out.
        pointers = hist.obj_profile.Array(
            target="address",
            count=hist.CommandCountMax,
            offset=hist.obj_offset + hist.obj_profile.get_obj_offset(
                "_COMMAND_HISTORY", "CommandBucket"),
            vm=hist.obj_vm)

        for i, p in enumerate(pointers):
            cmd = p.cast("Pointer", target="_COMMAND").deref()
            if cmd.obj_offset and cmd.Cmd:
                # NOTE(review): .encode("unicode_escape") returns bytes on
                # Python 3 — confirm the renderer accepts bytes here.
                renderer.table_row(
                    i, cmd.obj_offset,
                    utils.SmartUnicode(cmd.Cmd).encode("unicode_escape"))
def _walk_proc(self, current, seen, path=""):
    """Recursively traverse the proc filesystem yielding proc_dir_entry.

    Yields:
      tuples of proc_dir_entry, full_path to this proc entry.
    """
    # Prevent infinite recursion here.
    if current in seen:
        return
    seen.add(current)

    yield current, posixpath.join(path, utils.SmartUnicode(current.Name))

    # Yield our peers.
    for peer in current.walk_list("next"):
        for entry in self._walk_proc(peer, seen, path):
            yield entry

    # Now also yield the subdirs:
    if current.subdir:
        child_path = posixpath.join(path, str(current.Name))
        for entry in self._walk_proc(current.subdir, seen, child_path):
            yield entry
def collect(self):
    """Yield a divider row per service followed by its decoded values."""
    for service in self.GenerateServices():
        yield dict(Service=service, divider=service.Name)

        for value in service.values():
            key = value.Name
            data = value.DecodedData

            # Raw binary values are not rendered in this table.
            if value.Type == "REG_BINARY":
                continue

            if isinstance(data, list):
                data = ",".join(
                    [utils.SmartUnicode(item) for item in data if item])

            # Translate well-known numeric fields into symbolic names.
            decoder = {"Type": self.SERVICE_TYPE,
                       "Start": self.START_TYPE,
                       "ErrorControl": self.ERROR_CONTROL}.get(key)
            if decoder is not None:
                data = decoder.get(data, data)

            yield dict(Service=service, Key=key, Value=data)
def filter_processes(self):
    """Filters proc list using phys_proc and pids lists."""
    procs = sorted(self.session.plugins.collect("proc"),
                   key=lambda candidate: candidate.pid)

    # No filtering required:
    if not self.filtering_requested:
        for proc in procs:
            yield proc
        return

    # Explicit physical offsets of proc structures are instantiated
    # directly from the kernel address space.
    for offset in self.plugin_args.proc:
        yield self.profile.proc(vm=self.kernel_address_space,
                                offset=int(offset))

    # We need to filter by pids
    for proc in procs:
        if int(proc.p_pid) in self.plugin_args.pids:
            yield proc
        elif (self.plugin_args.proc_regex and
              self.plugin_args.proc_regex.match(
                  utils.SmartUnicode(proc.p_comm))):
            yield proc