def collect(self):
    """Describe the columns produced by the named plugin.

    Yields one dict per column (Field/Type) and recurses into nested
    member types via collect_members().

    Raises:
      plugin.PluginError: if the plugin is unknown or is not a Typed
        Plugin (i.e. has no table_header).
    """
    plugin_name = self.plugin_args.plugin_name
    # A PluginRunner proxy carries the real plugin name on an attribute.
    if isinstance(plugin_name, session.PluginRunner):
        plugin_name = self.plugin_args.plugin_name.plugin_name

    plugin_cls = self.session.plugins.GetPluginClass(plugin_name)
    if not plugin_cls:
        # BUG FIX: error message previously read "a value plugin".
        raise plugin.PluginError("Please specify a valid plugin.")

    instance = plugin_cls(session=self.session, ignore_required=True)
    table_header = getattr(instance, "table_header", None)
    if not table_header:
        raise plugin.PluginError(
            "Plugin %s is not a Typed Plugin. It can not be used in "
            "searches." % plugin_name)

    column_types = instance.column_types()
    for i, column in enumerate(table_header):
        column_name = column.get("cname", column.get("name"))

        # column_types may be a dict keyed by column name, or a list
        # positionally parallel to the table header.
        if isinstance(column_types, dict):
            column_type_instance = column_types.get(column_name)
        else:
            column_type_instance = column_types[i]

        yield dict(
            Field=column_name,
            Type=self._determine_type_name(column_type_instance),
        )

        for x in self.collect_members(column_type_instance, 1):
            yield x
def render(self, renderer):
    """Renders the file to disk"""
    if self.output_image is None:
        raise plugin.PluginError("Please provide an output-image filename")

    if (os.path.exists(self.output_image) and
            os.path.getsize(self.output_image) > 1):
        raise plugin.PluginError("Refusing to overwrite an existing file, "
                                 "please remove it before continuing")

    # Copy the image out in 5 MiB chunks.
    chunk_size = 1024 * 1024 * 5
    with open(self.output_image, "wb") as fd:
        for range_offset, _, range_length in (
                self.address_space.get_available_addresses()):
            renderer.format("Range {0:#x} - {1:#x}\n",
                            range_offset, range_length)
            range_end = range_offset + range_length

            offset = range_offset
            while offset < range_end:
                to_read = min(chunk_size, range_end - offset)
                data = self.address_space.read(offset, to_read)

                fd.seek(offset)
                fd.write(data)

                renderer.RenderProgress("Writing offset %s" %
                                        self.human_readable(offset))
                offset += to_read
def __init__(self, *args, **kwargs):
    """Collect artifacts, merging extra definitions into the registry.

    Raises:
      plugin.PluginError: when the running environment cannot be
        determined, or copy_files/create_timeline are requested without
        an output path.
    """
    super(ArtifactsCollector, self).__init__(*args, **kwargs)
    self.artifact_profile = self.session.LoadProfile("artifacts")

    # BUG FIX: read the extra definition files with a context manager so
    # the file handles are closed promptly (the original leaked them).
    extra_definitions = []
    for path in self.plugin_args.artifact_files:
        with open(path) as fd:
            extra_definitions.append(fd.read())

    extra_definitions.extend(self.plugin_args.definitions or [])

    # Make a copy of the artifact registry.
    if extra_definitions:
        self.artifact_profile = self.artifact_profile.copy()

        for definition in extra_definitions:
            for definition_data in yaml.safe_load_all(definition):
                self.artifact_profile.AddDefinition(definition_data)

    self.seen = set()
    self.supported_os = self.get_supported_os(self.session)
    if self.supported_os is None:
        raise plugin.PluginError(
            "Unable to determine running environment.")

    # Make sure the args make sense.
    if self.plugin_args.output_path is None:
        if self.plugin_args.copy_files:
            raise plugin.PluginError(
                "Can only copy files when an output file is specified.")
        if self.plugin_args.create_timeline:
            raise plugin.PluginError(
                "Can only create timelines when an output file "
                "is specified.")
def check_dump_dir(self, dump_dir=None):
    """Validate the dump directory argument.

    Args:
      dump_dir: Candidate dump directory path, or None.

    Raises:
      plugin.PluginError: if the directory is required but missing, or
        the given path is not a directory.
    """
    # If the dump_dir parameter is not optional insist its there.
    if not self.dump_dir_optional and not dump_dir:
        raise plugin.PluginError("Please specify a dump directory.")

    if dump_dir and not os.path.isdir(dump_dir):
        # BUG FIX: the message previously interpolated self.dump_dir,
        # which is not the argument actually being validated here.
        raise plugin.PluginError("%s is not a directory" % dump_dir)
def render(self, renderer):
    """Renders the file to disk"""
    if self.output_image is None:
        raise plugin.PluginError("Please provide an output-image filename")

    if (os.path.exists(self.output_image) and
            os.path.getsize(self.output_image) > 1):
        raise plugin.PluginError("Refusing to overwrite an existing file, "
                                 "please remove it before continuing")

    # Copy each mapped run out in 5 MiB chunks.
    chunk_size = 1024 * 1024 * 5
    with renderer.open(filename=self.output_image, mode="wb") as fd:
        for run in self.address_space.get_mappings():
            renderer.format("Range {0:#x} - {1:#x}\n",
                            run.start, run.length)

            offset = run.start
            while offset < run.end:
                wanted = min(chunk_size, run.end - offset)
                buf = self.address_space.read(offset, wanted)

                fd.seek(offset)
                fd.write(buf)

                renderer.RenderProgress(
                    "Writing offset %s" % self.human_readable(offset))
                offset += wanted
def collect(self):
    """Load (or fetch/build) the profile for self.module_name.

    If no GUID was given it is detected from the mapped module.

    Raises:
      plugin.PluginError: if the module/GUID cannot be resolved or the
        profile cannot be fetched or built.
    """
    if self.guid is None:
        # Try to detect the GUID automatically.
        module = self.session.address_resolver.GetModuleByName(
            self.module_name)
        if not module:
            raise plugin.PluginError("Unknown module %s." %
                                     self.module_name)

        profile_name = module.detect_profile_name()
        if not profile_name:
            raise plugin.PluginError(
                "Unable to determine GUID for module %s." %
                self.module_name)
    else:
        profile_name = "%s/GUID/%s" % (self.module_name, self.guid)
        # BUG FIX: the module object is also needed on this path - it
        # was previously unbound here, causing a NameError below.
        module = self.session.address_resolver.GetModuleByName(
            self.module_name)

    profile = self.session.LoadProfile(profile_name)
    # NOTE: rekall objects overload ==; "is None" is used only where the
    # value is a plain Python object or None.
    if profile == None and module:
        # Try to build it from the symbol server.
        profile = module.build_local_profile(profile_name, force=True)

    if profile == None:
        raise plugin.PluginError("Unable to fetch or build %s" %
                                 profile_name)

    if profile and module:
        module.profile = profile

    return []
def make_flow_object(self):
    """Construct and configure a Flow object from the plugin args."""
    flow_cls = flow.Flow.ImplementationByClass(self.plugin_args.flow)
    if not flow_cls:
        raise plugin.PluginError("Unknown flow %s" % self.plugin_args.flow)

    args = self.plugin_args.args
    # A string argument is interpreted as a JSON encoded dict.
    if isinstance(args, basestring):
        try:
            args = json.loads(args)
        except Exception as e:
            raise plugin.PluginError(
                "args should be a JSON string of a dict: %s" % e)

    if not isinstance(args, dict):
        raise plugin.PluginError("args should be a dict")

    flow_obj = flow_cls.from_primitive(args, session=self.session)
    flow_obj.client_id = self.client_id
    flow_obj.queue = self.plugin_args.queue
    flow_obj.session.live = self.plugin_args.live

    # If a canned condition was specified automatically add it.
    canned = self.plugin_args.canned_condition
    if canned:
        flow_obj.condition = CANNED_CONDITIONS[canned]
    elif self.plugin_args.condition:
        flow_obj.condition = self.plugin_args.condition

    # Specify flow quota.
    flow_obj.quota.user_time = self.plugin_args.quota

    return flow_obj
def run(self, flow_obj=None):
    """Run the named plugin, recording its output into the collection.

    Raises:
      plugin.PluginError: if the plugin is unknown or inactive.
    """
    # Make sure to notify the flow status about the collection we are about
    # to create.
    flow_obj.status.collection_ids.append(self.collection.id)
    plugin_renderer = PluginRenderer(session=self._session,
                                     collection=self.collection,
                                     flow_obj=flow_obj)

    with self.collection.start():
        with plugin_renderer:
            # Find the plugin we need to call.
            plugin_cls = plugin.Command.ImplementationByClass(self.plugin)
            # FIX: identity comparison with None should use "is None"
            # (PEP 8); "== None" can invoke overloaded equality.
            if plugin_cls is None:
                raise plugin.PluginError("Unknown plugin")

            plugin_obj = plugin_cls(session=self._session, **self.args)
            if plugin_obj is None:
                raise plugin.PluginError("Plugin not active")

            # Sometimes we dont know all the columns until we actually run
            # the plugin (For example the Search plugin creates columns
            # dynamically). It is always safer to run the plugin through
            # the renderer.
            self._session.logging.info("Running plugin %s", plugin_cls)
            now = time.time()
            plugin_obj.render(plugin_renderer)
            self._session.logging.info("Completed in %s seconds",
                                       time.time() - now)
def __init__(self, signature=None, scan_kernel=False, scan_physical=False,
             **kwargs):
    """Scan using custom signatures."""
    super(WinSigScan, self).__init__(**kwargs)

    # If nothing is specified just scan the physical address space.
    if not self.filtering_requested and not scan_kernel:
        scan_physical = True

    if not signature:
        raise plugin.PluginError("No signature given.")

    # Signatures are upper-case hex strings, "*" acting as a wildcard.
    valid_sig = re.compile("^[0-9A-F*]*$")

    if isinstance(signature, basestring):
        signature = [signature]

    self.signatures = []
    for raw_sig in signature:
        raw_sig = raw_sig.upper()
        if not valid_sig.match(raw_sig):
            raise plugin.PluginError(
                "Signature %s has invalid format." % raw_sig)

        decoded_parts = []
        for hex_part in raw_sig.split("*"):
            try:
                decoded_parts.append(hex_part.decode("hex"))
            except TypeError:
                raise plugin.PluginError(
                    "Signature %s has invalid format." % raw_sig)

        self.signatures.append(decoded_parts)

    self.scan_physical = scan_physical
    self.scan_kernel = scan_kernel
def collect(self):
    """Run the WMI query and yield a WmiResult per returned object.

    Raises:
      plugin.PluginError: if the query fails to run or its results
        cannot be read.
    """
    # Needs to be called if using com from a thread.
    pythoncom.CoInitialize()

    wmi_obj = win32com.client.GetObject(self.plugin_args.baseobj)

    # This allows our WMI to do some extra things, in particular
    # it gives it access to find the executable path for all processes.
    wmi_obj.Security_.Privileges.AddAsString("SeDebugPrivilege")

    # Run query
    try:
        query_results = wmi_obj.ExecQuery(self.plugin_args.query)
    except pythoncom.com_error as e:
        raise plugin.PluginError(
            "Failed to run WMI query \'%s\' err was %s" % (
                self.plugin_args.query, e))

    # Extract results from the returned COMObject and return dicts.
    try:
        for result in query_results:
            yield dict(Result=WmiResult(result))

    except pythoncom.com_error as e:
        # BUG FIX: the query string and the error were previously
        # interpolated in the wrong order.
        raise plugin.PluginError(
            "WMI query data error on query \'%s\' err was %s" % (
                self.plugin_args.query, e))
def load_driver(self):
    """Load the driver if possible.

    Locates an architecture-appropriate winpmem driver, creates (or
    opens) its kernel service, restarts it, and remembers whether we
    created the service so we can clean up afterwards.

    Raises:
      plugin.PluginError: on unsupported architectures or when the
        driver file is not readable.
    """
    # Check the driver is somewhere accessible.
    if self.driver is None:
        # Valid values
        # http://superuser.com/questions/305901/possible-values-of-processor-architecture
        machine = platform.machine()
        if machine == "AMD64":
            driver = "winpmem_x64.sys"
        elif machine == "x86":
            driver = "winpmem_x86.sys"
        else:
            raise plugin.PluginError("Unsupported architecture")

        self.driver = rekall.get_resource("WinPmem/%s" % driver)

        # Try the local directory
        if self.driver is None:
            self.driver = os.path.join(os.getcwd(), "WinPmem", driver)

    self.session.logging.debug("Loading driver from %s", self.driver)

    if not os.access(self.driver, os.R_OK):
        raise plugin.PluginError("Driver file %s is not accessible." %
                                 self.driver)

    # Must have absolute path here.
    self.hScm = win32service.OpenSCManager(
        None, None, win32service.SC_MANAGER_CREATE_SERVICE)

    try:
        self.hSvc = win32service.CreateService(
            self.hScm, self.name, self.name,
            win32service.SERVICE_ALL_ACCESS,
            win32service.SERVICE_KERNEL_DRIVER,
            win32service.SERVICE_DEMAND_START,
            win32service.SERVICE_ERROR_IGNORE,
            self.driver, None, 0, None, None, None)

        self.session.logging.debug("Created service %s", self.name)
        # Remember to cleanup afterwards.
        self.we_started_service = True
    except win32service.error:
        # Service is already there, try to open it instead.
        self.hSvc = win32service.OpenService(
            self.hScm, self.name, win32service.SERVICE_ALL_ACCESS)

    # Make sure the service is stopped.
    try:
        win32service.ControlService(self.hSvc,
                                    win32service.SERVICE_CONTROL_STOP)
    except win32service.error:
        pass

    try:
        win32service.StartService(self.hSvc, [])
    except win32service.error as e:
        # FIX: use the modern "except ... as e" syntax, consistent with
        # the other exception handlers in this method.
        self.session.logging.debug("%s: will try to continue", e)
def GetVirtualAddressSpace(self, dtb=None):
    """Load the Kernel Virtual Address Space.

    Note that this function is usually not used since the Virtual AS is now
    loaded from guess_profile.ApplyFindDTB() when profiles are guessed. This
    function is only used when the profile is directly provided by the user.
    """
    if dtb is None:
        dtb = self.session.GetParameter("dtb")

    # Make sure a physical address space exists before deriving the
    # virtual address space from it.
    if not self.session.physical_address_space:
        self.GetPhysicalAddressSpace()

    if not self.session.physical_address_space:
        raise plugin.PluginError("Unable to find physical address space.")

    self.profile = self.session.profile
    # NOTE: rekall profile objects overload ==, so "== None" here is the
    # codebase's established idiom rather than "is None".
    if self.profile == None:
        raise plugin.PluginError(
            "Must specify a profile to load virtual AS.")

    # If we know the DTB, just build the address space.
    # Otherwise, delegate to a find_dtb plugin.
    find_dtb = self.session.plugins.find_dtb()

    if dtb:
        self.session.kernel_address_space = find_dtb.CreateAS(dtb)
    else:
        logging.debug("DTB not specified. Delegating to find_dtb.")
        # Take the first DTB candidate that verifies and record it in
        # the session so later plugins can reuse it.
        for address_space in find_dtb.address_space_hits():
            self.session.kernel_address_space = address_space
            self.session.SetParameter("dtb", address_space.dtb)
            break

    if self.session.kernel_address_space is None:
        logging.info(
            "A DTB value was found but failed to verify. "
            "Some troubleshooting steps to consider: "
            "(1) Is the profile correct? (2) Is the KASLR correct? "
            "Try running the find_kaslr plugin on systems that "
            "use KASLR and see if there are more possible values. "
            "You can specify which offset to use using "
            "--vm_kernel_slide. (3) If you know the DTB, for "
            "example from knowing the value of the CR3 register "
            "at time of acquisition, you can set it using --dtb. "
            "On most 64-bit systems, you can use the DTB of any "
            "process, not just the kernel!")
        raise plugin.PluginError(
            "A DTB value was found but failed to verify. "
            "See logging messages for more information.")

    # Set the default address space for plugins like disassemble and dump.
    if not self.session.GetParameter("default_address_space"):
        self.session.SetParameter(
            "default_address_space", self.session.kernel_address_space)

    return self.session.kernel_address_space
def __init__(self, target=None, profile=None, **kwargs):
    """Prints an object to the screen.

    Args:
      target: Name of the type to print (required, must be a string).
      profile: Optional profile to resolve the type in; defaults to the
        session profile.

    Raises:
      plugin.PluginError: if target is missing or not a string.
    """
    super(DT, self).__init__(**kwargs)
    self.profile = profile or self.session.profile
    self.target = target
    if target is None:
        raise plugin.PluginError("You must specify something to print.")

    # BUG FIX: accept unicode as well as byte strings, consistent with
    # the basestring checks used elsewhere in this codebase.
    if not isinstance(target, basestring):
        raise plugin.PluginError("Target must be a string.")
def render(self, renderer):
    """Run the search query and render its results as a table.

    Falls back to an error rendering if the query is absent or fails to
    evaluate; derives the column layout from the first result row.
    """
    # Do we have a query?
    if not self.query:
        return self.render_error(renderer)

    # Get the data we're rendering.
    try:
        rows = self.collect() or []
    except errors.EfilterError as error:
        self.query_error = error
        return self.render_error(renderer)

    # For queries which name a list of columns we need to get the first row
    # to know which columns will be output. Surely efilter can provide this
    # from the AST? This seems like a hack because if the first row the
    # plugin produces does not include all the columns we will miss them.
    # If is also buggy because if the plugin does not produce any rows we
    # can not know if the query is correct or not. For example "select XXXX
    # from plugin()" can not raise an unknown column XXXX if the plugin does
    # not produce at least one row.
    remaining_rows = iter(rows)
    try:
        # Pull the first row now; evaluation errors surface here because
        # the query is lazy.
        first_row = next(remaining_rows)
    except StopIteration:
        renderer.format("No results.")
        return
    except errors.EfilterKeyError as e:
        raise plugin.PluginError(
            "Column %s not found. "
            "Use the describe plugin to list all available "
            "columns. (%s)" % (e.key, e))
    except errors.EfilterError as e:
        raise plugin.PluginError("EFilter Error: %s:" % e)

    # Re-chain the consumed first row in front of the remainder so the
    # renderers below see the full result set.
    all_rows = itertools.chain((first_row,), remaining_rows)

    # If we have some output but don't know what it is we can try to use
    # dict keys as columns.
    if isinstance(first_row, (dict, row_tuple.RowTuple)):
        columns = [dict(name=x) for x in structured.getmembers(first_row)]
        renderer.table_header(columns, auto_widths=True)
        return self._render_plugin_output(renderer, columns, all_rows)

    # Sigh. Give up, and render whatever you got, I guess.
    renderer.table_header([dict(name="result")])
    return self._render_whatever_i_guess(renderer, all_rows)
def check_quota(state, cpu_quota, load_quota):
    """A progress callback which checks quota is not exceeded.

    Args:
      state: dict with "last" (wall time of the previous check),
        "start_time" (CPU time at start) and "proc" (a psutil.Process).
      cpu_quota: maximum CPU seconds allowed, falsy to disable.
      load_quota: maximum CPU utilization (percent), falsy to disable.

    Raises:
      plugin.PluginError: when the CPU quota is exceeded.
    """
    now = time.time()

    # In order to not overwhelm psutil we throttle calls to once every
    # 0.5 seconds.
    # BUG FIX: the previous condition (now + 0.5 > state["last"]) was
    # effectively always true, so no throttling happened at all.
    if now - 0.5 > state["last"]:
        state["last"] = now
        start_time = state["start_time"]
        proc = state["proc"]
        cpu_times = proc.cpu_times()
        current = cpu_times.user + cpu_times.system

        if cpu_quota and current > start_time + cpu_quota:
            # CPU quota exceeded.
            raise plugin.PluginError("CPU Quota exceeded (%s Seconds)." %
                                     (current - start_time))

        if load_quota:
            while 1:
                # BUG FIX: psutil's cpu_percent() already returns a
                # percentage; multiplying by 100 inflated the reading
                # and defeated the load throttle.
                current_cpu_percent = proc.cpu_percent()

                # If our current CPU utilization exceeds the specified
                # limits we sleep a bit.
                if current_cpu_percent < load_quota:
                    break

                time.sleep(0.1)
def graph_clients(self, collection):
    """Draw a graph of client engagement."""
    # This is optionally dependent on presence of matplotlib.
    try:
        from matplotlib import pyplot
    except ImportError:
        raise plugin.PluginError(
            "You must have matplotlib installed to plot graphs.")

    # Build the cumulative client count as a function of seconds since
    # the first execution.
    data_x, data_y = [], []
    client_count = 0
    first_seen = None

    for row in collection.query(order_by="executed"):
        client_count += 1
        if first_seen is None:
            first_seen = row["executed"]

        data_x.append(row["executed"] - first_seen)
        data_y.append(client_count)

    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    ax.plot(data_x, data_y)

    start_time = arrow.Arrow.fromtimestamp(first_seen)
    ax.set_title("Clients in Hunt %s" % self.plugin_args.flow_id)
    ax.set_xlabel("Seconds after %s (%s)" % (
        start_time.ctime(), start_time.humanize()))
    ax.set_ylabel("Total Client Count")

    pyplot.show()
def render(self, renderer_obj):
    """Serve (or export) the Manuskript web console for the worksheet."""
    renderer_obj.format("Starting Manuskript web console.\n")
    renderer_obj.format(
        "Press Ctrl-c to return to the interactive shell.\n")

    # Handle the special file association .rkl
    if (not os.path.isdir(self.worksheet_path) and
            self.worksheet_path.endswith(".rkl")):
        self.worksheet_path = os.path.dirname(self.worksheet_path)

    # Guard clause: anything but a directory is an error.
    if not os.path.isdir(self.worksheet_path):
        raise plugin.PluginError("Worksheet path must be a directory.")

    # Change path to the worksheet_path to ensure relative filenames
    # work.
    previous_cwd = os.getcwd()
    try:
        os.chdir(self.worksheet_path)
        self.worksheet_fd = WebConsoleDocument(
            self.worksheet_path, session=self.session)
        if self.export is not None:
            return self.Export(self.export)

        return self._serve_wsgi()
    finally:
        os.chdir(previous_cwd)
def __init__(self, *args, **kwargs):
    """Set up the artifact registry and detect the running environment."""
    super(ArtifactsCollector, self).__init__(*args, **kwargs)
    self.artifact_profile = self.session.LoadProfile("artifacts")

    # Make a copy of the artifact registry.
    if self.plugin_args.definitions:
        self.artifact_profile = self.artifact_profile.copy()
        for definition in self.plugin_args.definitions:
            for definition_data in yaml.safe_load_all(definition):
                self.artifact_profile.AddDefinition(definition_data)

    self.seen = set()

    # Determine which context we are running in. If we are running in live
    # mode, we use the platform to determine the supported OS, otherwise we
    # determine it from the profile.
    if self.session.GetParameter("live"):
        self.supported_os = platform.system()
    else:
        os_name_map = {"linux": "Linux",
                       "windows": "Windows",
                       "darwin": "Darwin"}
        profile_os = self.session.profile.metadata("os")
        if profile_os not in os_name_map:
            raise plugin.PluginError(
                "Unable to determine running environment.")

        self.supported_os = os_name_map[profile_os]
def collect(self):
    """Yield one row per filesystem entry under dir_path."""
    dir_path = self.plugin_args.dir_path
    partition = self.session.GetParameter("partition_context")

    def _timestamp(name, value):
        # Wrap a raw unix timestamp from TSK metadata.
        return basic.UnixTimeStamp(
            session=self.session, name=name, value=value)

    try:
        for entry in partition.filesystem.get_fs_entry_by_path(dir_path):
            meta = entry.tsk_file.info.meta
            yield dict(
                name=entry.name,
                inode=meta.addr,
                type=entry.type,
                size=entry.size,
                ctime=_timestamp("ctime", meta.ctime),
                mtime=_timestamp("mtime", meta.mtime),
                atime=_timestamp("atime", meta.atime),
            )
    except IOError as e:
        raise plugin.PluginError(e)
def _get_aff4_volume(self, resolver, output_urn, action="Writing"):
    """Open or create the AFF4 volume that output_urn refers to."""
    urn_parts = output_urn.Parse()
    scheme = urn_parts.scheme

    if scheme == "file":
        # A trailing slash selects a directory based volume.
        if urn_parts.path.endswith("/"):
            self.session.logging.info(
                "%s a directory volume on %s", action, output_urn)
            return aff4_directory.AFF4Directory.NewAFF4Directory(
                resolver, output_urn)

        self.session.logging.info(
            "%s a ZipFile volume on %s", action, output_urn)
        return zip.ZipFile.NewZipFile(resolver, output_urn)

    if scheme == "gs" and aff4_cloud:
        self.session.logging.info(
            "%s a cloud volume on %s", action, output_urn)
        return aff4_cloud.AFF4GStore.NewAFF4GStore(
            resolver, output_urn)

    raise plugin.PluginError(
        "URL Scheme: %s not supported for destination: %s" %(
            scheme, output_urn))
def __init__(self, *args, **kwargs):
    """Validate acquisition arguments and resolve the compression mode."""
    super(AFF4Acquire, self).__init__(*args, **kwargs)

    if (not self.plugin_args.destination and
            not self.plugin_args.destination_url):
        raise plugin.PluginError(
            "A destination or destination_url must be specified.")

    # Map the symbolic compression name onto the AFF4 lexicon constant.
    compression_map = {
        "snappy": lexicon.AFF4_IMAGE_COMPRESSION_SNAPPY,
        "stored": lexicon.AFF4_IMAGE_COMPRESSION_STORED,
        "zlib": lexicon.AFF4_IMAGE_COMPRESSION_ZLIB,
    }
    try:
        self.compression = compression_map[self.plugin_args.compression]
    except KeyError:
        raise plugin.InvalidArgs("Unsupported compression %s " %
                                 self.plugin_args.compression)

    # Do not acquire memory if we are told to do something else as well,
    # unless specifically asked to.
    if self.plugin_args.also_memory == "auto":
        other_targets = (self.plugin_args.also_mapped_files,
                         self.plugin_args.also_pagefile,
                         self.plugin_args.files)
        self.plugin_args.also_memory = not any(other_targets)
def AddShimProfiles(profile): profile = profile.copy() # Windows 7 uses this constant to store the shimcache. if profile.get_constant("g_ShimCache"): if profile.metadata("arch") == "AMD64": profile.add_overlay(shimcache_win7_x64) else: profile.add_overlay(shimcache_win7_x86) # Windows XP: elif 5 < profile.metadata("version") < 6: if profile.metadata("arch") == "I386": profile.add_overlay(shimcache_xp_x86) # Windows 8 uses a special driver to hold the cache. elif profile.get_constant("AhcCacheHandle"): if profile.metadata("arch") == "AMD64": profile.add_overlay(shimcache_win8_x64) else: profile.add_overlay(shimcache_win8_x86) # Windows 10 uses a special driver to hold the cache. elif profile.session.address_resolver.get_address_by_name("ahcache"): if profile.metadata("arch") == "AMD64": profile.add_overlay(shimcache_win10_x64) else: raise plugin.PluginError("Unable to identify windows version.") return profile
def __init__(self, string=None, scan_physical=False, yara_file=None,
             yara_expression=None, **kwargs):
    """Scan using yara signatures.

    Args:
      string: A verbatim string to search for. we scan their entire
        address spaces.
      scan_physical: If true we scan the physical address space.
      yara_file: The yara file to read.
      yara_expression: If provided we scan for this yarra expression.
    """
    super(YaraScan, self).__init__(**kwargs)

    # Resolve the rule source from whichever argument was supplied.
    source = None
    if yara_expression:
        source = yara_expression
    elif string:
        source = 'rule r1 {strings: $a = "%s" condition: $a}' % string

    if source is not None:
        self.rules_source = source
        self.rules = yara.compile(source=source)
    elif yara_file:
        self.rules = yara.compile(yara_file)
    else:
        raise plugin.PluginError("You must specify a yara rule file or "
                                 "string to match.")

    self.scan_physical = scan_physical
def __init__(self, *args, **kwargs):
    """Scan using yara signatures.

    Compiles the yara rules up front from whichever plugin argument was
    supplied (expression, binary string, plain string, or rule file).

    Raises:
      plugin.PluginError: if no rule source was given and required
        arguments are not being ignored.
    """
    super(YaraScanMixin, self).__init__(*args, **kwargs)

    # Compile the yara rules in advance.
    if self.plugin_args.yara_expression:
        self.rules_source = self.plugin_args.yara_expression
        self.rules = yara.compile(source=self.rules_source)

    elif self.plugin_args.binary_string:
        self.compile_rule(
            'rule r1 {strings: $a = {%s} condition: $a}' %
            self.plugin_args.binary_string)

    elif self.plugin_args.string:
        self.compile_rule(
            'rule r1 {strings: $a = "%s" condition: $a}' %
            self.plugin_args.string)

    elif self.plugin_args.yara_file:
        # BUG FIX: close the rule file after reading (the original
        # leaked the file handle).
        with open(self.plugin_args.yara_file) as fd:
            self.compile_rule(fd.read())

    elif not self.ignore_required:
        raise plugin.PluginError("You must specify a yara rule file or "
                                 "string to match.")
def __init__(self, output=None, **kwargs):
    """Writer which places dumped files into an existing directory.

    Args:
      output: Path of the destination directory; it must already exist.

    Raises:
      plugin.PluginError: if output is not an existing directory.
    """
    super(DirectoryBasedWriter, self).__init__(**kwargs)
    self.dump_dir = output

    # Check if the directory already exists.
    if not os.path.isdir(self.dump_dir):
        raise plugin.PluginError("%s is not a directory" % self.dump_dir)
def compile_rule(self, rule):
    """Compile a yara rule and store it on the instance.

    Keeps the raw rule text in self.rules_source and the compiled rules
    object in self.rules.

    Raises:
      plugin.PluginError: if the rule fails to compile for any reason.
    """
    self.rules_source = rule
    try:
        self.rules = yara.compile(source=rule)
    except Exception as e:
        # Normalize any yara compilation failure into a PluginError so
        # callers only need to handle one exception type.
        raise plugin.PluginError(
            "Failed to compile yara expression: %s" % e)
def load_driver(self):
    """Unpack and load the driver.

    Extracts the driver tarball into a temp directory, chowns the
    contents to root (unowned kexts will not load) and kextloads any
    .kext bundle found.

    Raises:
      plugin.PluginError: if kextload fails.
    """
    tarfile_handle = tarfile.open(self.plugin_args.driver_path)

    # Try to extract the resource into a tempdir.
    with utils.TempDirectory() as tmp_name:
        self.session.logging.info("Unpacking driver to %s", tmp_name)
        tarfile_handle.extractall(tmp_name)

        # Change ownership of the extracted files to make sure they are
        # owned by root otherwise they will not load.
        # BUG FIX: os.walk() yields (dirpath, dirnames, filenames); the
        # tuple was previously unpacked as (root, files, dirs). The net
        # effect was the same (both lists were chowned) but the names
        # were swapped and misleading.
        for root, dirs, files in os.walk(tmp_name):
            for f in files:
                os.chown(os.path.join(root, f), 0, 0)

            for d in dirs:
                os.chown(os.path.join(root, d), 0, 0)

        for member_name in tarfile_handle.getnames():
            if member_name.endswith(".kext"):
                self.member_name = member_name.lstrip("/")
                full_driver_path = os.path.join(tmp_name,
                                                self.member_name)
                self.session.logging.info(
                    "Loading driver from %s", full_driver_path)
                # BUG FIX: check_call() raises CalledProcessError on a
                # non-zero exit and never returns it, so the previous
                # "if res != 0" branch was dead code.
                try:
                    subprocess.check_call(
                        ["kextload", full_driver_path])
                except subprocess.CalledProcessError:
                    raise plugin.PluginError(
                        "Failed to load driver. Are you root?")
def __init__(self, string=None, scan_physical=False, yara_file=None,
             yara_expression=None, binary_string=None, hits=10, **kwargs):
    """Scan using yara signatures.

    Exactly one of yara_expression, binary_string, string or yara_file
    must be provided as the rule source.

    Raises:
      plugin.PluginError: if no rule source was given.
    """
    super(YaraScanMixin, self).__init__(**kwargs)
    self.hits = hits

    if yara_expression:
        self.rules_source = yara_expression
        self.rules = yara.compile(source=self.rules_source)

    elif binary_string:
        self.compile_rule(
            'rule r1 {strings: $a = {%s} condition: $a}' % binary_string
            )
    elif string:
        self.compile_rule(
            'rule r1 {strings: $a = "%s" condition: $a}' % string
            )
    elif yara_file:
        # BUG FIX: close the rule file after reading (the original
        # leaked the file handle).
        with open(yara_file) as fd:
            self.compile_rule(fd.read())
    else:
        raise plugin.PluginError("You must specify a yara rule file or "
                                 "string to match.")

    self.scan_physical = scan_physical
def __init__(self, *args, **kwargs):
    """Initialize arch-specific page walkers and page-size constants.

    Raises:
      plugin.PluginError: on architectures other than AMD64/I386.
    """
    super(PteEnumerator, self).__init__(*args, **kwargs)
    self._init_masks()
    self._init_enums()

    # The PFN database object describes the state of physical pages.
    self.mmpfn_db = self.profile.get_constant_object("MmPfnDatabase")

    # Select the page enumeration methods and the prototype-pointer
    # marker value based on the architecture.
    if self.session.profile.metadata("arch") == 'AMD64':
        self.get_all_pages_method = self.get_all_pages
        self.get_exec_pages_method = self.get_executable_pages
        self.proto_pointer_identifier = 0xffffffff0000

    elif self.session.profile.metadata("arch") == 'I386':
        self.get_all_pages_method = self.get_all_pages_x86
        self.get_exec_pages_method = self.get_executable_pages_x86
        self.proto_pointer_identifier = 0xffffffff

    else:
        raise plugin.PluginError("Unsupported architecture")

    self.PAGE_SIZE = self.session.kernel_address_space.PAGE_SIZE
    # log2(PAGE_SIZE), and the offset-within-page mask derived from it.
    self.PAGE_BITS = self.PAGE_SIZE.bit_length() - 1
    self.PAGE_BITS_MASK = self.PAGE_SIZE - 1

    # The empty page test uses this a lot, so we keep it once
    self.ALL_ZERO_PAGE = b"\x00" * self.PAGE_SIZE

    # The following pages will probably not occur that much,
    # and we don't want to keep a gigabyte of zeroes in memory
    # TODO make dynamic
    self.LARGE_PAGE_SIZE = 0x200000
    self.LARGE_PAGE_BITS = self.LARGE_PAGE_SIZE.bit_length() - 1
    self.LARGE_ARM_PAGE_SIZE = self.LARGE_PAGE_SIZE * 2
    self.LARGE_ARM_PAGE_BITS = self.LARGE_ARM_PAGE_SIZE.bit_length() - 1
    self.HUGE_PAGE_SIZE = 0x40000000
    self.HUGE_PAGE_BITS = self.HUGE_PAGE_SIZE.bit_length() - 1
def render(self, renderer):
    """Dump each filtered process to disk plus an index of its mappings."""
    if self.dump_dir is None:
        raise plugin.PluginError("Dump directory not specified.")

    for task in self.filter_processes():
        dump_name = u"{0}_{1:d}.dmp".format(task.comm, task.pid)

        renderer.write(u"Writing {0} {1:6x} to {2}\n".format(
            task.comm, task, dump_name))

        with renderer.open(directory=self.dump_dir,
                           filename=dump_name,
                           mode='wb') as dump_fd:
            mappings = self.dump_process(task, dump_fd)

        # Make an index file.
        with renderer.open(directory=self.dump_dir,
                           filename=dump_name + ".idx",
                           mode='wb') as index_fd:
            index_renderer = text.TextRenderer(
                session=self.session, fd=index_fd, mode="wb")
            with index_renderer.start():
                index_renderer.table_header([
                    ("File Address", "file_addr", "[addrpad]"),
                    ("Length", "length", "[addrpad]"),
                    ("Virtual Addr", "virtual", "[addrpad]")])
                self.write_index(index_renderer, mappings)