def __init__(self, widget, args):
    # Load configuration
    self.config_file = os.path.join(self.get_temp_dir(), "pe_tree_volatility.ini")

    super(VolatilityRuntime, self).__init__(widget, args)

    # Initialise Volatility3
    framework.require_interface_version(1, 0, 0)

    self.context = contexts.Context()

    # Input file in URI format
    single_location = "file:///{}".format(self.args.filename.name)
    self.context.config["automagic.LayerStacker.single_location"] = single_location
    self.context.config["single_location"] = single_location

    # Find supported plugins
    plugins.__path__ = framework.constants.PLUGINS_PATH
    framework.import_files(plugins, True)

    self.plugin_list = framework.list_plugins()

    # Initialise required plugins configuration
    self._automagic("windows.pslist.PsList")
    self._automagic("windows.vadinfo.VadInfo")
    self._automagic("windows.modules.Modules")
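# A minimal sketch (not part of PE Tree) of building the single_location URI more robustly than
# "file:///{}".format(...): Path.as_uri() handles absolute paths, Windows drive letters and
# percent-encodes characters such as spaces, similar in spirit to location_from_file() in the
# Volatility CLI code further down. The sample path in the usage note is illustrative.
import pathlib


def to_single_location(filename: str) -> str:
    """Converts a local file path into the file:// URI the LayerStacker automagic expects."""
    return pathlib.Path(filename).resolve().as_uri()


# Usage: ctx.config["automagic.LayerStacker.single_location"] = to_single_location("/tmp/memory image.dmp")
# which yields "file:///tmp/memory%20image.dmp"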
def __init__(self,
             context: interfaces.context.ContextInterface,
             config_path: str,
             progress_callback: constants.ProgressCallback = None) -> None:
    """
    Args:
        context: The context that the plugin will operate within
        config_path: The path to configuration data within the context configuration data
        progress_callback: A callable that can provide feedback at progress points
    """
    super().__init__(context, config_path)
    self._progress_callback = progress_callback or (lambda f, s: None)
    # Plugins self-validate on construction; it makes them harder to work with, but the
    # validation doesn't need to be repeated over and over again by external callers
    if self.unsatisfied(context, config_path):
        vollog.warning("Plugin failed validation")
        raise exceptions.PluginRequirementException("The plugin configuration failed to validate")
    # Populate any optional defaults
    for requirement in self.get_requirements():
        if requirement.name not in self.config:
            self.config[requirement.name] = requirement.default

    self._file_handler = FileHandlerInterface  # type: Type[FileHandlerInterface]

    framework.require_interface_version(*self._required_framework_version)
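# A minimal sketch of a plugin that satisfies the construction-time validation above: it declares
# its requirements for unsatisfied() to check, lets the optional-defaults loop fill in anything
# missing, and returns a TreeGrid from run(). The plugin name, columns and the single 'kernel'
# ModuleRequirement are illustrative assumptions, not an official Volatility 3 plugin.
from volatility3.framework import interfaces, renderers
from volatility3.framework.configuration import requirements


class KernelInfo(interfaces.plugins.PluginInterface):
    """Reports the layer and base offset of the kernel module (sketch only)."""

    _required_framework_version = (2, 0, 0)

    @classmethod
    def get_requirements(cls):
        # These requirements are what unsatisfied() validates before __init__ completes
        return [
            requirements.ModuleRequirement(name="kernel",
                                           description="Kernel module for the memory image",
                                           architectures=["Intel32", "Intel64"]),
        ]

    def _generator(self):
        kernel = self.context.modules[self.config["kernel"]]
        yield (0, (kernel.layer_name, hex(kernel.offset)))

    def run(self):
        return renderers.TreeGrid([("Layer", str), ("Kernel base", str)], self._generator())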
def run(self):
    """Executes the command line module, taking the system arguments,
    determining the plugin to run and then running it."""
    sys.stdout.write(f"Volshell (Volatility 3 Framework) {constants.PACKAGE_VERSION}\n")

    framework.require_interface_version(2, 0, 0)

    parser = argparse.ArgumentParser(prog=self.CLI_NAME,
                                     description="A tool for interactive forensic analysis of memory images")
    parser.add_argument("-c",
                        "--config",
                        help="Load the configuration from a json file",
                        default=None,
                        type=str)
    parser.add_argument("-e",
                        "--extend",
                        help="Extend the configuration with a new (or changed) setting",
                        default=None,
                        action='append')
    parser.add_argument("-p",
                        "--plugin-dirs",
                        help="Semi-colon separated list of paths to find plugins",
                        default="",
                        type=str)
    parser.add_argument("-s",
                        "--symbol-dirs",
                        help="Semi-colon separated list of paths to find symbols",
                        default="",
                        type=str)
    parser.add_argument("-v",
                        "--verbosity",
                        help="Increase output verbosity",
                        default=0,
                        action="count")
    parser.add_argument("-o",
                        "--output-dir",
                        help="Directory in which to output any generated files",
                        default=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')),
                        type=str)
    parser.add_argument("-q",
                        "--quiet",
                        help="Remove progress feedback",
                        default=False,
                        action='store_true')
    parser.add_argument("--log",
                        help="Log output to a file as well as the console",
                        default=None,
                        type=str)
    parser.add_argument("-f",
                        "--file",
                        metavar='FILE',
                        default=None,
                        type=str,
                        help="Shorthand for --single-location=file:// if single-location is not defined")
    parser.add_argument("--write-config",
                        help="Write configuration JSON file out to config.json",
                        default=False,
                        action='store_true')
    parser.add_argument("--clear-cache",
                        help="Clears out all short-term cached items",
                        default=False,
                        action='store_true')
    parser.add_argument("--cache-path",
                        help=f"Change the default path ({constants.CACHE_PATH}) used to store the cache",
                        default=constants.CACHE_PATH,
                        type=str)

    # Volshell specific flags
    os_specific = parser.add_mutually_exclusive_group(required=False)
    os_specific.add_argument("-w",
                             "--windows",
                             default=False,
                             action="store_true",
                             help="Run a Windows volshell")
    os_specific.add_argument("-l",
                             "--linux",
                             default=False,
                             action="store_true",
                             help="Run a Linux volshell")
    os_specific.add_argument("-m",
                             "--mac",
                             default=False,
                             action="store_true",
                             help="Run a Mac volshell")

    # We have to filter out help, otherwise parse_known_args will trigger the help message before having
    # processed the plugin choice or had the plugin subparser added.
    known_args = [arg for arg in sys.argv if arg != '--help' and arg != '-h']
    partial_args, _ = parser.parse_known_args(known_args)

    if partial_args.plugin_dirs:
        volatility3.plugins.__path__ = [os.path.abspath(p)
                                        for p in partial_args.plugin_dirs.split(";")] + constants.PLUGINS_PATH

    if partial_args.symbol_dirs:
        volatility3.symbols.__path__ = [os.path.abspath(p)
                                        for p in partial_args.symbol_dirs.split(";")] + constants.SYMBOL_BASEPATHS

    if partial_args.cache_path:
        constants.CACHE_PATH = partial_args.cache_path

    vollog.info(f"Volatility plugins path: {volatility3.plugins.__path__}")
    vollog.info(f"Volatility symbols path: {volatility3.symbols.__path__}")

    if partial_args.log:
        file_logger = logging.FileHandler(partial_args.log)
        file_logger.setLevel(0)
        file_formatter = logging.Formatter(datefmt='%y-%m-%d %H:%M:%S',
                                           fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
        file_logger.setFormatter(file_formatter)
        vollog.addHandler(file_logger)
        vollog.info("Logging started")

    if partial_args.verbosity < 3:
        console.setLevel(30 - (partial_args.verbosity * 10))
    else:
        console.setLevel(10 - (partial_args.verbosity - 2))

    if partial_args.clear_cache:
        framework.clear_cache()

    # Do the initialization
    ctx = contexts.Context()  # Construct a blank context
    failures = framework.import_files(volatility3.plugins,
                                      True)  # Will not log as console's default level is WARNING
    if failures:
        parser.epilog = "The following plugins could not be loaded (use -vv to see why): " + \
                        ", ".join(sorted(failures))
        vollog.info(parser.epilog)

    automagics = automagic.available(ctx)

    # Initialize the list of plugins in case volshell needs it
    framework.list_plugins()

    seen_automagics = set()
    configurables_list = {}
    for amagic in automagics:
        if amagic in seen_automagics:
            continue
        seen_automagics.add(amagic)
        if isinstance(amagic, interfaces.configuration.ConfigurableInterface):
            self.populate_requirements_argparse(parser, amagic.__class__)
            configurables_list[amagic.__class__.__name__] = amagic

    # We don't list plugin arguments, because they can be provided within python
    volshell_plugin_list = {'generic': generic.Volshell, 'windows': windows.Volshell}
    for plugin in volshell_plugin_list:
        subparser = parser.add_argument_group(title=plugin.capitalize(),
                                              description="Configuration options based on {} options".format(
                                                  plugin.capitalize()))
        self.populate_requirements_argparse(subparser, volshell_plugin_list[plugin])
        configurables_list[plugin] = volshell_plugin_list[plugin]

    ###
    # PASS TO UI
    ###
    # Hand the plugin requirements over to the CLI (us) and let it construct the config tree

    # Run the argparser
    args = parser.parse_args()

    vollog.log(constants.LOGLEVEL_VVV, f"Cache directory used: {constants.CACHE_PATH}")

    plugin = generic.Volshell
    if args.windows:
        plugin = windows.Volshell
    if args.linux:
        plugin = linux.Volshell
    if args.mac:
        plugin = mac.Volshell

    base_config_path = "plugins"
    plugin_config_path = interfaces.configuration.path_join(base_config_path, plugin.__name__)

    # Special case the -f argument because people use it so frequently
    # It has to go here so it can be overridden by single-location if it's defined
    # NOTE: This will *BREAK* if LayerStacker, or the automagic configuration system, changes at all
    ###
    if args.file:
        try:
            single_location = self.location_from_file(args.file)
            ctx.config['automagic.LayerStacker.single_location'] = single_location
        except ValueError as excp:
            parser.error(str(excp))

    # UI fills in the config, here we load it from the config file and do it before we process the CL parameters
    if args.config:
        with open(args.config, "r") as f:
"r") as f: json_val = json.load(f) ctx.config.splice( plugin_config_path, interfaces.configuration.HierarchicalDict(json_val)) self.populate_config(ctx, configurables_list, args, plugin_config_path) if args.extend: for extension in args.extend: if '=' not in extension: raise ValueError( "Invalid extension (extensions must be of the format \"conf.path.value='value'\")" ) address, value = extension[:extension.find('=')], json.loads( extension[extension.find('=') + 1:]) ctx.config[address] = value # It should be up to the UI to determine which automagics to run, so this is before BACK TO THE FRAMEWORK automagics = automagic.choose_automagic(automagics, plugin) self.output_dir = args.output_dir ### # BACK TO THE FRAMEWORK ### try: progress_callback = cli.PrintedProgress() if args.quiet: progress_callback = cli.MuteProgress() constructed = plugins.construct_plugin( ctx, automagics, plugin, base_config_path, progress_callback, self.file_handler_class_factory()) if args.write_config: vollog.debug("Writing out configuration data to config.json") with open("config.json", "w") as f: json.dump(dict(constructed.build_configuration()), f, sort_keys=True, indent=2) except exceptions.UnsatisfiedException as excp: self.process_unsatisfied_exceptions(excp) parser.exit( 1, f"Unable to validate the plugin requirements: {[x for x in excp.unsatisfied]}\n" ) try: # Construct and run the plugin if constructed: constructed.run() except exceptions.VolatilityException as excp: self.process_exceptions(excp) parser.exit( 1, f"Unable to validate the plugin requirements: {[x for x in excp.unsatisfied]}\n" )
def __init__(self, *args, **kwargs):
    framework.require_interface_version(*self._required_framework_version)
    super().__init__(*args, **kwargs)
class LinuxUtilities(interfaces.configuration.VersionableInterface):
    """Class with multiple useful linux functions."""

    _version = (2, 0, 0)
    _required_framework_version = (2, 0, 0)

    framework.require_interface_version(*_required_framework_version)

    # based on __d_path from the Linux kernel
    @classmethod
    def _do_get_path(cls, rdentry, rmnt, dentry, vfsmnt) -> str:
        ret_path: List[str] = []

        while dentry != rdentry or vfsmnt != rmnt:
            dname = dentry.path()
            if dname == "":
                break

            ret_path.insert(0, dname.strip('/'))
            if dentry == vfsmnt.get_mnt_root() or dentry == dentry.d_parent:
                if vfsmnt.get_mnt_parent() == vfsmnt:
                    break

                dentry = vfsmnt.get_mnt_mountpoint()
                vfsmnt = vfsmnt.get_mnt_parent()

                continue

            parent = dentry.d_parent
            dentry = parent

        # if we did not gather any valid dentries in the path, then the entire file is
        # either 1) smeared out of memory or 2) de-allocated and corresponding structures overwritten
        # we return an empty string in this case to avoid confusion with something like a handle to the root
        # directory (e.g., "/")
        if not ret_path:
            return ""

        ret_val = '/'.join([str(p) for p in ret_path if p != ""])

        if ret_val.startswith(("socket:", "pipe:")):
            if ret_val.find("]") == -1:
                try:
                    inode = dentry.d_inode
                    ino = inode.i_ino
                except exceptions.InvalidAddressException:
                    ino = 0

                ret_val = ret_val[:-1] + f":[{ino}]"
            else:
                ret_val = ret_val.replace("/", "")

        elif ret_val != "inotify":
            ret_val = '/' + ret_val

        return ret_val

    # method used by 'older' kernels
    # TODO: lookup when dentry_operations->d_dname was merged into the mainline kernel for exact version
    @classmethod
    def _get_path_file(cls, task, filp) -> str:
        rdentry = task.fs.get_root_dentry()
        rmnt = task.fs.get_root_mnt()
        dentry = filp.get_dentry()
        vfsmnt = filp.get_vfsmnt()

        return LinuxUtilities._do_get_path(rdentry, rmnt, dentry, vfsmnt)

    @classmethod
    def _get_new_sock_pipe_path(cls, context, task, filp) -> str:
        dentry = filp.get_dentry()

        sym_addr = dentry.d_op.d_dname

        symbol_table_arr = sym_addr.vol.type_name.split("!")
        symbol_table = None
        if len(symbol_table_arr) == 2:
            symbol_table = symbol_table_arr[0]

        for module_name in context.modules.get_modules_by_symbol_tables(symbol_table):
            kernel_module = context.modules[module_name]
            break
        else:
            raise ValueError(f"No module using the symbol table {symbol_table}")

        symbs = list(kernel_module.get_symbols_by_absolute_location(sym_addr))

        if len(symbs) == 1:
            sym = symbs[0].split(constants.BANG)[1]

            if sym == "sockfs_dname":
                pre_name = "socket"
            elif sym == "anon_inodefs_dname":
                pre_name = "anon_inode"
            elif sym == "pipefs_dname":
                pre_name = "pipe"
            elif sym == "simple_dname":
                pre_name = cls._get_path_file(task, filp)
            else:
                pre_name = f"<unsupported d_op symbol: {sym}>"

            ret = f"{pre_name}:[{dentry.d_inode.i_ino:d}]"
        else:
            ret = f"<invalid d_dname pointer> {sym_addr:x}"

        return ret

    # a 'file' structure doesn't have enough information to properly restore its full path
    # we need the root mount information from task_struct to determine this
    @classmethod
    def path_for_file(cls, context, task, filp) -> str:
        try:
            dentry = filp.get_dentry()
        except exceptions.InvalidAddressException:
            return ""

        if dentry == 0:
            return ""

        dname_is_valid = False

        # TODO COMPARE THIS IN LSOF OUTPUT TO VOL2
        try:
            if dentry.d_op and dentry.d_op.has_member("d_dname") and dentry.d_op.d_dname:
                dname_is_valid = True

        except exceptions.InvalidAddressException:
            dname_is_valid = False

        if dname_is_valid:
            ret = LinuxUtilities._get_new_sock_pipe_path(context, task, filp)
        else:
            ret = LinuxUtilities._get_path_file(task, filp)

        return ret

    @classmethod
    def files_descriptors_for_process(cls, context: interfaces.context.ContextInterface, symbol_table: str,
                                      task: interfaces.objects.ObjectInterface):

        fd_table = task.files.get_fds()
        if fd_table == 0:
            return

        max_fds = task.files.get_max_fds()

        # corruption check
        if max_fds > 500000:
            return

        file_type = symbol_table + constants.BANG + 'file'

        fds = objects.utility.array_of_pointers(fd_table, count=max_fds, subtype=file_type, context=context)

        for (fd_num, filp) in enumerate(fds):
            if filp != 0:
                full_path = LinuxUtilities.path_for_file(context, task, filp)

                yield fd_num, filp, full_path

    @classmethod
    def mask_mods_list(cls, context: interfaces.context.ContextInterface, layer_name: str,
                       mods: Iterator[interfaces.objects.ObjectInterface]) -> List[Tuple[str, int, int]]:
        """
        A helper function to mask the start and end addresses of kernel modules
        """
        mask = context.layers[layer_name].address_mask

        return [(utility.array_to_string(mod.name), mod.get_module_base() & mask,
                 (mod.get_module_base() & mask) + mod.get_core_size()) for mod in mods]

    @classmethod
    def generate_kernel_handler_info(
            cls, context: interfaces.context.ContextInterface, kernel_module_name: str,
            mods_list: Iterator[interfaces.objects.ObjectInterface]) -> List[Tuple[str, int, int]]:
        """
        A helper function that gets the beginning and end address of the kernel module
        """

        kernel = context.modules[kernel_module_name]

        mask = context.layers[kernel.layer_name].address_mask

        start_addr = kernel.object_from_symbol("_text")
        start_addr = start_addr.vol.offset & mask

        end_addr = kernel.object_from_symbol("_etext")
        end_addr = end_addr.vol.offset & mask

        return [(constants.linux.KERNEL_NAME, start_addr, end_addr)] + \
               LinuxUtilities.mask_mods_list(context, kernel.layer_name, mods_list)

    @classmethod
    def lookup_module_address(cls, kernel_module: interfaces.context.ModuleInterface,
                              handlers: List[Tuple[str, int, int]], target_address: int):
        """
        Searches between the start and end address of the kernel module using target_address.
        Returns the module and symbol name of the address provided.
        """

        mod_name = "UNKNOWN"
        symbol_name = "N/A"

        for name, start, end in handlers:
            if start <= target_address <= end:
                mod_name = name
                if name == constants.linux.KERNEL_NAME:
                    symbols = list(kernel_module.get_symbols_by_absolute_location(target_address))

                    if len(symbols):
                        symbol_name = symbols[0].split(constants.BANG)[1] if constants.BANG in symbols[0] else \
                            symbols[0]

                break

        return mod_name, symbol_name

    @classmethod
    def walk_internal_list(cls, vmlinux, struct_name, list_member, list_start):
        while list_start:
            list_struct = vmlinux.object(object_type=struct_name, offset=list_start.vol.offset)
            yield list_struct
            list_start = getattr(list_struct, list_member)
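# A minimal sketch (modelled on how the linux.lsof plugin consumes the LinuxUtilities class above)
# of walking every task's open file descriptors. It assumes the linux.pslist.PsList API from the
# same framework generation (2.x); the helper name and the yielded tuple layout are illustrative,
# not part of Volatility.
from volatility3.plugins.linux import pslist


def iter_open_files(context, vmlinux_module_name):
    """Yields (pid, fd number, resolved path) for every task in the kernel's task list."""
    vmlinux = context.modules[vmlinux_module_name]

    for task in pslist.PsList.list_tasks(context, vmlinux_module_name):
        for fd_num, _filp, full_path in LinuxUtilities.files_descriptors_for_process(
                context, vmlinux.symbol_table_name, task):
            yield int(task.pid), fd_num, full_path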