import inspect
import logging
import os

# NOTE: Config, list_plugins, parse_options, DETECTOR_ROOT, VolatilityAPI and
# the Detector* exception classes are assumed to be provided by the project's
# own packages; the exact import paths are omitted here.

log = logging.getLogger(__name__)


class RunAuxiliary(object):
    """Auxiliary modules manager."""

    def __init__(self, task, machine):
        self.task = task
        self.machine = machine
        self.cfg = Config("auxiliary")
        self.enabled = []

    def start(self):
        for module in list_plugins(group="auxiliary"):
            try:
                current = module()
            except Exception:
                log.exception("Failed to load the auxiliary module "
                              "\"{0}\":".format(module))
                return

            module_name = inspect.getmodule(current).__name__
            if "." in module_name:
                module_name = module_name.rsplit(".", 1)[1]

            try:
                options = self.cfg.get(module_name)
            except DetectorOperationalError:
                log.debug("Auxiliary module %s not found in "
                          "configuration file", module_name)
                continue

            if not options.enabled:
                continue

            current.set_task(self.task)
            current.set_machine(self.machine)
            current.set_options(options)

            try:
                current.start()
            except NotImplementedError:
                pass
            except Exception as e:
                log.warning("Unable to start auxiliary module %s: %s",
                            module_name, e)
            else:
                log.debug("Started auxiliary module: %s",
                          current.__class__.__name__)
                self.enabled.append(current)

    def stop(self):
        for module in self.enabled:
            try:
                module.stop()
            except NotImplementedError:
                pass
            except Exception as e:
                log.warning("Unable to stop auxiliary module: %s", e)
            else:
                log.debug("Stopped auxiliary module: %s",
                          module.__class__.__name__)
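
# Usage sketch (not part of the engine): the scheduler is expected to wrap an
# analysis run with start()/stop(). The "task" and "machine" objects below are
# placeholders for whatever the scheduler normally passes in.
#
#     aux = RunAuxiliary(task=task, machine=machine)
#     aux.start()
#     try:
#         ...  # perform the analysis
#     finally:
#         aux.stop()
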
class RunReporting(object):
    """Reporting Engine.

    This class handles the loading and execution of the enabled reporting
    modules. It receives the analysis results dictionary from the Processing
    Engine and passes it over to the reporting modules before executing them.
    """

    def __init__(self, task, results):
        """@param task: task dictionary of the analysis.
        @param results: analysis results dictionary.
        """
        self.task = task
        self.results = results
        self.analysis_path = os.path.join(
            DETECTOR_ROOT, "storage", "analyses", str(task["id"]))
        self.cfg = Config("reporting")

        self.task["options"] = parse_options(self.task["options"])

    def process(self, module):
        """Run a single reporting module.
        @param module: reporting module.
        """
        # Initialize current reporting module.
        try:
            current = module()
        except Exception:
            log.exception(
                "Failed to load the reporting module \"{0}\":".format(module))
            return

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except DetectorOperationalError:
            log.debug("Reporting module %s not found in configuration file",
                      module_name)
            return

        # If the reporting module is disabled in the config, skip it.
        if not options.enabled:
            return

        # Give it the path to the analysis results folder.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the relevant reporting.conf section.
        current.set_options(options)
        # Load the content of the analysis.conf file.
        current.cfg = Config(cfg=current.conf_path)

        try:
            current.run(self.results)
            log.debug("Executed reporting module \"%s\"",
                      current.__class__.__name__)
        except DetectorDependencyError as e:
            log.warning(
                "The reporting module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except DetectorReportError as e:
            log.warning(
                "The reporting module \"%s\" returned the following error: %s",
                current.__class__.__name__, e)
        except Exception:
            log.exception("Failed to run the reporting module \"%s\":",
                          current.__class__.__name__)

    def run(self):
        """Generate all reports.
        @raise DetectorReportError: if a report module fails.
        """
        # In every reporting module you can specify a numeric value that
        # represents the position at which that module should be executed
        # among all the available ones. It can be used in the case where a
        # module requires another one to have been executed beforehand.
        reporting_list = list_plugins(group="reporting")

        # Return if no reporting modules are loaded.
        if reporting_list:
            reporting_list.sort(key=lambda module: module.order)

            # Run every loaded reporting module.
            for module in reporting_list:
                self.process(module)
        else:
            log.info("No reporting modules loaded")
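
# Sketch of a minimal reporting module, assuming the project ships a Report
# base class that provides the set_path/set_task/set_options hooks invoked
# above along with the resulting self.analysis_path attribute. The class name
# and output file are hypothetical.
#
#     class JSONDump(Report):
#         order = 1
#
#         def run(self, results):
#             report_path = os.path.join(self.analysis_path, "report.json")
#             with open(report_path, "w") as report:
#                 json.dump(results, report, indent=4)
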
class RunProcessing(object):
    """Analysis Results Processing Engine.

    This class handles the loading and execution of the processing modules.
    It executes the enabled ones sequentially and generates a dictionary
    which is then passed over to the reporting engine.
    """

    def __init__(self, task):
        """@param task: task dictionary of the analysis to process."""
        self.task = task
        self.analysis_path = os.path.join(
            DETECTOR_ROOT, "storage", "analyses", str(task["id"]))
        self.baseline_path = os.path.join(DETECTOR_ROOT, "storage", "baseline")
        self.cfg = Config("processing")

    def process(self, module, results):
        """Run a processing module.
        @param module: processing module to run.
        @param results: results dict.
        @return: results generated by the module.
        """
        # Initialize the specified processing module.
        try:
            current = module()
        except Exception:
            log.exception("Failed to load the processing module "
                          "\"{0}\":".format(module))
            return None, None

        # Extract the module name.
        module_name = inspect.getmodule(current).__name__
        if "." in module_name:
            module_name = module_name.rsplit(".", 1)[1]

        try:
            options = self.cfg.get(module_name)
        except DetectorOperationalError:
            log.debug("Processing module %s not found in configuration file",
                      module_name)
            return None, None

        # If the processing module is disabled in the config, skip it.
        if not options.enabled:
            return None, None

        # Give it the path to the baseline directory.
        current.set_baseline(self.baseline_path)
        # Give it the path to the analysis results.
        current.set_path(self.analysis_path)
        # Give it the analysis task object.
        current.set_task(self.task)
        # Give it the options from the relevant processing.conf section.
        current.set_options(options)
        # Give it the results obtained so far.
        current.set_results(results)

        try:
            # Run the processing module and retrieve the generated data to be
            # appended to the general results container.
            data = current.run()

            log.debug("Executed processing module \"%s\" on analysis at "
                      "\"%s\"", current.__class__.__name__,
                      self.analysis_path)

            # If it succeeded, return the module's key name and the data.
            return current.key, data
        except DetectorDependencyError as e:
            log.warning(
                "The processing module \"%s\" has missing dependencies: %s",
                current.__class__.__name__, e)
        except DetectorProcessingError as e:
            log.warning(
                "The processing module \"%s\" returned the following "
                "error: %s", current.__class__.__name__, e)
        except Exception:
            log.exception(
                "Failed to run the processing module \"%s\" for task #%d:",
                current.__class__.__name__, self.task["id"])

        return None, None

    def run(self):
        """Run all processing modules and all signatures.
        @return: processing results.
        """
        # This is the results container. It's what will be used by all the
        # reporting modules to make it consumable by humans and machines.
        # It will contain all the results generated by every processing
        # module available. Its structure can be observed through the JSON
        # dump in the analysis' reports folder (if jsondump is enabled).
        # We affectionately call this the "fat dict".
        results = {
            "_temp": {},
        }

        # Order modules using the user-defined sequence number. If none is
        # specified for a module, modules are selected in alphabetical order.
        processing_list = list_plugins(group="processing")

        # If no modules are loaded, return an empty dictionary.
        if processing_list:
            processing_list.sort(key=lambda module: module.order)

            # Run every loaded processing module.
            for module in processing_list:
                key, result = self.process(module, results)

                # If the module provided results, append them to the fat dict.
                if key and result:
                    results[key] = result
        else:
            log.info("No processing modules loaded")

        results.pop("_temp", None)

        # Return the fat dict.
        return results
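
# Sketch of a minimal processing module, assuming a Processing base class that
# exposes the set_* hooks invoked above. "key" names the slot this module
# fills in the fat dict and "order" drives the sorting done in run(). The
# module itself is hypothetical.
#
#     class AnalysisInfo(Processing):
#         order = 1
#         key = "info"
#
#         def run(self):
#             return {
#                 "id": int(self.task["id"]),
#                 "category": self.task["category"],
#             }
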
class VolatilityManager(object):
    """Handle several volatility results."""

    PLUGINS = [
        "pslist",
        "psxview",
        "callbacks",
        ["idt", "x86"],
        "ssdt",
        ["gdt", "x86"],
        "timers",
        "messagehooks",
        "getsids",
        "privs",
        "malfind",
        "apihooks",
        "dlllist",
        "handles",
        "ldrmodules",
        "mutantscan",
        "devicetree",
        "svcscan",
        "modscan",
        "yarascan",
        ["sockscan", "winxp"],
        ["netscan", "vista", "win7"],
    ]

    def __init__(self, memfile, osprofile=None):
        self.mask_pid = []
        self.taint_pid = set()
        self.memfile = memfile

        conf_path = os.path.join(DETECTOR_ROOT, "conf", "memory.conf")
        if not os.path.exists(conf_path):
            log.error("Configuration file %s not found", conf_path)
            self.voptions = False
            return

        self.voptions = Config("memory")

        for pid in self.voptions.mask.pid_generic.split(","):
            pid = pid.strip()
            if pid:
                self.mask_pid.append(int(pid))

        self.no_filter = not self.voptions.mask.enabled
        if self.voptions.basic.guest_profile:
            self.osprofile = self.voptions.basic.guest_profile
        else:
            self.osprofile = osprofile or self.get_osprofile()

    def get_osprofile(self):
        """Get the OS profile."""
        return VolatilityAPI(self.memfile).imageinfo()["data"][0]["osprofile"]

    def run(self):
        results = {}

        # Exit if options were not loaded.
        if not self.voptions:
            return

        vol = VolatilityAPI(self.memfile, self.osprofile)

        for plugin_name in self.PLUGINS:
            if isinstance(plugin_name, list):
                plugin_name, profiles = plugin_name[0], plugin_name[1:]
            else:
                profiles = []

            # Some plugins can only run on certain profiles (i.e., only on
            # Windows XP/Vista/7, or only on x86 or x64).
            osp = self.osprofile.lower()
            for profile in profiles:
                if osp.startswith(profile) or osp.endswith(profile):
                    break
            else:
                if profiles:
                    continue

            plugin = self.voptions.get(plugin_name)
            if not plugin or not plugin.enabled:
                log.debug("Skipping '%s' volatility module", plugin_name)
                continue

            if plugin_name in vol.plugins:
                log.debug("Executing volatility '%s' module.", plugin_name)
                results[plugin_name] = getattr(vol, plugin_name)()

        self.find_taint(results)
        self.cleanup()

        return self.mask_filter(results)

    def mask_filter(self, old):
        """Filter out masked stuff. Keep tainted stuff."""
        new = {}

        for akey in old.keys():
            new[akey] = {"config": old[akey]["config"], "data": []}
            conf = getattr(self.voptions, akey, None)
            new[akey]["config"]["filter"] = conf.filter

            for item in old[akey]["data"]:
                # TODO: need to improve this logic.
                if not conf.filter:
                    new[akey]["data"].append(item)
                elif "process_id" in item and \
                        item["process_id"] in self.mask_pid and \
                        item["process_id"] not in self.taint_pid:
                    pass
                else:
                    new[akey]["data"].append(item)

        return new

    def find_taint(self, res):
        """Find tainted items."""
        if "malfind" in res:
            for item in res["malfind"]["data"]:
                self.taint_pid.add(item["process_id"])

    def cleanup(self):
        """Delete the memory dump (if configured to do so)."""
        if self.voptions.basic.delete_memdump:
            try:
                os.remove(self.memfile)
            except OSError:
                log.error("Unable to delete memory dump file at path \"%s\"",
                          self.memfile)
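
# Usage sketch: running the manager over a memory dump produced during an
# analysis. The dump path is illustrative only; the OS profile is taken from
# memory.conf or auto-detected via imageinfo if not passed explicitly.
#
#     manager = VolatilityManager("storage/analyses/1/memory.dmp")
#     volatility_results = manager.run()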