class Analyzer(object):
    """Cuckoo Windows Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including handling of the pipe server, the auxiliary modules
    and the analysis packages.
    """

    def __init__(self):
        self.config = None
        self.target = None
        self.do_run = True
        self.time_counter = 0

        self.process_lock = threading.Lock()
        self.default_dll = None
        self.pid = os.getpid()
        self.ppid = Process(pid=self.pid).get_parent_pid()
        self.files = Files()
        self.process_list = ProcessList()
        self.package = None
        self.reboot = []

    def prepare(self):
        """Prepare the environment for analysis."""
        # Get SeDebugPrivilege for the Python process. It will be needed in
        # order to perform the injections.
        grant_debug_privilege()

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        # Pass the configuration through to the Process class.
        Process.set_config(self.config)

        # Set the virtual machine clock.
        set_clock(
            datetime.datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S"))

        # Set the default DLL to be used for this analysis.
        self.default_dll = self.config.options.get("dll")

        # If a pipe name has been provided, use it; otherwise generate a
        # random one.
        if "pipe" in self.config.options:
            self.config.pipe = "\\\\.\\PIPE\\%s" % self.config.options["pipe"]
        else:
            self.config.pipe = "\\\\.\\PIPE\\%s" % random_string(16, 32)

        # Generate a random name for the logging pipe server.
        self.config.logpipe = "\\\\.\\PIPE\\%s" % random_string(16, 32)

        # Initialize and start the Command Handler pipe server. This is going
        # to be used for communicating with the monitored processes.
        self.command_pipe = PipeServer(PipeDispatcher, self.config.pipe,
                                       message=True,
                                       dispatcher=CommandPipeHandler(self))
        self.command_pipe.daemon = True
        self.command_pipe.start()

        # Initialize and start the Log Pipe Server - the log pipe server will
        # open up a pipe that monitored processes will use to send logs to
        # before they head off to the host machine.
        destination = self.config.ip, self.config.port
        self.log_pipe_server = PipeServer(PipeForwarder, self.config.logpipe,
                                          destination=destination)
        self.log_pipe_server.daemon = True
        self.log_pipe_server.start()

        # We update the target according to its category. If it's a file,
        # then we store the target path.
        if self.config.category == "file":
            self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                       self.config.file_name)
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def stop(self):
        """Allow an auxiliary module to stop the analysis."""
        self.do_run = False

    def complete(self):
        """End the analysis."""
        # Stop the Pipe Servers.
        self.command_pipe.stop()
        self.log_pipe_server.stop()

        # Dump all the notified files.
        self.files.dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def run(self):
        """Run the analysis.
        @return: operation status.
        """
        self.prepare()
        self.path = os.getcwd()

        log.debug("Starting analyzer from: %s", self.path)
        log.debug("Pipe server name: %s", self.config.pipe)
        log.debug("Log pipe server name: %s", self.config.logpipe)

        # If no analysis package was specified at submission, we try to
        # select one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")

            # If the analysis target is a file, we choose the package
            # according to the file format.
            if self.config.category == "file":
                package = choose_package(self.config.file_type,
                                         self.config.file_name,
                                         self.config.pe_exports.split(","))
            # If it's a URL, we'll just use the default Internet Explorer
            # package.
            else:
                package = "ie"

            # If we weren't able to automatically determine the proper
            # package, we need to abort the analysis.
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"",
                     package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            __import__(package_name, globals(), locals(), ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\": it does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Enumerate the abstract's subclasses.
        try:
            package_class = Package.__subclasses__()[0]
        except IndexError as e:
            raise CuckooError("Unable to select package class "
                              "(package={0}): {1}".format(package_name, e))

        # Initialize the analysis package.
        self.package = package_class(self.config.options, analyzer=self)

        # Move the sample to the current working directory as provided by the
        # task - one is able to override the starting path of the sample.
        # E.g., for some samples it might be useful to run from %APPDATA%
        # instead of %TEMP%.
        if self.config.category == "file":
            self.target = self.package.move_curdir(self.target)

        # Initialize the Auxiliary modules.
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(
                auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled, aux_avail = [], []
        for module in Auxiliary.__subclasses__():
            # Try to start the auxiliary module.
            try:
                aux = module(options=self.config.options, analyzer=self)
                aux_avail.append(aux)
                aux.start()
            except (NotImplementedError, AttributeError):
                log.warning("Auxiliary module %s was not implemented",
                            module.__name__)
            except CuckooDisableModule:
                continue
            except Exception as e:
                log.warning("Cannot execute auxiliary module %s: %s",
                            module.__name__, e)
            else:
                log.debug("Started auxiliary module %s", module.__name__)
                aux_enabled.append(aux)

        ###################################################################
        # New code (2017_05_19): copy the sample aside, create a fresh log
        # file, parse the PE header to determine the sample's bitness, and
        # launch the matching helper binary instead of the regular package.
        shutil.copyfile(self.target, "C:\\dbg\\sample.exe")

        with open("log.txt", "w") as creation:
            creation.write("log start")

        with open(self.target, "rb") as sample:
            s = sample.read(2)
            # Anything without an "MZ" DOS header is handed to the 64-bit
            # helper.
            if s != "MZ":
                is32bit = False
            else:
                # e_lfanew at offset 60 points to the PE header; the Machine
                # field follows the 4-byte "PE\0\0" signature.
                sample.seek(60)
                s = sample.read(4)
                header_offset = struct.unpack("<L", s)[0]
                sample.seek(header_offset + 4)
                s = sample.read(2)
                machine = struct.unpack("<H", s)[0]
                # 332 == 0x014c == IMAGE_FILE_MACHINE_I386.
                is32bit = machine == 332

        if is32bit:
            self.target = "C:\\dbg\\Helper32.exe"
        else:
            self.target = "C:\\dbg\\Helper64.exe"

        # The monitoring loop below expects a list of process IDs; make sure
        # it is defined even if launching the helper fails.
        pids = []
        try:
            proc = Popen(self.target)
            pids = [proc.pid]
        except Exception as e:
            log.error("custom: failed to open process %s: %s",
                      self.target, e)
        ###################################################################

        # Original code: start the analysis package. If for any reason the
        # execution of the analysis package fails, we have to abort the
        # analysis.
        # Disabled in favor of the helper launch above:
        # try:
        #     pids = self.package.start(self.target)
        # except NotImplementedError:
        #     raise CuckooError(
        #         "The package \"%s\" doesn't contain a run function." %
        #         package_name
        #     )
        # except CuckooPackageError as e:
        #     raise CuckooError(
        #         "The package \"%s\" start function raised an error: %s" %
        #         (package_name, e)
        #     )
        # except Exception as e:
        #     raise CuckooError(
        #         "The package \"%s\" start function encountered an "
        #         "unhandled exception: %s" % (package_name, e)
        #     )
        ###################################################################

        # If the analysis package returned a list of process identifiers, we
        # add them to the list of monitored processes and enable the process
        # monitor.
        if pids:
            self.process_list.add_pids(pids)
            pid_check = True
        # If the package didn't return any process ID (for example in the
        # case where the package isn't enabling any behavioral analysis), we
        # don't enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options whether the user enabled timeout enforcement.
        # If so, we need to override pid_check and disable the process
        # monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        while self.do_run:
            self.time_counter += 1
            if self.time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            # If the process lock is locked, it means that something is
            # operating on the list of monitored processes. Therefore we
            # cannot proceed with the checks until the lock is released.
            if self.process_lock.locked():
                KERNEL32.Sleep(1000)
                continue

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    for pid in self.process_list.pids:
                        if not Process(pid=pid).is_alive():
                            log.info("Process with pid %s has terminated",
                                     pid)
                            self.process_list.remove_pid(pid)

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis.
                    if not self.process_list.pids:
                        log.info("Process list is empty, "
                                 "terminating analysis.")
                        break

                    # Update the list of monitored processes available to
                    # the analysis package. It could be used for internal
                    # operations within the module.
                    self.package.set_pids(self.process_list.pids)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every iteration of the loop. If this
                    # function returns False, it means that it requested the
                    # analysis to be terminated.
                    if not self.package.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break
                # If the check() function of the package raised an exception
                # we don't care, we can still proceed with the analysis, but
                # we log a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_name, e)
            finally:
                # Zzz.
                KERNEL32.Sleep(1000)

        if not self.do_run:
            log.debug("The analyzer has been stopped on request by an "
                      "auxiliary module.")

        # Create the shutdown mutex.
        KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            self.package.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        try:
            # Upload files the package created to package_files in the
            # results folder.
            for path, name in self.package.package_files() or []:
                upload_to_host(path, os.path.join("package_files", name))
        except Exception as e:
            log.warning("The package \"%s\" package_files function raised "
                        "an exception: %s", package_name, e)

        # Terminate the Auxiliary modules.
        for aux in aux_enabled:
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        if self.config.terminate_processes:
            # Try to terminate the remaining active processes.
            log.info("Terminating remaining processes before shutdown.")

            for pid in self.process_list.pids:
                proc = Process(pid=pid)
                if proc.is_alive():
                    try:
                        proc.terminate()
                    except Exception:
                        continue

        ###################################################################
        # New code: give the helper a moment to flush, then read back its
        # log file, strip the NUL bytes, and dump it to the analyzer log.
        time.sleep(3)
        with open("C:\\dbg\\log.txt") as f_log:
            raw = f_log.read()
        data = "".join(raw.split("\x00"))
        log.debug("logged:\n%s", data)
        ###################################################################

        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()

        return True
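
# A minimal standalone sketch of the PE bitness check performed by the
# custom block above, assuming the same on-disk layout it reads: an "MZ"
# DOS header, e_lfanew at offset 60, and the Machine field right after the
# "PE\0\0" signature. The constants are the documented IMAGE_FILE_MACHINE
# values; the function name is hypothetical, not part of the original code.
import struct

IMAGE_FILE_MACHINE_I386 = 0x014c    # 332 decimal, as tested above
IMAGE_FILE_MACHINE_AMD64 = 0x8664

def pe_is_32bit(path):
    """Return True if the PE file at path targets 32-bit x86."""
    with open(path, "rb") as f:
        if f.read(2) != "MZ":
            raise ValueError("not an MZ executable")
        f.seek(60)
        header_offset = struct.unpack("<L", f.read(4))[0]
        f.seek(header_offset + 4)
        machine = struct.unpack("<H", f.read(2))[0]
    return machine == IMAGE_FILE_MACHINE_I386
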
class Analyzer(object):
    """Cuckoo Windows Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including handling of the pipe server, the auxiliary modules
    and the analysis packages.
    """

    def __init__(self):
        self.config = None
        self.target = None
        self.do_run = True
        self.time_counter = 0

        self.process_lock = threading.Lock()
        self.default_dll = None
        self.pid = os.getpid()
        self.ppid = Process(pid=self.pid).get_parent_pid()
        self.files = Files()
        self.process_list = ProcessList()
        self.package = None

    def prepare(self):
        """Prepare the environment for analysis."""
        # Get SeDebugPrivilege for the Python process. It will be needed in
        # order to perform the injections.
        grant_debug_privilege()

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        # Pass the configuration through to the Process class.
        Process.set_config(self.config)

        # Set the virtual machine clock.
        clock = datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S")

        # Set the date and time.
        # NOTE: Windows only ships localized DATE and TIME commands whose
        # expected input format follows the localization settings, so these
        # English-format invocations may not work on other localizations.
        # In addition, the DATE and TIME commands block when an incorrect
        # syntax is provided, so an echo trick is used to bypass the input
        # prompt and avoid blocking the analysis.
        os.system("echo:|date {0}".format(clock.strftime("%m-%d-%y")))
        os.system("echo:|time {0}".format(clock.strftime("%H:%M:%S")))

        # Set the default DLL to be used for this analysis.
        self.default_dll = self.config.options.get("dll")

        # If a pipe name has been provided, use it; otherwise generate a
        # random one.
        if "pipe" in self.config.options:
            self.config.pipe = "\\\\.\\PIPE\\%s" % self.config.options["pipe"]
        else:
            self.config.pipe = "\\\\.\\PIPE\\%s" % random_string(16, 32)

        # Generate a random name for the logging pipe server.
        self.config.logpipe = "\\\\.\\PIPE\\%s" % random_string(16, 32)

        # Initialize and start the Command Handler pipe server. This is going
        # to be used for communicating with the monitored processes.
        self.command_pipe = PipeServer(PipeDispatcher, self.config.pipe,
                                       message=True,
                                       dispatcher=CommandPipeHandler(self))
        self.command_pipe.daemon = True
        self.command_pipe.start()

        # Initialize and start the Log Pipe Server - the log pipe server will
        # open up a pipe that monitored processes will use to send logs to
        # before they head off to the host machine.
        destination = self.config.ip, self.config.port
        self.log_pipe_server = PipeServer(PipeForwarder, self.config.logpipe,
                                          destination=destination)
        self.log_pipe_server.daemon = True
        self.log_pipe_server.start()

        # We update the target according to its category. If it's a file,
        # then we store the target path.
        if self.config.category == "file":
            self.target = os.path.join(os.environ["TEMP"] + os.sep,
                                       self.config.file_name)
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def stop(self):
        """Allow an auxiliary module to stop the analysis."""
        self.do_run = False

    def complete(self):
        """End the analysis."""
        # Stop the Pipe Servers.
        self.command_pipe.stop()
        self.log_pipe_server.stop()

        # Dump all the notified files.
        self.files.dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def run(self):
        """Run the analysis.
        @return: operation status.
""" self.prepare() log.debug("Starting analyzer from: %s", os.getcwd()) log.debug("Pipe server name: %s", self.config.pipe) log.debug("Log pipe server name: %s", self.config.logpipe) # If no analysis package was specified at submission, we try to select # one automatically. if not self.config.package: log.debug("No analysis package specified, trying to detect " "it automagically.") # If the analysis target is a file, we choose the package according # to the file format. if self.config.category == "file": package = choose_package(self.config.file_type, self.config.file_name, self.config.pe_exports.split(",")) # If it's an URL, we'll just use the default Internet Explorer # package. else: package = "ie" # If we weren't able to automatically determine the proper package, # we need to abort the analysis. if not package: raise CuckooError("No valid package available for file " "type: {0}".format(self.config.file_type)) log.info("Automatically selected analysis package \"%s\"", package) # Otherwise just select the specified package. else: package = self.config.package # Generate the package path. package_name = "modules.packages.%s" % package # Try to import the analysis package. try: __import__(package_name, globals(), locals(), ["dummy"], -1) # If it fails, we need to abort the analysis. except ImportError: raise CuckooError("Unable to import package \"{0}\", does " "not exist.".format(package_name)) # Initialize the package parent abstract. Package() # Enumerate the abstract subclasses. try: package_class = Package.__subclasses__()[0] except IndexError as e: raise CuckooError("Unable to select package class " "(package={0}): {1}".format(package_name, e)) # Initialize the analysis package. self.package = package_class(self.config.options) # Move the sample to the current working directory as provided by the # task - one is able to override the starting path of the sample. # E.g., for some samples it might be useful to run from %APPDATA% # instead of %TEMP%. if self.config.category == "file": self.target = self.package.move_curdir(self.target) # Initialize Auxiliary modules Auxiliary() prefix = auxiliary.__name__ + "." for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix): if ispkg: continue # Import the auxiliary module. try: __import__(name, globals(), locals(), ["dummy"], -1) except ImportError as e: log.warning("Unable to import the auxiliary module " "\"%s\": %s", name, e) # Walk through the available auxiliary modules. aux_enabled, aux_avail = [], [] for module in Auxiliary.__subclasses__(): # Try to start the auxiliary module. try: aux = module(options=self.config.options, analyzer=self) aux_avail.append(aux) aux.start() except (NotImplementedError, AttributeError): log.warning("Auxiliary module %s was not implemented", aux.__class__.__name__) except Exception as e: log.warning("Cannot execute auxiliary module %s: %s", aux.__class__.__name__, e) else: log.debug("Started auxiliary module %s", aux.__class__.__name__) aux_enabled.append(aux) # Start analysis package. If for any reason, the execution of the # analysis package fails, we have to abort the analysis. 
        try:
            pids = self.package.start(self.target)
        except NotImplementedError:
            raise CuckooError("The package \"{0}\" doesn't contain a run "
                              "function.".format(package_name))
        except CuckooPackageError as e:
            raise CuckooError("The package \"{0}\" start function raised an "
                              "error: {1}".format(package_name, e))
        except Exception as e:
            raise CuckooError("The package \"{0}\" start function "
                              "encountered an unhandled exception: "
                              "{1}".format(package_name, e))

        # If the analysis package returned a list of process identifiers, we
        # add them to the list of monitored processes and enable the process
        # monitor.
        if pids:
            self.process_list.add_pids(pids)
            pid_check = True
        # If the package didn't return any process ID (for example in the
        # case where the package isn't enabling any behavioral analysis), we
        # don't enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options whether the user enabled timeout enforcement.
        # If so, we need to override pid_check and disable the process
        # monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        while self.do_run:
            self.time_counter += 1
            if self.time_counter == int(self.config.timeout):
                log.info("Analysis timeout hit, terminating analysis.")
                break

            # If the process lock is locked, it means that something is
            # operating on the list of monitored processes. Therefore we
            # cannot proceed with the checks until the lock is released.
            if self.process_lock.locked():
                KERNEL32.Sleep(1000)
                continue

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    for pid in self.process_list.pids:
                        if not Process(pid=pid).is_alive():
                            log.info("Process with pid %s has terminated",
                                     pid)
                            self.process_list.remove_pid(pid)

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis.
                    if not self.process_list.pids:
                        log.info("Process list is empty, "
                                 "terminating analysis.")
                        break

                    # Update the list of monitored processes available to
                    # the analysis package. It could be used for internal
                    # operations within the module.
                    self.package.set_pids(self.process_list.pids)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every iteration of the loop. If this
                    # function returns False, it means that it requested the
                    # analysis to be terminated.
                    if not self.package.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break
                # If the check() function of the package raised an exception
                # we don't care, we can still proceed with the analysis, but
                # we log a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_name, e)
            finally:
                # Zzz.
                KERNEL32.Sleep(1000)

        if not self.do_run:
            log.debug("The analyzer has been stopped on request by an "
                      "auxiliary module.")

        # Create the shutdown mutex.
        KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            self.package.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        try:
            # Upload files the package created to package_files in the
            # results folder.
            for path, name in self.package.package_files() or []:
                upload_to_host(path, os.path.join("package_files", name))
        except Exception as e:
            log.warning("The package \"%s\" package_files function raised "
                        "an exception: %s", package_name, e)

        # Terminate the Auxiliary modules.
        for aux in aux_enabled:
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        if self.config.terminate_processes:
            # Try to terminate the remaining active processes. We do this to
            # make sure that we clean up remaining open handles (sockets,
            # files, etc.).
            log.info("Terminating remaining processes before shutdown.")

            for pid in self.process_list.pids:
                proc = Process(pid=pid)
                if proc.is_alive():
                    try:
                        proc.terminate()
                    except Exception:
                        continue

        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()

        return True
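
# The other analyzer versions in this file call a set_clock() helper instead
# of shelling out to the localized DATE/TIME commands used above. A minimal
# locale-independent sketch of such a helper via the Win32 SetLocalTime API
# follows; the helper name mirrors the surrounding calls, but this body is
# an assumption, not the actual Cuckoo implementation.
import ctypes

class SYSTEMTIME(ctypes.Structure):
    # Field order as documented for the Win32 SYSTEMTIME structure.
    _fields_ = [
        ("wYear", ctypes.c_ushort),
        ("wMonth", ctypes.c_ushort),
        ("wDayOfWeek", ctypes.c_ushort),
        ("wDay", ctypes.c_ushort),
        ("wHour", ctypes.c_ushort),
        ("wMinute", ctypes.c_ushort),
        ("wSecond", ctypes.c_ushort),
        ("wMilliseconds", ctypes.c_ushort),
    ]

def set_clock(clock):
    """Set the local system time from a datetime.datetime object."""
    st = SYSTEMTIME(
        clock.year, clock.month, clock.isoweekday() % 7, clock.day,
        clock.hour, clock.minute, clock.second, 0,
    )
    if not ctypes.windll.kernel32.SetLocalTime(ctypes.byref(st)):
        raise ctypes.WinError()
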
class Analyzer(object):
    """Cuckoo Windows Analyzer.

    This class handles the initialization and execution of the analysis
    procedure, including handling of the pipe server, the auxiliary modules
    and the analysis packages.
    """

    def __init__(self):
        self.config = None
        self.target = None
        self.do_run = True

        self.process_lock = threading.Lock()
        self.default_dll = None
        self.pid = os.getpid()
        self.ppid = Process(pid=self.pid).get_parent_pid()
        self.files = Files()
        self.process_list = ProcessList()
        self.package = None
        self.reboot = []

    def get_pipe_path(self, name):
        """Return \\\\.\\PIPE on Windows XP and \\??\\PIPE elsewhere."""
        version = sys.getwindowsversion()
        if version.major == 5 and version.minor == 1:
            return "\\\\.\\PIPE\\%s" % name
        return "\\??\\PIPE\\%s" % name

    def parse_driver_options(self):
        """Collect the driver_-prefixed analysis options."""
        driver_options = {}
        for k, v in self.config.options.items():
            if k.startswith("driver_"):
                real_key = k.split("_", 1)[1]
                driver_options[real_key] = True
        return driver_options

    def prepare(self):
        """Prepare the environment for analysis."""
        # Get SeDebugPrivilege for the Python process. It will be needed in
        # order to perform the injections.
        grant_privilege("SeDebugPrivilege")
        grant_privilege("SeLoadDriverPrivilege")

        # Initialize logging.
        init_logging()

        # Parse the analysis configuration file generated by the agent.
        self.config = Config(cfg="analysis.conf")

        # Pass the configuration through to the Process class.
        Process.set_config(self.config)

        # Set the virtual machine clock.
        set_clock(
            datetime.datetime.strptime(self.config.clock, "%Y%m%dT%H:%M:%S"))

        # Set the default DLL to be used for this analysis.
        self.default_dll = self.config.options.get("dll")

        # If a pipe name has not been provided, generate a random one.
        self.config.pipe = self.get_pipe_path(
            self.config.options.get("pipe", random_string(16, 32)))

        # Generate a random name for the logging pipe server.
        self.config.logpipe = self.get_pipe_path(random_string(16, 32))

        # Initialize and start the Command Handler pipe server. This is going
        # to be used for communicating with the monitored processes.
        self.command_pipe = PipeServer(PipeDispatcher, self.config.pipe,
                                       message=True,
                                       dispatcher=CommandPipeHandler(self))
        self.command_pipe.daemon = True
        self.command_pipe.start()

        # Initialize and start the Log Pipe Server - the log pipe server will
        # open up a pipe that monitored processes will use to send logs to
        # before they head off to the host machine.
        destination = self.config.ip, self.config.port
        self.log_pipe_server = PipeServer(PipeForwarder, self.config.logpipe,
                                          destination=destination)
        self.log_pipe_server.daemon = True
        self.log_pipe_server.start()

        # General options, to be passed on to the package later. An earlier
        # revision had the two pipe assignments swapped:
        # self.config.options["dispatcherpipe"] = self.config.logpipe
        # self.config.options["forwarderpipe"] = self.config.pipe
        self.config.options["dispatcherpipe"] = self.config.pipe
        self.config.options["forwarderpipe"] = self.config.logpipe
        self.config.options["kernel_logpipe"] = \
            "\\\\.\\%s" % random_string(16, 32)
        self.config.options["destination"] = destination
        self.config.options["driver_options"] = self.parse_driver_options()

        # We update the target according to its category. If it's a file,
        # then we store the target path.
        if self.config.category == "file":
            self.target = os.path.join(os.environ["TEMP"],
                                       self.config.file_name)
        elif self.config.category == "archive":
            zip_path = os.path.join(os.environ["TEMP"], self.config.file_name)
            zipfile.ZipFile(zip_path).extractall(os.environ["TEMP"])
            self.target = os.path.join(os.environ["TEMP"],
                                       self.config.options["filename"])
        # If it's a URL, well.. we store the URL.
        else:
            self.target = self.config.target

    def stop(self):
        """Allow an auxiliary module to stop the analysis."""
        self.do_run = False

    def complete(self):
        """End the analysis."""
        # Stop the Pipe Servers.
        self.command_pipe.stop()
        self.log_pipe_server.stop()

        # Dump all the notified files.
        self.files.dump_files()

        # Hell yeah.
        log.info("Analysis completed.")

    def run(self):
        """Run the analysis.
        @return: operation status.
        """
        start = KERNEL32.GetTickCount()
        self.prepare()
        self.path = os.getcwd()

        log.debug("Starting analyzer from: %s", self.path)
        log.debug("Pipe server name: %s", self.config.pipe)
        log.debug("Log pipe server name: %s", self.config.logpipe)

        # If no analysis package was specified at submission, we try to
        # select one automatically.
        if not self.config.package:
            log.debug("No analysis package specified, trying to detect "
                      "it automagically.")

            # If the analysis target is a file, we choose the package
            # according to the file format.
            if self.config.category == "file":
                package = choose_package(self.config.file_type,
                                         self.config.file_name,
                                         self.config.pe_exports.split(","))
            # If it's a URL, we'll just use the default Internet Explorer
            # package.
            else:
                package = "ie"

            # If we weren't able to automatically determine the proper
            # package, we need to abort the analysis.
            if not package:
                raise CuckooError("No valid package available for file "
                                  "type: {0}".format(self.config.file_type))

            log.info("Automatically selected analysis package \"%s\"",
                     package)
        # Otherwise just select the specified package.
        else:
            package = self.config.package

        # Generate the package path.
        package_name = "modules.packages.%s" % package

        # Try to import the analysis package.
        try:
            package_module = __import__(package_name, globals(), locals(),
                                        ["dummy"], -1)
        # If it fails, we need to abort the analysis.
        except ImportError:
            raise CuckooError("Unable to import package \"{0}\": it does "
                              "not exist.".format(package_name))

        # Initialize the package parent abstract.
        Package()

        # Find the package class; the file name does not always equal the
        # class name (e.g., doc.py -> class DOC).
        class_name = next((attr for attr in dir(package_module)
                           if attr.lower() == package.lower()), None)
        if not class_name:
            raise CuckooError("Unable to select package class "
                              "(package={0})".format(package_name))

        package_class = getattr(package_module, class_name)

        # Initialize the analysis package.
        log.debug("Package arguments/options: %s", self.config.options)
        self.package = package_class(self.config.options, analyzer=self)

        # Move the sample to the current working directory as provided by the
        # task - one is able to override the starting path of the sample.
        # E.g., for some samples it might be useful to run from %APPDATA%
        # instead of %TEMP%.
        if self.config.category == "file":
            self.target = self.package.move_curdir(self.target)

        # Initialize the Auxiliary modules.
        aux_start = KERNEL32.GetTickCount()
        Auxiliary()
        prefix = auxiliary.__name__ + "."
        for loader, name, ispkg in pkgutil.iter_modules(
                auxiliary.__path__, prefix):
            if ispkg:
                continue

            # Import the auxiliary module.
            try:
                __import__(name, globals(), locals(), ["dummy"], -1)
            except ImportError as e:
                log.warning("Unable to import the auxiliary module "
                            "\"%s\": %s", name, e)

        # Walk through the available auxiliary modules.
        aux_enabled, aux_avail = [], []
        for module in Auxiliary.__subclasses__():
            # Try to start the auxiliary module.
            try:
                # Pass the timeout on to the auxiliary modules.
                self.config.options["timeout"] = self.config.timeout
                aux = module(options=self.config.options, analyzer=self)
                aux_avail.append(aux)
                aux.init()
                aux.start()
            except (NotImplementedError, AttributeError):
                log.exception("Auxiliary module %s was not implemented",
                              module.__name__)
            except CuckooDisableModule:
                continue
            except Exception as e:
                log.exception("Cannot execute auxiliary module %s: %s",
                              module.__name__, e)
            else:
                log.debug("Started auxiliary module %s", module.__name__)
                aux_enabled.append(aux)

        aux_end = KERNEL32.GetTickCount()
        log.debug("Loaded auxiliary modules in %ds",
                  (aux_end - aux_start) / 1000)

        # Forward the command pipe and log pipe names on to zer0m0n.
        zer0m0n.cmdpipe(self.config.pipe)
        zer0m0n.channel(self.config.logpipe)

        # Initialize zer0m0n with our compiled Yara rules.
        zer0m0n.yarald("bin/rules.yarac")

        # Start the analysis package. If for any reason the execution of the
        # analysis package fails, we have to abort the analysis.
        process_monitoring_start = KERNEL32.GetTickCount()
        pids = self.package.start(self.target)
        process_monitoring_end = KERNEL32.GetTickCount()
        log.debug("Monitored first process in %ds",
                  (process_monitoring_end - process_monitoring_start) / 1000)

        # If the analysis package returned a list of process identifiers, we
        # add them to the list of monitored processes and enable the process
        # monitor.
        if pids:
            self.process_list.add_pids(pids)
            pid_check = True
        # If the package didn't return any process ID (for example in the
        # case where the package isn't enabling any behavioral analysis), we
        # don't enable the process monitor.
        else:
            log.info("No process IDs returned by the package, running "
                     "for the full timeout.")
            pid_check = False

        # Check in the options whether the user enabled timeout enforcement.
        # If so, we need to override pid_check and disable the process
        # monitor.
        if self.config.enforce_timeout:
            log.info("Enabled timeout enforce, running for the full timeout.")
            pid_check = False

        end = KERNEL32.GetTickCount()
        log.info("Initialized VM in %ds", (end - start) / 1000)

        end = KERNEL32.GetTickCount() + int(self.config.timeout) * 1000
        while self.do_run:
            now = KERNEL32.GetTickCount()
            # log.debug("Time remaining: %ds", (end - now) / 1000)
            if now >= end:
                log.info("Analysis timeout hit, terminating analysis.")
                break

            # If the process lock is locked, it means that something is
            # operating on the list of monitored processes. Therefore we
            # cannot proceed with the checks until the lock is released.
            if self.process_lock.locked():
                KERNEL32.Sleep(1000)
                continue

            try:
                # If the process monitor is enabled we start checking whether
                # the monitored processes are still alive.
                if pid_check:
                    for pid in self.process_list.pids:
                        if not Process(pid=pid).is_alive():
                            log.info("Process with pid %s has terminated",
                                     pid)
                            self.process_list.remove_pid(pid)

                    # If none of the monitored processes are still alive, we
                    # can terminate the analysis.
                    if not self.process_list.pids:
                        log.info("Process list is empty, "
                                 "terminating analysis.")
                        break

                    # Update the list of monitored processes available to
                    # the analysis package. It could be used for internal
                    # operations within the module.
                    self.package.set_pids(self.process_list.pids)

                try:
                    # The analysis packages are provided with a function that
                    # is executed at every iteration of the loop. If this
                    # function returns False, it means that it requested the
                    # analysis to be terminated.
                    if not self.package.check():
                        log.info("The analysis package requested the "
                                 "termination of the analysis.")
                        break
                # If the check() function of the package raised an exception
                # we don't care, we can still proceed with the analysis, but
                # we log a warning.
                except Exception as e:
                    log.warning("The package \"%s\" check function raised "
                                "an exception: %s", package_name, e)
            finally:
                # Zzz.
                KERNEL32.Sleep(1000)

        if not self.do_run:
            log.debug("The analyzer has been stopped on request by an "
                      "auxiliary module.")

        # Create the shutdown mutex.
        KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)

        try:
            # Before shutting down the analysis, the package can perform some
            # final operations through the finish() function.
            self.package.finish()
        except Exception as e:
            log.warning("The package \"%s\" finish function raised an "
                        "exception: %s", package_name, e)

        try:
            # Upload files the package created to package_files in the
            # results folder.
            for path, name in self.package.package_files() or []:
                upload_to_host(path, os.path.join("package_files", name))
        except Exception as e:
            log.warning("The package \"%s\" package_files function raised "
                        "an exception: %s", package_name, e)

        # Terminate the Auxiliary modules.
        for aux in aux_enabled:
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Cannot terminate auxiliary module %s: %s",
                            aux.__class__.__name__, e)

        if self.config.terminate_processes:
            # Try to terminate the remaining active processes.
            log.info("Terminating remaining processes before shutdown.")

            for pid in self.process_list.pids:
                proc = Process(pid=pid)
                if proc.is_alive():
                    try:
                        proc.terminate()
                    except Exception:
                        continue

        # Run the finish callback of every available Auxiliary module.
        for aux in aux_avail:
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.warning("Exception running finish callback of auxiliary "
                            "module %s: %s", aux.__class__.__name__, e)

        # Let's invoke the completion procedure.
        self.complete()

        return True
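
# The KERNEL32 object used by the timing and sleep calls above is presumably
# the ctypes binding to kernel32.dll. A minimal sketch, assuming only the
# documented Win32 calls that appear in the code (GetTickCount, Sleep,
# CreateMutexA); the mutex name below is illustrative, not the real
# SHUTDOWN_MUTEX value.
import ctypes

KERNEL32 = ctypes.windll.kernel32
SHUTDOWN_MUTEX = "Global\\ExampleShutdownMutex"  # hypothetical value

def wait_ms(milliseconds):
    """Block for the given interval and return the measured elapsed ticks."""
    start = KERNEL32.GetTickCount()  # milliseconds since boot, wraps ~49 days
    KERNEL32.Sleep(milliseconds)
    return KERNEL32.GetTickCount() - start
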
class Analyzer(object):

    def __init__(self):
        self.config = Config(cfg="analysis.conf")
        self.default_dll = self.config.options.get("dll")
        self.is_running = True
        self.cleanup_files = []
        self.pkg_counter = 0
        self.runtime = 0
        self.reboot = []
        self.packages = {}
        self.aux_enabled = {}
        self.aux_available = {}
        self.pid = os.getpid()
        self.ppid = Process(pid=self.pid).get_parent_pid()
        self.path = os.getcwd()
        self.plock = threading.Lock()
        self.files = Files()
        self.plist = ProcessList()

    def initialize(self):
        Process.set_config(self.config)
        self.config.logpipe = get_pipe_path(random_string(16, 32))
        self.config.pipe = self.config.options.get(
            "pipe", get_pipe_path(random_string(16, 32))
        )
        self.msgclient = MessageClient(
            self.config.ip, self.config.port, self
        )

    def prepare(self):
        # Get SeDebugPrivilege for the Python process. It will be needed in
        # order to perform the injections.
        for privilege in ("SeDebugPrivilege", "SeLoadDriverPrivilege"):
            if not grant_privilege(privilege):
                log.error("Failed to grant '%s' privilege", privilege)

        # Set the system's date and time to the given values.
        set_clock(datetime.datetime.strptime(
            self.config.clock, "%Y%m%dT%H:%M:%S"
        ))

        # Initialize and start the Command Handler pipe server. This is going
        # to be used for communicating with the monitored processes.
        self.command_pipe = PipeServer(
            PipeDispatcher, self.config.pipe, message=True,
            dispatcher=CommandPipeHandler(self)
        )
        self.command_pipe.start()

        # Initialize and start the Log Pipe Server - the log pipe server will
        # open up a pipe that monitored processes will use to send logs to
        # before they head off to the host machine.
        self.log_pipe = PipeServer(
            PipeForwarder, self.config.logpipe,
            destination=(self.config.ip, self.config.port)
        )
        self.log_pipe.start()

        self.msgclient.connect()
        if not self.msgclient.connected:
            return False

        self.msgclient.start()
        return True

    def start_package(self, config):
        """Start an analysis package.
        @param config: a dictionary containing at least a target category,
        an options dictionary, and a target string or list of targets.
        @return: a package id.
        """
        pkgname = config.get("package")
        if not pkgname:
            log.info(
                "No analysis package provided, trying to automatically "
                "find a matching package"
            )
            pkg = choose_package(config)
        else:
            pkg = get_package_class(pkgname)

        if pkgname and not pkg:
            raise CuckooPackageError(
                "Could not find analysis package '%r'" % pkgname
            )

        if not pkg:
            category = config.get("category")
            raise CuckooPackageError(
                "No valid analysis package available for target category "
                "'%s'."
"%s" % ( category, config.get("file_name") if category == "file" else "" ) ) log.info("Using analysis package '%s'", pkg.__name__) options = config.get("options", {}) or {} pkg_instance = pkg(options=options, analyzer=self) category = config.get("category") if category == "file": target = os.path.join(os.environ["TEMP"], config.get("file_name")) pkg_instance.move_curdir(target) elif category == "archive": zippath = os.path.join(os.environ["TEMP"], config.get("file_name")) zipfile.ZipFile(zippath).extractall(os.environ["TEMP"]) if not options.get("filename"): raise CuckooPackageError( "No filename specified to open after unpacking archive" ) target = os.path.join(os.environ["TEMP"], options.get("filename")) elif category == "url": target = config.get("target") else: raise CuckooPackageError( "Unknown category '%s' specified" % category ) pids = pkg_instance.start(target) if pids: self.plist.add_pids(pids) self.pkg_counter += 1 pkg_id = str(config.get("pkg_id") or self.pkg_counter) self.packages[pkg_id] = pkg_instance return { "pkg_id": pkg_id, "name": pkg_instance.__class__.__name__, "pids": pkg_instance.pids_targets } def stop_package(self, pkg_id, procmemdump=False): """Stop the package matching the given package id. Process memory dumps are created if @param pkg_id: a string identifier to specify a running analysis package """ pkg_id = str(pkg_id) pkg = self.packages.get(pkg_id) if not pkg: raise CuckooPackageError( "Cannot stop package. Package with id '%r' does not " "exist" % pkg_id ) if procmemdump or pkg.options.get("procmemdump"): try: pkg.dump_procmem() except Exception as e: log.exception( "Error while creating process memory dumps for " "package '%s'. %s", pkg.__class__.__name__, e ) try: pkg.finish() except Exception as e: log.exception( "Error during analysis package '%s' finishing. %s", pkg.__class__.__name__, e ) try: # Upload files the package created to package_files in the # results folder. for path, name in pkg.package_files() or []: upload_to_host(path, os.path.join("package_files", name)) except Exception as e: log.warning( "The package '%s' package_files function raised an " "exception: %s", pkg.__class__.__name__, e ) pkg.stop() self.packages.pop(pkg_id) return {"pkg_id": pkg_id} def stop_all_packages(self): """Stop all currently running packages""" stopped = [] for pkg_id in self.packages.keys(): self.stop_package(pkg_id) stopped.append(pkg_id) return stopped def list_packages(self): """Return a dict of package identifiers and the package name that is running""" return [ { "pkg_id": pkg_id, "pids": pkg.pids_targets, "name": pkg.__class__.__name__ } for pkg_id, pkg in self.packages.iteritems() ] def dump_memory(self, pid): """Dump the memory of the specified PID""" dump_memory(pid) def list_tracked_pids(self): """Return a list of all tracked pids""" return self.plist.pids def prepare_zer0m0n(self): """Communicate settings to zer0m0n and request actions""" # Inform zer0m0n of the ResultServer address. zer0m0n.resultserver(self.config.ip, self.config.port) # Inform zer0m0n of the command and logpipe names zer0m0n.cmdpipe(self.config.pipe) zer0m0n.channel(self.config.logpipe) # Hide the analyzer and agent pids zer0m0n.hidepid(self.pid) zer0m0n.hidepid(self.ppid) # Initialize zer0m0n with our compiled Yara rules. zer0m0n.yarald("bin/rules.yarac") # Propagate the requested dump interval, if set. zer0m0n.dumpint(int(self.config.options.get("dumpint", "0"))) def start_auxiliaries(self): Auxiliary() iter_aux_modules = pkgutil.iter_modules( auxiliary.__path__, "%s." 
            % auxiliary.__name__
        )
        for loader, name, ispkg in iter_aux_modules:
            if ispkg:
                continue

            try:
                importlib.import_module(name)
            except ImportError as e:
                log.exception(
                    "Failed to import Auxiliary module: '%s'. %s", name, e
                )

        for aux_module in Auxiliary.__subclasses__():
            try:
                aux = aux_module(options=self.config.options, analyzer=self)
                self.aux_available[aux_module.__name__.lower()] = aux
                if not aux.enabled():
                    log.debug(
                        "Auxiliary module '%s' disabled", aux_module.__name__
                    )
                    raise CuckooDisableModule

                aux.init()
                aux.start()
            except (NotImplementedError, AttributeError) as e:
                log.exception(
                    "Auxiliary module '%s' could not be started. Missing "
                    "attributes or functions. %s", aux_module.__name__, e
                )
            except CuckooDisableModule:
                continue
            except Exception as e:
                log.exception(
                    "Error while starting auxiliary module '%s'. %s",
                    aux_module.__name__, e
                )
            else:
                self.aux_enabled[aux_module.__name__.lower()] = aux
                log.debug("Started auxiliary module %s", aux_module.__name__)

    def stop(self):
        """Stop the analyzer and all running modules."""
        log.info("Stopping analysis")
        for pkg_id, package in self.packages.iteritems():
            if package.options.get("procmemdump"):
                try:
                    package.dump_procmem()
                except Exception as e:
                    log.exception(
                        "Error during the creation of a memory dump for "
                        "package '%s'. %s", package.__class__.__name__, e
                    )

            try:
                # Perform final operations for all analysis packages.
                package.finish()
            except Exception as e:
                log.exception(
                    "The analysis package '%s' raised an exception in its "
                    "finish method. %s", package.__class__.__name__, e
                )

            try:
                # Upload files the package created to package_files in the
                # results folder.
                for path, name in package.package_files() or []:
                    upload_to_host(path, os.path.join("package_files", name))
            except Exception as e:
                log.exception(
                    "The package '%s' package_files function raised an "
                    "exception: %s", package.__class__.__name__, e
                )

        for aux_name, aux in self.aux_enabled.iteritems():
            try:
                aux.stop()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.exception(
                    "Failed to terminate auxiliary module: %s. %s",
                    aux_name, e
                )

        if self.config.terminate_processes:
            self.plist.terminate_tracked()

        for aux_name, aux in self.aux_enabled.iteritems():
            try:
                aux.finish()
            except (NotImplementedError, AttributeError):
                continue
            except Exception as e:
                log.exception(
                    "Failed to run the finish callback of auxiliary "
                    "module: %s. %s", aux_name, e
                )

        # Stop the pipe used for commands sent to the analyzer.
        self.command_pipe.stop()

        # Upload all pending files before ending the analysis.
        self.files.dump_files()
        return True

    def request_stop(self):
        """Can be called from outside of the analyzer to make it go through
        the proper stopping routine."""
        self.is_running = False

    def do_run(self):
        if not self.is_running:
            log.info(
                "Analyzer was requested to stop running, terminating analysis"
            )
            return False

        if self.runtime >= int(self.config.timeout):
            log.info("Analysis timeout hit, terminating analysis")
            self.is_running = False
            return False

        if not self.plist.pids and not self.config.enforce_timeout:
            log.info(
                "Process list is empty and timeout enforcing is disabled, "
                "terminating analysis."
            )
            self.is_running = False
            return False

        return True

    def finalize(self):
        """Close connections, pipes, etc. These steps are performed just
        before posting the result to the agent. Nothing can be logged
        here."""
        self.log_pipe.stop()
        self.msgclient.stop()
        disconnect_pipes()
        disconnect_logger()

    def start(self):
        """Start the analyzer."""
        log.debug(
            "Starting analyzer from: '%s'. Command pipe: '%s'. "
Log pipe: '%s'", self.path, self.config.pipe, self.config.logpipe ) self.start_auxiliaries() self.prepare_zer0m0n() self.start_package(self.config) while self.do_run(): self.runtime += 1 if self.plock.locked(): time.sleep(1) continue self.plock.acquire() try: # See if all running processing are still alive and remove # them from the tracked list if they are not. self.plist.untrack_terminated() self.plist.add_pids(zer0m0n.getpids()) for pkg_cnt, pkg in self.packages.iteritems(): pkg.set_pids(self.plist.pids) try: pkg.check() except Exception as e: log.exception( "The analysis package '%s' raised an exception. " "Error: %s. %s", pkg.__class__.__name__, e ) finally: self.plock.release() time.sleep(1) return True