def launch_analysis(self):
    """Start analysis.

    Runs the full lifecycle for one task: storage init, (for "file"
    tasks) sample integrity check + archival, machine acquisition,
    VM start, guest analysis, and guaranteed teardown in ``finally``.

    Returns:
        bool: True if the guest analysis ran to completion.

    Raises:
        CuckooDeadMachine: if the machine died mid-analysis, after
            cleaning up, so the caller retries on another machine.
    """
    succeeded = False
    # Set when the VM errors out so teardown knows to discard the guest
    # record and storage instead of releasing the machine normally.
    dead_machine = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        # acquire_machine() took the lock; release it before bailing out.
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        # Registration failed: give the machine back and surface the
        # error, but continue — the error queue is inspected elsewhere.
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    # Generate the analysis configuration file.
    options = self.build_options()

    try:
        # Tracks whether machine_lock has been released yet, so the
        # except handlers below don't double-release or leak it.
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)

        # Start the machine.
        machinery.start(self.machine.label)

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        # Initialize the guest manager.
        guest = GuestManager(self.machine.name, self.machine.ip,
                             self.machine.platform)

        # Start the analysis.
        guest.start_analysis(options)

        guest.wait_for_completion()
        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            self.db.guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually.",
                      self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Variant that also handles "pcap" and "static" task categories,
    SOCKS5 operational lookup, network routing, and guest status
    tracking in the database.

    Returns:
        bool: True if the analysis succeeded; also True for "pcap" /
        "static" categories, which short-circuit before any VM work.

    Raises:
        CuckooDeadMachine: if the machine died mid-analysis, after
            cleaning up, so the caller retries on another machine.
    """
    succeeded = False
    # Set when the VM errors out so teardown discards the guest record
    # and storage instead of releasing the machine normally.
    dead_machine = False
    self.socks5s = _load_socks5_operational()

    log.info("Task #{0}: Starting analysis of {1} '{2}'".format(
        self.task.id, self.task.category.upper(),
        convert_to_printable(self.task.target)))

    # Initialize the analysis folders.
    if not self.init_storage():
        log.debug("Failed to initialize the analysis folder")
        return False

    if self.task.category in ["file", "pcap", "static"]:
        sha256 = File(self.task.target).get_sha256()
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file(sha256):
            return False

        # Store a copy of the original file.
        if not self.store_file(sha256):
            return False

    if self.task.category in ("pcap", "static"):
        # These categories need no VM: link/copy the input in place and
        # pre-create the result directories, then report success.
        if self.task.category == "pcap":
            if hasattr(os, "symlink"):
                os.symlink(self.binary,
                           os.path.join(self.storage, "dump.pcap"))
            else:
                shutil.copy(self.binary,
                            os.path.join(self.storage, "dump.pcap"))
        # create the logs/files directories as
        # normally the resultserver would do it
        dirnames = ["logs", "files", "aux"]
        for dirname in dirnames:
            try:
                os.makedirs(os.path.join(self.storage, dirname))
            # NOTE(review): bare except silently ignores any makedirs
            # failure (best-effort, likely "already exists") — consider
            # narrowing to OSError.
            except:
                pass
        return True

    # Acquire analysis machine.
    try:
        self.acquire_machine()
        self.db.set_task_vm(self.task.id, self.machine.label,
                            self.machine.id)
    # At this point we can tell the ResultServer about it.
    except CuckooOperationalError as e:
        machine_lock.release()
        log.error("Task #{0}: Cannot acquire machine: {1}".format(
            self.task.id, e), exc_info=True)
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        # Registration failed: give the machine back and surface the
        # error, but continue — the error queue is inspected elsewhere.
        machinery.release(self.machine.label)
        log.exception(e, exc_info=True)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)

    try:
        # Tracks whether machine_lock has been released yet, so the
        # except handlers below don't double-release or leak it.
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)

        # Start the machine.
        machinery.start(self.machine.label)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        aux.start()

        # Initialize the guest manager.
        guest = GuestManager(self.machine.name, self.machine.ip,
                             self.machine.platform, self.task.id, self)

        options["clock"] = self.db.update_clock(self.task.id)
        self.db.guest_set_status(self.task.id, "starting")

        # Start the analysis.
        guest.start_analysis(options)

        # Only wait for completion if nothing already advanced the
        # status past "starting" (e.g. an external abort).
        if self.db.guest_get_status(self.task.id) == "starting":
            self.db.guest_set_status(self.task.id, "running")
            guest.wait_for_completion()

        self.db.guest_set_status(self.task.id, "stopping")
        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = get_memdump_path(self.task.id)
                need_space, space_available = free_space_monitor(
                    os.path.dirname(dump_path), return_value=True)
                if need_space:
                    log.error(
                        "Not enough free disk space! Could not dump ram (Only %d MB!)",
                        space_available)
                else:
                    machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e, exc_info=True)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning(
                "Task #{0}: Unable to stop machine {1}: {2}".format(
                    self.task.id, self.machine.label, e))

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            self.db.guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Task #{0}: Unable to release machine {1}, reason "
                      "{2}. You might need to restore it manually.".format(
                          self.task.id, self.machine.label, e))

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Fork variant with a PINDemonium unpacking pre-step (triggered by
    packages whose name starts with "exePIN") and an optional agentless
    mode (machine option "noagent").

    Returns:
        bool: True if the guest analysis ran to completion.
    """
    succeeded = False

    target = self.task.target
    if self.task.category == "file":
        # Log only the basename for file targets, not the full path.
        target = os.path.basename(target)

    log.info("Starting analysis of %s \"%s\" (task #%d, options \"%s\")",
             self.task.category.upper(), target, self.task.id,
             emit_options(self.task.options))

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    self.store_task_info()

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        # acquire_machine() took the lock; release it before bailing out.
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        # Registration failed: give the machine back and surface the
        # error, but continue — the error queue is inspected elsewhere.
        machinery.release(self.machine.label)
        self.errors.put(e)

    # Generate the analysis configuration file.
    options = self.build_options()

    #####################################
    # THIS PART IS USED FOR PINDEMONIUM #
    #####################################
    # NOTE(review): the print(options) calls look like debug leftovers;
    # consider switching them to log.debug.
    if (options["package"][0:6] == 'exePIN'):
        print(options)
        log.info(
            "\033[91m\n\n\t\t ###### Will now proceed with PINDemonium #######\n\n\033[0m"
        )
        # Rewrites the options through the PINDemonium unpacker.
        options = Unpack(options)
        print(options)
    #####################################
    # THIS PART IS USED FOR PINDEMONIUM #
    #####################################

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    try:
        # Tracks whether machine_lock has been released yet, so the
        # except handlers below don't double-release or leak it.
        unlocked = False
        self.interface = None

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)

        # Start the machine.
        machinery.start(self.machine.label, self.task)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        # Run and manage the components inside the guest unless this
        # machine has the "noagent" option specified (please refer to the
        # wait_finish() function for more details on this function).
        if "noagent" not in self.machine.options:
            self.guest_manage(options)
        else:
            self.wait_finish()

        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
        log.critical(
            "A critical error has occurred trying to use the machine "
            "with name %s during an analysis due to which it is no "
            "longer in a working state, please report this issue and all "
            "of the related environment details to the developers so we "
            "can improve this situation. (Note that before we would "
            "simply remove this VM from doing any more analyses, but as "
            "all the VMs will eventually be depleted that way, hopefully "
            "we'll find a better solution now).",
            self.machine.name,
        )
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error(
                "Unable to release machine %s, reason %s. "
                "You might need to restore it manually.",
                self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Modern variant: category-specific preprocessing is delegated to
    ``self.category_checks()``, which may short-circuit this method,
    and network failures (CuckooNetworkError) mark the machine dead
    alongside machinery errors.

    Returns:
        bool: True if the guest analysis succeeded, False otherwise;
        may also return whatever boolean category_checks() produced.

    Raises:
        CuckooDeadMachine: if the machine (or its network) died
            mid-analysis, after cleanup, so the task is rescheduled.
    """
    succeeded = False
    # Set when the VM/network errors out so teardown discards the guest
    # record and storage instead of releasing the machine normally.
    dead_machine = False
    self.socks5s = _load_socks5_operational()

    log.info(
        "Task #%s: Starting analysis of %s '%s'",
        self.task.id,
        self.task.category.upper(),
        convert_to_printable(self.task.target),
    )

    # Initialize the analysis folders.
    if not self.init_storage():
        log.debug("Failed to initialize the analysis folder")
        return False

    # A bool result means the category was fully handled (or failed)
    # without needing a VM; propagate it directly.
    category_early_escape = self.category_checks()
    if isinstance(category_early_escape, bool):
        return category_early_escape

    # Acquire analysis machine.
    try:
        self.acquire_machine()
        self.db.set_task_vm(self.task.id, self.machine.label,
                            self.machine.id)
    # At this point we can tell the ResultServer about it.
    except CuckooOperationalError as e:
        machine_lock.release()
        log.error("Task #%s: Cannot acquire machine: %s",
                  self.task.id, e, exc_info=True)
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        # Registration failed: give the machine back and surface the
        # error, but continue — the error queue is inspected elsewhere.
        machinery.release(self.machine.label)
        log.exception(e, exc_info=True)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)

    try:
        # Tracks whether machine_lock has been released yet, so the
        # except handlers below don't double-release or leak it.
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id, self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)

        # Start the machine.
        machinery.start(self.machine.label)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        aux.start()

        # Initialize the guest manager.
        guest = GuestManager(self.machine.name, self.machine.ip,
                             self.machine.platform, self.task.id, self)

        options["clock"] = self.db.update_clock(self.task.id)
        self.db.guest_set_status(self.task.id, "starting")

        # Start the analysis.
        guest.start_analysis(options)

        # Only wait for completion if nothing already advanced the
        # status past "starting" (e.g. an external abort).
        if self.db.guest_get_status(self.task.id) == "starting":
            self.db.guest_set_status(self.task.id, "running")
            guest.wait_for_completion()

        self.db.guest_set_status(self.task.id, "stopping")
        succeeded = True
    except (CuckooMachineError, CuckooNetworkError) as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = get_memdump_path(self.task.id)
                need_space, space_available = free_space_monitor(
                    os.path.dirname(dump_path), return_value=True)
                if need_space:
                    log.error(
                        "Not enough free disk space! Could not dump ram (Only %d MB!)",
                        space_available)
                else:
                    machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available for the current machine manager")
            except CuckooMachineError as e:
                log.error(e, exc_info=True)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Task #%s: Unable to stop machine %s: %s",
                        self.task.id, self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            self.db.guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error(
                "Task #%s: Unable to release machine %s, reason %s. You might need to restore it manually",
                self.task.id,
                self.machine.label,
                e,
            )

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Experiment-series variant: tasks can recur (TASK_RECURRENT); the VM
    is reverted and the guest analysis is started only for the first
    task of an experiment, and the machine is released only after the
    last task of the series.

    Returns:
        bool: True if the guest analysis ran to completion.

    Raises:
        CuckooDeadMachine: if the machine died mid-analysis, after
            cleanup, so the task is rescheduled on another machine.
    """
    succeeded = False
    # Set when the VM errors out so teardown discards the guest record
    # and storage instead of releasing the machine normally.
    dead_machine = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        sample = Database().view_sample(self.task.sample_id)

        # First task of the experiment = no previous runs recorded.
        # NOTE(review): is_first_task is only bound inside this "file"
        # branch, yet it is read unconditionally further down
        # (machinery.start(..., revert=is_first_task)) — non-"file"
        # tasks would raise NameError there. TODO confirm whether this
        # path only ever runs for "file" tasks.
        is_first_task = not self.task.experiment.times
        if is_first_task:
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries",
                                   sample.sha256)

        self.create_symlink()

    # TODO Scheduling should happen at the end of the experiment.
    scheduled = False
    if self.task.repeat == TASK_RECURRENT:
        scheduled = Database().schedule(self.task.id)

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        # acquire_machine() took the lock; release it before bailing out.
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        # Registration failed: give the machine back and surface the
        # error, but continue — the error queue is inspected elsewhere.
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    try:
        # Tracks whether machine_lock has been released yet, so the
        # except handlers below don't double-release or leak it.
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           self.machine.name,
                                           self.machine.label,
                                           machinery.__class__.__name__)

        # Starting the machinery takes some time. In order not to run into
        # race conditions with max_machines_count because acquiring new
        # machines takes place in parallel next to starting machines, for
        # which it takes a little delay before the machines' status turns
        # into "running", we hold the machine lock until the machine has
        # fully started (or gives an error, of course).
        try:
            # Start the machine, revert only if we are the first task in
            # the experiment.
            machinery.start(self.machine.label, revert=is_first_task)
        finally:
            machine_lock.release()
            unlocked = True

        # Initialize the guest manager.
        # FIXME - The critical timeout options is analysis_timeout + 60 sec
        # should it be configurable?
        guest = GuestManager(self.machine.name, self.machine.ip,
                             options["timeout"] + 60,
                             self.machine.platform)

        # Start the analysis if we are the first task of a series
        if is_first_task:
            guest.start_analysis(options)

        guest.wait_for_completion()
        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        Database().guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            Database().guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine.
            if not scheduled:
                log.debug(
                    "Freeing the machine since we're the last task of this experiment"
                )
                machinery.release(self.machine.label)

                # Unset the machine_name of this experiment.
                if self.task.experiment:
                    Database().update_experiment(
                        None, id=self.task.experiment.id, machine_name=None)
        except CuckooMachineError as e:
            log.error(
                "Unable to release machine %s, reason %s. "
                "You might need to restore it manually.",
                self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Fork variant using the lowercase ``Resultserver`` API and an
    extended ``guest.wait_for_completion(machine, storage, machinery)``
    signature; the machine is stopped and immediately restarted during
    teardown rather than left stopped.

    Returns:
        bool: True if the guest analysis ran to completion.

    Raises:
        CuckooDeadMachine: if the machine was marked dead, after
            cleanup, so the task is rescheduled on another machine.
    """
    succeeded = False
    # Set when guest_start fails so teardown discards the guest record
    # and storage instead of releasing the machine normally.
    dead_machine = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # At this point we can tell the Resultserver about it.
    try:
        Resultserver().set_machinery(machinery)
        Resultserver().add_task(self.task, self.machine)
    except Exception as e:
        # Registration failed: give the machine back and surface the
        # error, but continue — the error queue is inspected elsewhere.
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    try:
        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           self.machine.name,
                                           self.machine.label,
                                           machinery.__class__.__name__)
        # Start the machine.
        # NOTE(review): the start call is commented out here — the VM is
        # presumably expected to be running already (it is restarted in
        # the finally block below). TODO confirm this is intentional.
        # machinery.start(self.machine.label)
    except CuckooMachineError as e:
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    else:
        try:
            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform)
            # Start the analysis.
            guest.start_analysis(options)
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id": self.task.id})

        # Wait for analysis completion.
        # NOTE(review): time_taken_for_analysis is only referenced from
        # the disabled dump block below; it is never updated here.
        time_taken_for_analysis = 0
        try:
            guest.wait_for_completion(self.machine, self.storage,
                                      machinery)
            succeeded = True
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id": self.task.id})
            succeeded = False
    finally:
        # Disabled "final memory dump" logic kept as an inert string
        # literal; it is never executed.
        """
        log.info("Taking last dump before terminating...")
        mem_dir = os.path.join(self.storage, "memory", "dumps")
        try:
            os.makedirs(mem_dir)
        except:
            pass
        if len (os.listdir(mem_dir)) == 0:
            dump_dir = '1'
        else:
            dump_dir = str(int(max(os.listdir(mem_dir))) + 1)
        try:
            os.makedirs(os.path.join(mem_dir, dump_dir))
        except:
            pass
        dump_path = os.path.join(mem_dir, dump_dir, "memory.dmp")
        machinery.dump_memory(self.machine.label, dump_path)
        info_dict = {"trigger": {"name" : "End", "args": {}},
                     "time" : str(time_taken_for_analysis)}
        json.dump(info_dict, file(os.path.join(mem_dir, dump_dir, "info.json"),"wb"),
                  sort_keys=False, indent=4)
        """
        # Stop Auxiliary modules.
        aux.stop()

        try:
            # Stop the analysis machine.
            # NOTE(review): the machine is restarted right after being
            # stopped — presumably to keep it hot for the next task;
            # confirm against this fork's scheduler.
            machinery.stop(self.machine.label)
            machinery.start(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        Database().guest_stop(guest_log)

        # After all this, we can make the Resultserver forget about the
        # internal state for this analysis task.
        Resultserver().del_task(self.task, self.machine)

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            Database().guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error(
                "Unable to release machine %s, reason %s. "
                "You might need to restore it manually",
                self.machine.label, e)

    return succeeded