def guest_manage(self, options):
    """Drive the guest side of an analysis and track its DB status.

    For "baseline" tasks no sample is executed in the guest; we merely
    wait out the configured timeout. Otherwise the guest agent is told
    to start the analysis and the guest status in the database is moved
    through starting -> running -> stopping.

    @param options: analysis options dict; must contain "timeout" for
        baseline tasks.
    """
    # Handle a special case where we're creating a baseline report of this
    # particular virtual machine - a report containing all the results
    # that are gathered if no additional samples are ran in the VM. These
    # results, such as loaded drivers and opened sockets in volatility, or
    # DNS requests to hostnames related to Microsoft Windows, etc may be
    # omitted or at the very least given less priority when creating a
    # report for an analysis that ran on this VM later on.
    if self.task.category == "baseline":
        time.sleep(options["timeout"])
    else:
        # Initialize the guest manager.
        guest = GuestManager(self.machine.name, self.machine.ip,
                             self.machine.platform, self.task.id)

        # Start the analysis.
        self.db.guest_set_status(self.task.id, "starting")
        monitor = self.task.options.get("monitor", "latest")
        guest.start_analysis(options, monitor)

        # In case the Agent didn't respond and we force-quit the analysis
        # at some point while it was still starting the analysis the state
        # will be "stop" (or anything but "running", really).
        if self.db.guest_get_status(self.task.id) == "starting":
            self.db.guest_set_status(self.task.id, "running")
            guest.wait_for_completion()

        self.db.guest_set_status(self.task.id, "stopping")
def _status(self, label): """Gets current status of a physical machine. @param label: physical machine name. @return: status string. """ # For physical machines, the agent can either be contacted or not. # However, there is some information to be garnered from potential # exceptions. log.debug("Getting status for machine: %s.", label) machine = self._get_machine(label) guest = GuestManager(machine.id, machine.ip, machine.platform, None) try: status = guest.server.get_status() except xmlrpclib.Fault as e: # Contacted Agent, but it threw an error. log.debug("Agent error: %s (%s) (Error: %s).", machine.id, machine.ip, e) return self.ERROR except socket.error as e: # Could not contact agent. log.debug("Agent unresponsive: %s (%s) (Error: %s).", machine.id, machine.ip, e) return self.STOPPED except Exception as e: # TODO Handle this better. log.debug("Received unknown exception: %s.", e) return self.ERROR # If the agent responded successfully, then the physical machine # is running if status: return self.RUNNING return self.ERROR
def guest_manage(self, options):
    """Run the in-guest portion of an analysis.

    For a "baseline" task nothing is executed in the guest: we simply
    sleep for the configured timeout so the idle machine can be profiled.
    For every other task category the guest agent is instructed to start
    the analysis and we block until it reports completion.

    @param options: analysis options dict; must contain "timeout" for
        baseline tasks.
    """
    # Baseline runs need no guest interaction at all - wait out the
    # timeout and return early.
    if self.task.category == "baseline":
        time.sleep(options["timeout"])
        return

    gm = GuestManager(self.machine.name, self.machine.ip,
                      self.machine.platform)

    # Kick off the analysis with the requested monitor version (falls
    # back to "latest" when the task does not specify one), then block
    # until the guest reports it is done.
    monitor_version = self.task.options.get("monitor", "latest")
    gm.start_analysis(options, monitor_version)
    gm.wait_for_completion()
def launch_analysis(self):
    """Start analysis.

    Acquires an analysis machine, optionally starts the sniffer, runs the
    guest analysis, stores the results and triggers report generation.

    @raise CuckooAnalysisError: if unable to start or complete analysis.
    """
    log.info("Starting analysis of file \"%s\"" % self.task.file_path)

    if not os.path.exists(self.task.file_path):
        raise CuckooAnalysisError("The file to analyze does not exist at path \"%s\", analysis aborted" % self.task.file_path)

    self.init_storage()
    self.store_file()
    options = self.build_options()

    # Spin until a machine matching the task's constraints is free.
    while True:
        machine_lock.acquire()
        vm = mmanager.acquire(machine_id=self.task.machine,
                              platform=self.task.platform)
        machine_lock.release()
        if not vm:
            log.debug("No machine available")
            time.sleep(1)
        else:
            log.info("Acquired machine %s (Label: %s)" % (vm.id, vm.label))
            break

    # Initialize sniffer
    if self.cfg.cuckoo.use_sniffer:
        sniffer = Sniffer(self.cfg.cuckoo.tcpdump)
        sniffer.start(interface=self.cfg.cuckoo.interface,
                      host=vm.ip,
                      file_path=os.path.join(self.analysis.results_folder, "dump.pcap"))
    else:
        sniffer = False

    try:
        # Start machine
        mmanager.start(vm.label)
        # Initialize guest manager
        guest = GuestManager(vm.ip, vm.platform)
        # Launch analysis
        guest.start_analysis(options)
        # Wait for analysis to complete
        success = guest.wait_for_completion()

        if not success:
            raise CuckooAnalysisError("Analysis failed, review previous errors")
        # Save results
        guest.save_results(self.analysis.results_folder)
    except (CuckooMachineError, CuckooGuestError) as e:
        # BUG FIX: the original raised CuckooAnalysisError(e.message);
        # Exception.message is deprecated (removed in Python 3). Passing
        # the exception itself preserves its text in all versions.
        raise CuckooAnalysisError(e)
    finally:
        # BUG FIX: the original only stopped the sniffer on the success
        # path, leaving tcpdump running forever when an exception fired.
        # Stopping it in the finally block covers every exit path.
        if sniffer:
            sniffer.stop()
        # Stop machine
        mmanager.stop(vm.label)
        # Release the machine from lock
        mmanager.release(vm.label)

    # Launch reports generation
    Reporter(self.analysis.results_folder).run(Processor(self.analysis.results_folder).run())

    log.info("Reports generation completed (path=%s)" % self.analysis.results_folder)
def launch_analysis(self):
    """Start analysis.

    Experiment-aware variant: reverting the machine, storing the sample
    and starting the guest analysis only happen for the first task of an
    experiment; recurrent tasks are re-scheduled up front.

    @return: True if the guest analysis completed, False otherwise.
    @raise CuckooDeadMachine: when the machine died so the task can be
        retried on another machine.
    """
    succeeded = False
    dead_machine = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        sample = Database().view_sample(self.task.sample_id)

        is_first_task = len(Database().list_tasks(experiment=self.task.experiment_id)) == 1

        if is_first_task:
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sample.sha256)

        self.create_symlink()

    # NOTE(review): is_first_task is only assigned inside the "file"
    # branch above; a non-file task would hit a NameError at the
    # machinery.start(..., revert=is_first_task) call below — confirm
    # that experiment tasks are always of category "file".

    # FIXME - Scheduling should happen at the end of the experiment, but
    #         since the management of the experiment is not final that
    #         will do it.
    if self.task.repeat == TASK_RECURRENT:
        Database().schedule(self.task.id)

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    try:
        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           self.machine.name,
                                           self.machine.label,
                                           machinery.__class__.__name__)
        # Starting the machinery takes some time. In order not to run into
        # race conditions with max_machines_count because acquiring new
        # machines takes place in parallel next to starting machines, for
        # which it takes a little delay before the machines' status turns
        # into "running", we hold the machine lock until the machine has
        # fully started (or gives an error, of course).
        try:
            # Start the machine, revert only if we are the first task in
            # the experiment.
            machinery.start(self.machine.label, revert=is_first_task)
        finally:
            machine_lock.release()

        # Initialize the guest manager.
        # FIXME - The critical timeout options is analysis_timeout + 60 sec
        #         should it be configurable?
        guest = GuestManager(self.machine.name, self.machine.ip,
                             options["timeout"] + 60,
                             self.machine.platform)

        # Start the analysis if we are the first task of a series
        if is_first_task:
            guest.start_analysis(options)

        guest.wait_for_completion()
        succeeded = True
    except CuckooMachineError as e:
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    except CuckooGuestError as e:
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        Database().guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            Database().guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            if self.task.repeat == TASK_SINGLE:
                log.debug("Freeing the machine since we're the last task of this experiment")
                machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually.",
                      self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Start analysis.

    JG fork: in addition to the stock flow this sets up a netflow probe,
    a fake DNS server and NAT toggling, then boots the analysis machine,
    runs the guest analysis and tears everything down again.

    @return: True if the guest analysis completed, False otherwise.
    """
    sniffer = None
    succeeded = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    ### JG: added interaction
    if self.task.category == "file" and self.task.interaction < 2:
        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Generate the analysis configuration file.
    options = self.build_options()

    ### JG: added log output
    if options['interaction'] > 0:
        log.info("Starting analysis by interactive command shell or browser")

    # Acquire analysis machine.
    machine = self.acquire_machine()

    # At this point we can tell the Resultserver about it
    try:
        Resultserver().add_task(self.task, machine)
    except Exception as e:
        mmanager.release(machine.label)
        self.errors.put(e)

    # If enabled in the configuration, start the tcpdump instance.
    if self.cfg.sniffer.enabled:
        sniffer = Sniffer(self.cfg.sniffer.tcpdump)
        sniffer.start(interface=self.cfg.sniffer.interface,
                      host=machine.ip,
                      file_path=os.path.join(self.storage, "dump.pcap"))

    ### JG: If enabled in the configuration, start the netflow probe instance.
    if self.cfg.netflow.enabled:
        fprobe = Netflow(self.cfg.netflow.generator, self.cfg.netflow.collector)
        # Derive a per-machine port from the last octet of the machine IP.
        nflowPort = int(machine.ip.split('.')[-1])
        fprobe.start(interface=self.cfg.sniffer.interface,
                     dst=self.cfg.netflow.destination,
                     dport=nflowPort,
                     file_path=os.path.join(self.storage))
    else:
        fprobe = False

    ### JG: If enabled in the configuration, start the fake DNS server.
    if self.cfg.fakedns.enabled:
        fdns = fakeDNS(self.cfg.fakedns.fakedns)
        fdns.start(ip=self.cfg.fakedns.dnsip, withInternet=options["internet"])
    else:
        fdns = False

    def _toggle_nat(script, enabling):
        """Best-effort run of the configured NAT enable/disable script."""
        try:
            pargs = ['/usr/bin/sudo', script]
            subprocess.Popen(pargs, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        except (OSError, ValueError) as err:
            # BUG FIX: the original did log.error("Failed to enable NAT" % (e))
            # - a %-format on a string without a placeholder, which raises
            # TypeError at runtime instead of logging.
            log.error("Failed to %s NAT: %s",
                      "enable" if enabling else "disable", err)

    def _stop_network_services():
        """Tear down sniffer, netflow probe, fake DNS and NAT (best effort).

        Factored out of the original, which repeated this teardown block
        verbatim on every error path and in the finally clause.
        """
        if sniffer:
            sniffer.stop()
        if fprobe:
            fprobe.stop()
        if fdns:
            fdns.stop()
        if options["internet"]:
            _toggle_nat(self.cfg.nat.disable, enabling=False)

    ### JG: check if NAT should be enabled
    if options["internet"]:
        _toggle_nat(self.cfg.nat.enable, enabling=True)
    else:
        _toggle_nat(self.cfg.nat.disable, enabling=False)

    try:
        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           machine.name,
                                           machine.label,
                                           mmanager.__class__.__name__)
        # Start the machine.
        mmanager.start(machine.label)
    except CuckooMachineError as e:
        log.error(str(e), extra={"task_id" : self.task.id})
        _stop_network_services()
        return False
    else:
        try:
            # Initialize the guest manager.
            guest = GuestManager(machine.name, machine.ip, machine.platform)
            # Start the analysis.
            guest.start_analysis(options)
            log.info("guest initialization successful.")
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id" : self.task.id})
            _stop_network_services()
            return False
        else:
            # Wait for analysis completion.
            try:
                guest.wait_for_completion()
                succeeded = True
            except CuckooGuestError as e:
                log.error(str(e), extra={"task_id" : self.task.id})
                succeeded = False
            finally:
                _stop_network_services()

    # Take a memory dump of the machine before shutting it off.
    if self.cfg.cuckoo.memory_dump or self.task.memory:
        try:
            mmanager.dump_memory(machine.label,
                                 os.path.join(self.storage, "memory.dmp"))
        except NotImplementedError:
            log.error("The memory dump functionality is not available "
                      "for current machine manager")
        except CuckooMachineError as e:
            log.error(e)

    try:
        # Stop the analysis machine.
        mmanager.stop(machine.label)
    except CuckooMachineError as e:
        log.warning("Unable to stop machine %s: %s", machine.label, e)

    # Mark the machine in the database as stopped.
    Database().guest_stop(guest_log)

    try:
        # Release the analysis machine.
        mmanager.release(machine.label)
    except CuckooMachineError as e:
        log.error("Unable to release machine %s, reason %s. "
                  "You might need to restore it manually",
                  machine.label, e)

    # After all this, we can make the Resultserver forget about it.
    Resultserver().del_task(self.task, machine)

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Acquires a machine, optionally starts the sniffer, boots the machine,
    runs the guest analysis and performs the shutdown/release teardown.

    @return: True if the guest analysis completed, False otherwise.
    """
    sniffer = None
    succeeded = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # Acquire analysis machine.
    machine = self.acquire_machine()

    # At this point we can tell the Resultserver about it
    try:
        Resultserver().add_task(self.task, machine)
    except Exception as e:
        mmanager.release(machine.label)
        self.errors.put(e)

    # If enabled in the configuration, start the tcpdump instance.
    if self.cfg.sniffer.enabled:
        sniffer = Sniffer(self.cfg.sniffer.tcpdump)
        sniffer.start(interface=self.cfg.sniffer.interface,
                      host=machine.ip,
                      file_path=os.path.join(self.storage, "dump.pcap"))

    try:
        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           machine.name,
                                           machine.label,
                                           mmanager.__class__.__name__)
        # Start the machine.
        mmanager.start(machine.label)
    except CuckooMachineError as e:
        log.error(str(e), extra={"task_id": self.task.id})
        # Stop the sniffer.
        if sniffer:
            sniffer.stop()
        return False
    else:
        try:
            # Initialize the guest manager.
            guest = GuestManager(machine.name, machine.ip, machine.platform)
            # Start the analysis.
            guest.start_analysis(options)
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id": self.task.id})
            # Stop the sniffer.
            if sniffer:
                sniffer.stop()
            return False
        else:
            # Wait for analysis completion.
            try:
                guest.wait_for_completion()
                succeeded = True
            except CuckooGuestError as e:
                log.error(str(e), extra={"task_id": self.task.id})
                succeeded = False
            finally:
                # Stop the sniffer.
                if sniffer:
                    sniffer.stop()

    # Take a memory dump of the machine before shutting it off.
    if self.cfg.cuckoo.memory_dump or self.task.memory:
        try:
            mmanager.dump_memory(
                machine.label,
                os.path.join(self.storage, "memory.dmp"))
        except NotImplementedError:
            log.error("The memory dump functionality is not available "
                      "for current machine manager")
        except CuckooMachineError as e:
            log.error(e)

    try:
        # Stop the analysis machine.
        mmanager.stop(machine.label)
    except CuckooMachineError as e:
        log.warning("Unable to stop machine %s: %s", machine.label, e)

    # Mark the machine in the database as stopped.
    Database().guest_stop(guest_log)

    try:
        # Release the analysis machine.
        mmanager.release(machine.label)
    except CuckooMachineError as e:
        log.error(
            "Unable to release machine %s, reason %s. "
            "You might need to restore it manually",
            machine.label, e)

    # After all this, we can make the Resultserver forget about it.
    Resultserver().del_task(self.task, machine)

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Older variant: the guest results are pulled back with save_results()
    and, when stored successfully, processing/reporting is triggered at
    the end of this method.

    @return: True if the guest analysis completed, False otherwise.
    """
    sniffer = None
    succeeded = False
    stored = False

    log.info("Starting analysis of %s \"%s\" (task=%d)"
             % (self.task.category.upper(), self.task.target, self.task.id))

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # Acquire analysis machine.
    machine = self.acquire_machine()

    # If enabled in the configuration, start the tcpdump instance.
    if self.cfg.sniffer.enabled:
        sniffer = Sniffer(self.cfg.sniffer.tcpdump)
        sniffer.start(interface=self.cfg.sniffer.interface,
                      host=machine.ip,
                      file_path=os.path.join(self.storage, "dump.pcap"))

    try:
        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           machine.name,
                                           machine.label,
                                           mmanager.__class__.__name__)
        # Start the machine.
        mmanager.start(machine.label)
    except CuckooMachineError as e:
        log.error(str(e), extra={"task_id" : self.task.id})
        # Stop the sniffer.
        if sniffer:
            sniffer.stop()
        return False
    else:
        try:
            # Initialize the guest manager.
            guest = GuestManager(machine.name, machine.ip, machine.platform)
            # Start the analysis.
            guest.start_analysis(options)
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id" : self.task.id})
            # Stop the sniffer.
            if sniffer:
                sniffer.stop()
            return False
        else:
            # Wait for analysis completion.
            try:
                guest.wait_for_completion()
                succeeded = True
            except CuckooGuestError as e:
                log.error(str(e), extra={"task_id" : self.task.id})
                succeeded = False

            # Retrieve the analysis results and store them.
            try:
                guest.save_results(self.storage)
                stored = True
            except CuckooGuestError as e:
                log.error(str(e), extra={"task_id" : self.task.id})
                stored = False
        finally:
            # Stop the sniffer.
            if sniffer:
                sniffer.stop()

    # If the target is a file and the user enabled the option,
    # delete the original copy.
    if self.task.category == "file" and self.cfg.cuckoo.delete_original:
        try:
            os.remove(self.task.target)
        except OSError as e:
            log.error("Unable to delete original file at path \"%s\": "
                      "%s" % (self.task.target, e))

    # Take a memory dump of the machine before shutting it off.
    do_memory_dump = False
    if self.cfg.cuckoo.memory_dump:
        do_memory_dump = True
    else:
        if self.task.memory:
            do_memory_dump = True

    if do_memory_dump:
        try:
            mmanager.dump_memory(machine.label,
                                 os.path.join(self.storage, "memory.dmp"))
        except NotImplementedError:
            log.error("The memory dump functionality is not available "
                      "for current machine manager")
        except CuckooMachineError as e:
            log.error(e)

    try:
        # Stop the analysis machine.
        mmanager.stop(machine.label)
        # Mark the machine in the database as stopped.
        Database().guest_stop(guest_log)
        # Release the analysis machine.
        mmanager.release(machine.label)
    except CuckooMachineError as e:
        log.error("Unable to release machine %s, reason %s. "
                  "You might need to restore it manually"
                  % (machine.label, e))

    # If the results were correctly stored, we process the results and
    # generate the reports.
    if stored:
        self.process_results()

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Acquires a machine, registers it with the Resultserver, optionally
    starts the sniffer, runs the guest analysis and performs teardown.

    @return: True if the guest analysis completed, False otherwise.
    """
    sniffer = None
    succeeded = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # Acquire analysis machine.
    machine = self.acquire_machine()

    # At this point we can tell the Resultserver about it
    try:
        Resultserver().add_task(self.task, machine)
    except Exception as e:
        mmanager.release(machine.label)
        self.errors.put(e)

    # If enabled in the configuration, start the tcpdump instance.
    if self.cfg.sniffer.enabled:
        sniffer = Sniffer(self.cfg.sniffer.tcpdump)
        sniffer.start(interface=self.cfg.sniffer.interface,
                      host=machine.ip,
                      file_path=os.path.join(self.storage, "dump.pcap"))

    try:
        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           machine.name,
                                           machine.label,
                                           mmanager.__class__.__name__)
        # Start the machine.
        mmanager.start(machine.label)
    except CuckooMachineError as e:
        log.error(str(e), extra={"task_id" : self.task.id})
        # Stop the sniffer.
        if sniffer:
            sniffer.stop()
        return False
    else:
        try:
            # Initialize the guest manager.
            guest = GuestManager(machine.name, machine.ip, machine.platform)
            # Start the analysis.
            guest.start_analysis(options)
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id" : self.task.id})
            # Stop the sniffer.
            if sniffer:
                sniffer.stop()
            return False
        else:
            # Wait for analysis completion.
            try:
                guest.wait_for_completion()
                succeeded = True
            except CuckooGuestError as e:
                log.error(str(e), extra={"task_id" : self.task.id})
                succeeded = False
            finally:
                # Stop the sniffer.
                if sniffer:
                    sniffer.stop()

    # Take a memory dump of the machine before shutting it off.
    if self.cfg.cuckoo.memory_dump or self.task.memory:
        try:
            mmanager.dump_memory(machine.label,
                                 os.path.join(self.storage, "memory.dmp"))
        except NotImplementedError:
            log.error("The memory dump functionality is not available "
                      "for current machine manager")
        except CuckooMachineError as e:
            log.error(e)

    try:
        # Stop the analysis machine.
        mmanager.stop(machine.label)
    except CuckooMachineError as e:
        log.warning("Unable to stop machine %s: %s", machine.label, e)

    # Mark the machine in the database as stopped.
    Database().guest_stop(guest_log)

    try:
        # Release the analysis machine.
        mmanager.release(machine.label)
    except CuckooMachineError as e:
        log.error("Unable to release machine %s, reason %s. "
                  "You might need to restore it manually",
                  machine.label, e)

    # After all this, we can make the Resultserver forget about it.
    Resultserver().del_task(self.task, machine)

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Acquires a machine, optionally starts the sniffer, runs the guest
    analysis, stores the results and triggers report generation.

    @raise CuckooAnalysisError: if unable to start analysis.
    """
    log.info("Starting analysis of file \"%s\" (task=%s)" % (self.task.file_path, self.task.id))

    if not os.path.exists(self.task.file_path):
        raise CuckooAnalysisError(
            "The file to analyze does not exist at path \"%s\", analysis aborted" % self.task.file_path)

    self.init_storage()
    self.store_file()
    options = self.build_options()

    # Spin until a machine matching the task's constraints is free.
    while True:
        machine_lock.acquire()
        vm = mmanager.acquire(machine_id=self.task.machine,
                              platform=self.task.platform)
        machine_lock.release()
        if not vm:
            log.debug("Task #%s: no machine available" % self.task.id)
            time.sleep(1)
        else:
            log.info("Task #%s: acquired machine %s (label=%s)"
                     % (self.task.id, vm.id, vm.label))
            break

    # Initialize sniffer
    if self.cfg.cuckoo.use_sniffer:
        sniffer = Sniffer(self.cfg.cuckoo.tcpdump)
        sniffer.start(interface=self.cfg.cuckoo.interface,
                      host=vm.ip,
                      file_path=os.path.join(self.analysis.results_folder, "dump.pcap"))
    else:
        sniffer = False

    # NOTE(review): the sniffer is only stopped on the success path below;
    # if an exception fires first, tcpdump keeps running — consider moving
    # the stop into the finally block.
    try:
        # Start machine
        mmanager.start(vm.label)
        # Initialize guest manager
        guest = GuestManager(vm.id, vm.ip, vm.platform)
        # Launch analysis
        guest.start_analysis(options)
        # Wait for analysis to complete
        success = guest.wait_for_completion()
        # Stop sniffer
        if sniffer:
            sniffer.stop()

        if not success:
            raise CuckooAnalysisError(
                "Task #%s: analysis failed, review previous errors" % self.task.id)
        # Save results
        guest.save_results(self.analysis.results_folder)
    except (CuckooMachineError, CuckooGuestError) as e:
        raise CuckooAnalysisError(e)
    finally:
        # Stop machine
        mmanager.stop(vm.label)
        # Release the machine from lock
        mmanager.release(vm.label)

    # Launch reports generation
    Reporter(self.analysis.results_folder).run(
        Processor(self.analysis.results_folder).run())

    log.info("Task #%s: reports generation completed (path=%s)"
             % (self.task.id, self.analysis.results_folder))
def launch_analysis(self):
    """Start analysis.

    CAPE-style variant: also loads operational socks5 proxies, sets up
    network routing, and drives the guest status through
    starting -> running -> stopping in the database.

    @return: True if the guest analysis completed, False otherwise.
    @raise CuckooDeadMachine: when the machine died so the task can be
        retried on another machine.
    """
    succeeded = False
    dead_machine = False
    self.socks5s = _load_socks5_operational()

    log.info(
        "Task #%s: Starting analysis of %s '%s'",
        self.task.id,
        self.task.category.upper(),
        convert_to_printable(self.task.target),
    )

    # Initialize the analysis folders.
    if not self.init_storage():
        log.debug("Failed to initialize the analysis folder")
        return False

    # Category-specific early exits (e.g. file checks); a bool result
    # means "stop here" and is propagated as the analysis outcome.
    category_early_escape = self.category_checks()
    if isinstance(category_early_escape, bool):
        return category_early_escape

    # Acquire analysis machine.
    try:
        self.acquire_machine()
        self.db.set_task_vm(self.task.id, self.machine.label, self.machine.id)
    # At this point we can tell the ResultServer about it.
    except CuckooOperationalError as e:
        machine_lock.release()
        log.error("Task #%s: Cannot acquire machine: %s", self.task.id, e, exc_info=True)
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        log.exception(e, exc_info=True)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)

    try:
        # Tracks whether machine_lock has been released yet, so the
        # except handlers know whether they still need to release it.
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)
        # Start the machine.
        machinery.start(self.machine.label)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        aux.start()

        # Initialize the guest manager.
        guest = GuestManager(self.machine.name, self.machine.ip,
                             self.machine.platform, self.task.id, self)

        options["clock"] = self.db.update_clock(self.task.id)
        self.db.guest_set_status(self.task.id, "starting")
        # Start the analysis.
        guest.start_analysis(options)
        # In case the analysis was force-quit while still starting, the
        # status will no longer be "starting" and we skip the wait.
        if self.db.guest_get_status(self.task.id) == "starting":
            self.db.guest_set_status(self.task.id, "running")
            guest.wait_for_completion()

        self.db.guest_set_status(self.task.id, "stopping")
        succeeded = True
    except (CuckooMachineError, CuckooNetworkError) as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = get_memdump_path(self.task.id)
                need_space, space_available = free_space_monitor(os.path.dirname(dump_path), return_value=True)
                if need_space:
                    log.error("Not enough free disk space! Could not dump ram (Only %d MB!)", space_available)
                else:
                    machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available for the current machine manager")
            except CuckooMachineError as e:
                log.error(e, exc_info=True)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Task #%s: Unable to stop machine %s: %s", self.task.id, self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            self.db.guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error(
                "Task #%s: Unable to release machine %s, reason %s. You might need to restore it manually",
                self.task.id,
                self.machine.label,
                e,
            )

    return succeeded
def launch_analysis(self):
    """Start analysis.

    Modified fork: the machinery start call is commented out and the
    machine is stop/start-cycled during teardown instead of reverted.

    @return: True if the guest analysis completed, False otherwise.
    @raise CuckooDeadMachine: when the machine died so the task can be
        retried on another machine.
    """
    succeeded = False
    dead_machine = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # At this point we can tell the Resultserver about it.
    try:
        Resultserver().set_machinery(machinery)
        Resultserver().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    try:
        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           self.machine.name,
                                           self.machine.label,
                                           machinery.__class__.__name__)
        # Start the machine.
        # NOTE(review): the start call is commented out, so the machine is
        # assumed to be running already — confirm this is intentional.
        # machinery.start(self.machine.label)
    except CuckooMachineError as e:
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    else:
        try:
            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform)
            # Start the analysis.
            guest.start_analysis(options)
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id": self.task.id})

        # Wait for analysis completion.
        time_taken_for_analysis = 0
        try:
            guest.wait_for_completion(self.machine, self.storage, machinery)
            succeeded = True
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id": self.task.id})
            succeeded = False
        finally:
            # NOTE(review): the block below is dead code kept as a string
            # literal (an old final-memory-dump routine); consider removing.
            """
            log.info("Taking last dump before terminating...")
            mem_dir = os.path.join(self.storage, "memory", "dumps")
            try:
                os.makedirs(mem_dir)
            except:
                pass
            if len (os.listdir(mem_dir)) == 0:
                dump_dir = '1'
            else:
                dump_dir = str(int(max(os.listdir(mem_dir))) + 1)
            try:
                os.makedirs(os.path.join(mem_dir, dump_dir))
            except:
                pass
            dump_path = os.path.join(mem_dir, dump_dir, "memory.dmp")
            machinery.dump_memory(self.machine.label, dump_path)
            info_dict = {"trigger": {"name" : "End", "args": {}},
                         "time" : str(time_taken_for_analysis)}
            json.dump(info_dict, file(os.path.join(mem_dir, dump_dir, "info.json"),"wb"), sort_keys=False, indent=4)
            """
            # Stop Auxiliary modules.
            aux.stop()

            try:
                # Stop the analysis machine.
                machinery.stop(self.machine.label)
                # NOTE(review): the machine is restarted right after being
                # stopped — presumably to keep it warm for the next task;
                # confirm this is intended.
                machinery.start(self.machine.label)
            except CuckooMachineError as e:
                log.warning("Unable to stop machine %s: %s",
                            self.machine.label, e)

            # Mark the machine in the database as stopped. Unless this machine
            # has been marked as dead, we just keep it as "started" in the
            # database so it'll not be used later on in this session.
            Database().guest_stop(guest_log)

            # After all this, we can make the Resultserver forget about the
            # internal state for this analysis task.
            Resultserver().del_task(self.task, self.machine)

            if dead_machine:
                # Remove the guest from the database, so that we can assign a
                # new guest when the task is being analyzed with another
                # machine.
                Database().guest_remove(guest_log)

                # Remove the analysis directory that has been created so
                # far, as launch_analysis() is going to be doing that again.
                shutil.rmtree(self.storage)

                # This machine has turned dead, so we throw an exception here
                # which informs the AnalysisManager that it should analyze
                # this task again with another available machine.
                raise CuckooDeadMachine()

    try:
        # Release the analysis machine. But only if the machine has
        # not turned dead yet.
        machinery.release(self.machine.label)
    except CuckooMachineError as e:
        log.error("Unable to release machine %s, reason %s. "
                  "You might need to restore it manually",
                  self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Run a full analysis cycle: acquire a VM, run the guest analysis,
    collect results and generate reports.

    Fixes over the previous revision:
    * the sniffer (tcpdump) is now always stopped in the ``finally``
      clause — previously a machinery/guest error leaked a running
      tcpdump process forever;
    * the loop variable formerly named ``csv`` (which shadowed the
      stdlib module name) is renamed.

    @raise CuckooAnalysisError: if unable to start or complete analysis.
    """
    log.info('Starting analysis of file "%s" (task=%s)' % (self.task.file_path, self.task.id))

    if not os.path.exists(self.task.file_path):
        raise CuckooAnalysisError(
            'The file to analyze does not exist at path "%s", analysis aborted' % self.task.file_path
        )

    self.init_storage()
    self.store_file()
    options = self.build_options()

    # Poll until a machine matching the task's requirements is free.
    while True:
        machine_lock.acquire()
        vm = mmanager.acquire(machine_id=self.task.machine, platform=self.task.platform)
        machine_lock.release()
        if not vm:
            log.debug("Task #%s: no machine available" % self.task.id)
            time.sleep(1)
        else:
            log.info("Task #%s: acquired machine %s (label=%s)" % (self.task.id, vm.id, vm.label))
            break

    # Initialize sniffer.
    if self.cfg.cuckoo.use_sniffer:
        sniffer = Sniffer(self.cfg.cuckoo.tcpdump)
        sniffer.start(
            interface=self.cfg.cuckoo.interface,
            host=vm.ip,
            file_path=os.path.join(self.analysis.results_folder, "dump.pcap"),
        )
    else:
        sniffer = False

    try:
        # Start machine.
        mmanager.start(vm.label)
        # Initialize guest manager.
        guest = GuestManager(vm.id, vm.ip, vm.platform)
        # Launch analysis.
        guest.start_analysis(options)
        # Wait for analysis to complete.
        success = guest.wait_for_completion()

        # Stop the sniffer before pulling results so the transfer traffic
        # does not pollute dump.pcap. Clear the reference so the cleanup
        # in the finally clause below does not stop it a second time.
        if sniffer:
            sniffer.stop()
            sniffer = None

        # Save results.
        guest.save_results(self.analysis.results_folder)

        if not success:
            raise CuckooAnalysisError("Task #%s: analysis failed, review previous errors" % self.task.id)
    except (CuckooMachineError, CuckooGuestError) as e:
        raise CuckooAnalysisError(e)
    finally:
        # Bug fix: if the machinery or guest failed mid-analysis, the
        # sniffer was previously left running. Stop it here if the
        # success path above has not already done so.
        if sniffer:
            sniffer.stop()

        # Delete original file.
        if self.cfg.cuckoo.delete_original:
            try:
                os.remove(self.task.file_path)
            except OSError as e:
                log.error('Unable to delete original file at path "%s": %s' % (self.task.file_path, e))

        try:
            # Stop machine.
            mmanager.stop(vm.label)
            # Release the machine from lock.
            log.debug("Task #%s: releasing machine %s (label=%s)" % (self.task.id, vm.id, vm.label))
            mmanager.release(vm.label)
        except CuckooMachineError as e:
            log.error("Unable to release vm %s, reason %s. You have to fix it manually" % (vm.label, e))

    # Check analysis file size to avoid memory leaks during processing.
    # Note: the loop variable no longer shadows the stdlib "csv" module.
    try:
        for log_name in os.listdir(os.path.join(self.analysis.results_folder, "logs")):
            log_path = os.path.join(self.analysis.results_folder, "logs", log_name)
            if os.stat(log_path).st_size > self.cfg.cuckoo.analysis_size_limit:
                raise CuckooAnalysisError(
                    "Analysis file %s is too big to be processed. Analysis aborted. You can process it manually" % log_path
                )
    except OSError as e:
        log.warning("Log access error for analysis #%s: %s" % (self.task.id, e))

    # Launch reports generation.
    Reporter(self.analysis.results_folder).run(Processor(self.analysis.results_folder).run())

    log.info("Task #%s: reports generation completed (path=%s)" % (self.task.id, self.analysis.results_folder))
def launch_analysis(self):
    """Run one analysis task end-to-end: storage setup, machine
    acquisition, guest analysis, memory dump, teardown.

    @return: True if the guest completed the analysis, False otherwise.
    @raise CuckooDeadMachine: if the machine died mid-analysis so the
        scheduler retries the task on another machine.
    """
    succeeded = False
    dead_machine = False
    # Snapshot of currently-working SOCKS5 proxies, used by build_options().
    self.socks5s = _load_socks5_operational()

    log.info("Task #{0}: Starting analysis of {1} '{2}'".format(
        self.task.id, self.task.category.upper(),
        convert_to_printable(self.task.target)))

    # Initialize the analysis folders.
    if not self.init_storage():
        log.debug("Failed to initialize the analysis folder")
        return False

    if self.task.category in ["file", "pcap", "static"]:
        sha256 = File(self.task.target).get_sha256()
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file(sha256):
            return False

        # Store a copy of the original file.
        if not self.store_file(sha256):
            return False

    # "pcap"/"static" tasks need no guest VM at all: stage the input file
    # and the directory layout the resultserver would normally create,
    # then report success immediately.
    if self.task.category in ("pcap", "static"):
        if self.task.category == "pcap":
            if hasattr(os, "symlink"):
                os.symlink(self.binary, os.path.join(self.storage, "dump.pcap"))
            else:
                shutil.copy(self.binary, os.path.join(self.storage, "dump.pcap"))
        # create the logs/files directories as
        # normally the resultserver would do it
        dirnames = ["logs", "files", "aux"]
        for dirname in dirnames:
            try:
                os.makedirs(os.path.join(self.storage, dirname))
            except:
                pass
        return True

    # Acquire analysis machine.
    try:
        self.acquire_machine()
        self.db.set_task_vm(self.task.id, self.machine.label, self.machine.id)
    except CuckooOperationalError as e:
        # acquire_machine() left the global machine lock held; release it
        # before bailing out.
        machine_lock.release()
        log.error("Task #{0}: Cannot acquire machine: {1}".format(
            self.task.id, e), exc_info=True)
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        log.exception(e, exc_info=True)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)

    try:
        # Tracks whether machine_lock has been released, so the error
        # handlers below release it exactly once.
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)
        # Start the machine.
        machinery.start(self.machine.label)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        aux.start()

        # Initialize the guest manager.
        guest = GuestManager(self.machine.name, self.machine.ip,
                             self.machine.platform, self.task.id, self)

        options["clock"] = self.db.update_clock(self.task.id)
        self.db.guest_set_status(self.task.id, "starting")

        # Start the analysis.
        guest.start_analysis(options)

        # If the status is anything but "starting" here, the analysis was
        # force-stopped while it was still launching; skip the wait.
        if self.db.guest_get_status(self.task.id) == "starting":
            self.db.guest_set_status(self.task.id, "running")
            guest.wait_for_completion()

        self.db.guest_set_status(self.task.id, "stopping")
        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id}, exc_info=True)
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = get_memdump_path(self.task.id)
                need_space, space_available = free_space_monitor(
                    os.path.dirname(dump_path), return_value=True)
                if need_space:
                    log.error(
                        "Not enough free disk space! Could not dump ram (Only %d MB!)",
                        space_available)
                else:
                    machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e, exc_info=True)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning(
                "Task #{0}: Unable to stop machine {1}: {2}".format(
                    self.task.id, self.machine.label, e))

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            self.db.guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Task #{0}: Unable to release machine {1}, reason "
                      "{2}. You might need to restore it manually.".format(
                          self.task.id, self.machine.label, e))

    return succeeded
def launch_analysis(self):
    """Run one task of an (possibly multi-task) experiment: the first
    task of an experiment reverts and provisions the machine; follow-up
    tasks reuse the still-running machine.

    @return: True if the guest completed the analysis, False otherwise.
    @raise CuckooDeadMachine: if the machine died mid-analysis so the
        scheduler retries the task on another machine.
    """
    succeeded = False
    dead_machine = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        sample = Database().view_sample(self.task.sample_id)

        # First task of an experiment <=> no recorded run times yet.
        is_first_task = not self.task.experiment.times

        if is_first_task:
            # Check whether the file has been changed for some unknown reason.
            # And fail this analysis if it has been modified.
            if not self.check_file():
                return False

            # Store a copy of the original file.
            if not self.store_file():
                return False

        self.binary = os.path.join(CUCKOO_ROOT, "storage",
                                   "binaries", sample.sha256)
        self.create_symlink()

    # NOTE(review): is_first_task is only bound inside the "file" branch
    # above; a non-"file" task would raise NameError at machinery.start()
    # below — confirm experiments are always file tasks.

    # TODO Scheduling should happen at the end of the experiment.
    scheduled = False
    if self.task.repeat == TASK_RECURRENT:
        scheduled = Database().schedule(self.task.id)

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        # acquire_machine() left the global machine lock held.
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    try:
        # Tracks whether machine_lock has been released, so the error
        # handlers below release it exactly once.
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           self.machine.name,
                                           self.machine.label,
                                           machinery.__class__.__name__)
        # Starting the machinery takes some time. In order not to run into
        # race conditions with max_machines_count because acquiring new
        # machines takes place in parallel next to starting machines, for
        # which it takes a little delay before the machines' status turns
        # into "running", we hold the machine lock until the machine has
        # fully started (or gives an error, of course).
        try:
            # Start the machine, revert only if we are the first task in
            # the experiment.
            machinery.start(self.machine.label, revert=is_first_task)
        finally:
            machine_lock.release()
            unlocked = True

        # Initialize the guest manager.
        # FIXME - The critical timeout options is analysis_timeout + 60 sec
        # should it be configurable?
        guest = GuestManager(self.machine.name, self.machine.ip,
                             options["timeout"] + 60, self.machine.platform)

        # Start the analysis if we are the first task of a series.
        if is_first_task:
            guest.start_analysis(options)

        guest.wait_for_completion()
        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        Database().guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            Database().guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine — but only when no further task
            # of this experiment has been scheduled onto it.
            if not scheduled:
                log.debug(
                    "Freeing the machine since we're the last task of this experiment"
                )
                machinery.release(self.machine.label)

                # Unset the machine_name of this experiment.
                if self.task.experiment:
                    Database().update_experiment(
                        None, id=self.task.experiment.id, machine_name=None)
        except CuckooMachineError as e:
            log.error(
                "Unable to release machine %s, reason %s. "
                "You might need to restore it manually.",
                self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Run one analysis task end-to-end: storage setup, machine
    acquisition, guest management (or agent-less wait), teardown.

    @return: True if the analysis completed, False otherwise.
    """
    succeeded = False

    target = self.task.target
    if self.task.category == "file":
        target = os.path.basename(target)

    log.info("Starting analysis of %s \"%s\" (task #%d, options \"%s\")",
             self.task.category.upper(), target, self.task.id,
             emit_options(self.task.options))

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    # Initiates per-task logging.
    task_log_start(self.task.id)

    self.store_task_info()

    if self.task.category == "file":
        # Check if we have permissions to access the file.
        # And fail this analysis if we don't have access to the file.
        if not self.check_permissions():
            return False

        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        # acquire_machine() left the global machine lock held.
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    # Initialize the guest manager.
    self.guest_manager = GuestManager(self.machine.name, self.machine.ip,
                                      self.machine.platform, self.task.id,
                                      self)

    self.aux = RunAuxiliary(self.task, self.machine, self.guest_manager)
    self.aux.start()

    # Generate the analysis configuration file.
    options = self.build_options()

    try:
        # Tracks whether machine_lock has been released, so the error
        # handlers below release it exactly once.
        unlocked = False
        self.interface = None

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)
        # Start the machine.
        machinery.start(self.machine.label, self.task)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        # Run and manage the components inside the guest unless this
        # machine has the "noagent" option specified (please refer to the
        # wait_finish() function for more details on this function).
        if "noagent" not in self.machine.options:
            self.guest_manage(options)
        else:
            self.wait_finish()

        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error("Machinery error: %s", e, extra={"task_id": self.task.id})
        log.critical(
            "A critical error has occurred trying to use the machine "
            "with name %s during an analysis due to which it is no "
            "longer in a working state, please report this issue and all "
            "of the related environment details to the developers so we "
            "can improve this situation. (Note that before we would "
            "simply remove this VM from doing any more analyses, but as "
            "all the VMs will eventually be depleted that way, hopefully "
            "we'll find a better solution now).",
            self.machine.name,
        )
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error("Error from the Cuckoo Guest: %s", e,
                  extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        self.aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error("Machinery error: %s", e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error(
                "Unable to release machine %s, reason %s. "
                "You might need to restore it manually.",
                self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Run one analysis task against an already-running machine and
    cycle (stop + start) it afterwards.

    NOTE(review): machinery.start() below is commented out, so the VM is
    never (re)started before the analysis; combined with the stop()+start()
    pair in the finally clause this looks like a reuse-the-running-VM
    workflow — confirm this is intentional and not a debugging leftover.

    @return: True if the guest completed the analysis, False otherwise.
    @raise CuckooDeadMachine: if the machine died mid-analysis so the
        scheduler retries the task on another machine.
    """
    succeeded = False
    dead_machine = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # Generate the analysis configuration file.
    options = self.build_options()

    # At this point we can tell the Resultserver about it.
    try:
        Resultserver().set_machinery(machinery)
        Resultserver().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    try:
        # Mark the selected analysis machine in the database as started.
        guest_log = Database().guest_start(self.task.id,
                                           self.machine.name,
                                           self.machine.label,
                                           machinery.__class__.__name__)
        # Start the machine.
        # machinery.start(self.machine.label)
    except CuckooMachineError as e:
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    else:
        try:
            # Initialize the guest manager.
            guest = GuestManager(self.machine.name, self.machine.ip,
                                 self.machine.platform)
            # Start the analysis.
            guest.start_analysis(options)
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id": self.task.id})

        # Wait for analysis completion.
        # NOTE(review): time_taken_for_analysis is never updated; it is
        # only referenced from the disabled dump block below.
        time_taken_for_analysis = 0
        try:
            guest.wait_for_completion(self.machine, self.storage, machinery)
            succeeded = True
        except CuckooGuestError as e:
            log.error(str(e), extra={"task_id": self.task.id})
            succeeded = False
    finally:
        # The block below (final memory dump + trigger metadata) has been
        # disabled by turning it into a string literal; it is kept as-is.
        """
        log.info("Taking last dump before terminating...")
        mem_dir = os.path.join(self.storage, "memory", "dumps")
        try:
            os.makedirs(mem_dir)
        except:
            pass
        if len (os.listdir(mem_dir)) == 0:
            dump_dir = '1'
        else:
            dump_dir = str(int(max(os.listdir(mem_dir))) + 1)
        try:
            os.makedirs(os.path.join(mem_dir, dump_dir))
        except:
            pass
        dump_path = os.path.join(mem_dir, dump_dir, "memory.dmp")
        machinery.dump_memory(self.machine.label, dump_path)
        info_dict = {"trigger": {"name" : "End", "args": {}},
                     "time" : str(time_taken_for_analysis)}
        json.dump(info_dict, file(os.path.join(mem_dir, dump_dir, "info.json"),"wb"), sort_keys=False, indent=4)
        """

        # Stop Auxiliary modules.
        aux.stop()

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
            # NOTE(review): the machine is immediately started again —
            # presumably to have it ready for the next task; confirm.
            machinery.start(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        Database().guest_stop(guest_log)

        # After all this, we can make the Resultserver forget about the
        # internal state for this analysis task.
        Resultserver().del_task(self.task, self.machine)

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            Database().guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually",
                      self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Run one analysis task end-to-end: storage setup, machine
    acquisition, guest analysis, memory dump, teardown.

    @return: True if the guest completed the analysis, False otherwise.
    @raise CuckooDeadMachine: if the machine died mid-analysis so the
        scheduler retries the task on another machine.
    """
    succeeded = False
    dead_machine = False

    log.info("Starting analysis of %s \"%s\" (task=%d)",
             self.task.category.upper(), self.task.target, self.task.id)

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        # acquire_machine() left the global machine lock held.
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    # Generate the analysis configuration file.
    options = self.build_options()

    try:
        # Tracks whether machine_lock has been released, so the error
        # handlers below release it exactly once.
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)
        # Start the machine.
        machinery.start(self.machine.label)

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        # Initialize the guest manager.
        guest = GuestManager(self.machine.name, self.machine.ip,
                             self.machine.platform)

        # Start the analysis.
        guest.start_analysis(options)

        guest.wait_for_completion()
        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            self.db.guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually.",
                      self.machine.label, e)

    return succeeded
def launch_analysis(self):
    """Run a full analysis cycle with an additional VMware screenshot
    component ("Screener") running alongside the guest analysis.

    @raise CuckooAnalysisError: if unable to start or complete analysis.
    @raise CuckooMachineError: if the machine manager config is missing.
    """
    log.info("Starting analysis of file \"%s\" (task=%s)"
             % (self.task.file_path, self.task.id))

    if not os.path.exists(self.task.file_path):
        raise CuckooAnalysisError("The file to analyze does not exist at path \"%s\", analysis aborted"
                                  % self.task.file_path)

    self.init_storage()
    self.store_file()
    options = self.build_options()

    # Poll until a machine matching the task's requirements is free.
    while True:
        machine_lock.acquire()
        vm = mmanager.acquire(machine_id=self.task.machine,
                              platform=self.task.platform)
        machine_lock.release()
        if not vm:
            log.debug("Task #%s: no machine available" % self.task.id)
            time.sleep(1)
        else:
            log.info("Task #%s: acquired machine %s (label=%s)"
                     % (self.task.id, vm.id, vm.label))
            break

    # Initialize sniffer
    if self.cfg.cuckoo.use_sniffer:
        sniffer = Sniffer(self.cfg.cuckoo.tcpdump)
        sniffer.start(interface=self.cfg.cuckoo.interface,
                      host=vm.ip,
                      file_path=os.path.join(self.analysis.results_folder, "dump.pcap"))
    else:
        sniffer = False

    # Initialize VMWare ScreenShot
    # NOTE(review): instantiating the base MachineManager and grabbing the
    # first subclass assumes exactly one machinery module is loaded —
    # confirm this holds for this deployment.
    MachineManager()
    module = MachineManager.__subclasses__()[0]
    mman = module()
    mman_conf = os.path.join(CUCKOO_ROOT, "conf",
                             "%s.conf" % self.cfg.cuckoo.machine_manager)
    if not os.path.exists(mman_conf):
        raise CuckooMachineError("The configuration file for machine manager \"%s\" does not exist at path: %s"
                                 % (self.cfg.cuckoo.machine_manager, mman_conf))
    mman.set_options(Config(mman_conf))
    mman.initialize(self.cfg.cuckoo.machine_manager)
    # NOTE(review): guest credentials "avtest"/"avtest" are hard-coded
    # here — presumably lab-only; consider moving them to configuration.
    screener = Screener(mman.options.vmware.path, vm.label,
                        "avtest", "avtest", self.analysis.results_folder)

    try:
        # Start machine
        mmanager.start(vm.label)
        # Initialize guest manager
        guest = GuestManager(vm.id, vm.ip, vm.platform)
        # Launch analysis
        guest.start_analysis(options)
        # Start Screenshots
        screener.start()
        # Wait for analysis to complete
        success = guest.wait_for_completion()
        # Stop sniffer
        if sniffer:
            sniffer.stop()
        # Stop Screenshots
        if screener:
            screener.stop()

        # NOTE(review): this raise happens before save_results(), so a
        # failed analysis never has its partial results saved — confirm
        # that is the intended behaviour.
        if not success:
            raise CuckooAnalysisError("Task #%s: analysis failed, review previous errors" % self.task.id)

        # Save results
        guest.save_results(self.analysis.results_folder)
    except (CuckooMachineError, CuckooGuestError) as e:
        raise CuckooAnalysisError(e)
    #"""
    finally:
        # Stop machine
        mmanager.stop(vm.label)
        # Release the machine from lock
        mmanager.release(vm.label)
    #"""

    # Launch reports generation
    Reporter(self.analysis.results_folder).run(Processor(self.analysis.results_folder).run())

    log.info("Task #%s: reports generation completed (path=%s)"
             % (self.task.id, self.analysis.results_folder))