def resubmit(request, task_id):
    task = Database().view_task(task_id)
    if request.method == "POST":
        return index(request, task_id)

    if not task:
        return render(request, "error.html", {
            "error": "No Task found with this ID",
        })

    if task.category == "file":
        return render_index(request, {
            "sample_id": task.sample_id,
            "file_name": os.path.basename(task.target),
            "resubmit": "file",
            "options": emit_options(task.options),
        })
    elif task.category == "url":
        return render_index(request, {
            "url": task.target,
            "resubmit": "URL",
            "options": emit_options(task.options),
        })

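# Note: all of these snippets serialize task options with emit_options()
# and (in the submission views) parse them back with parse_options(). For
# reference, a minimal sketch of that pair, assuming Cuckoo's usual
# "key1=value1,key2=value2" format; the authoritative versions live in
# the codebase itself (likely lib/cuckoo/common/utils.py). The _sketch
# names below are illustrative, not the real identifiers.
def _emit_options_sketch(options):
    """Serialize an options dict into a comma-separated key=value string."""
    return ",".join("%s=%s" % (k, v) for k, v in options.items())

def _parse_options_sketch(options):
    """Parse a comma-separated key=value string back into a dict."""
    ret = {}
    for field in options.split(","):
        if "=" not in field:
            continue
        key, value = field.split("=", 1)
        ret[key.strip()] = value.strip()
    return ret
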
def run(self): """Run information gathering. @return: information dict. """ self.key = "info" db = Database() dbtask = db.view_task(self.task["id"], details=True) if dbtask: task = dbtask.to_dict() else: # task is gone from the database if os.path.isfile(self.taskinfo_path): # we've got task.json, so grab info from there task = json_decode(open(self.taskinfo_path).read()) else: # we don't have any info on the task :( emptytask = Task() emptytask.id = self.task["id"] task = emptytask.to_dict() filepath = os.path.join(CUCKOO_ROOT, ".git", "refs", "heads", "master") if os.path.exists(filepath) and os.access(filepath, os.R_OK): git_head = open(filepath, "rb").read().strip() else: git_head = None filepath = os.path.join(CUCKOO_ROOT, ".git", "FETCH_HEAD") if os.path.exists(filepath) and os.access(filepath, os.R_OK): git_fetch_head = open(filepath, "rb").read().strip() # Only obtain the hash. if git_fetch_head: git_fetch_head = git_fetch_head.split()[0] else: git_fetch_head = None return dict( version=CUCKOO_VERSION, git={ "head": git_head, "fetch_head": git_fetch_head, }, started=task["started_on"], ended=task.get("completed_on", "none"), duration=task.get("duration", -1), id=int(task["id"]), category=task["category"], custom=task["custom"], owner=task["owner"], machine=task["guest"], package=task["package"], platform=task["platform"], options=emit_options(task["options"]), route=task["route"], )
def submit_dropped(request, task_id, sha1):
    if request.method == "POST":
        return index(request, task_id, sha1)

    task = Database().view_task(task_id)
    if not task:
        return render(request, "error.html", {
            "error": "No Task found with this ID",
        })

    filepath = dropped_filepath(task_id, sha1)
    return render_index(request, {
        "file_name": os.path.basename(filepath),
        "resubmit": "file",
        "dropped_file": True,
        "options": emit_options(task.options),
    })

def reschedule(self, task_id, priority=None):
    """Reschedule a task.
    @param task_id: ID of the task to reschedule.
    @return: ID of the newly created task.
    """
    task = self.view_task(task_id)
    if not task:
        return

    if task.category == "file":
        add = self.add_path
    elif task.category == "url":
        add = self.add_url
    else:
        return

    # Change status to recovered.
    session = self.Session()
    session.query(Task).get(task_id).status = TASK_RECOVERED
    try:
        session.commit()
    except SQLAlchemyError as e:
        log.debug("Database error rescheduling task: {0}".format(e))
        session.rollback()
        return False
    finally:
        session.close()

    # Normalize tags.
    if task.tags:
        tags = ",".join(tag.name for tag in task.tags)
    else:
        tags = task.tags

    # Assign a new priority.
    if priority:
        task.priority = priority

    options = emit_options(task.options)

    return add(task.target, task.timeout, task.package, options,
               task.priority, task.custom, task.owner, task.machine,
               task.platform, tags, task.memory, task.enforce_timeout,
               task.clock)

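# Hypothetical usage of reschedule() above (the task id and priority are
# made up for illustration): a falsy return means the task could not be
# recovered, e.g. an unknown task id, an unsupported category, or a
# database error while flagging the task as TASK_RECOVERED.
db = Database()
new_task_id = db.reschedule(task_id=42, priority=2)
if not new_task_id:
    log.warning("Could not reschedule task #42")
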
def build_options(self):
    """Generate analysis options.
    @return: options dict.
    """
    options = {}

    if self.task.category == "file":
        options["file_name"] = File(self.task.target).get_name()
        options["file_type"] = File(self.task.target).get_type()
        options["pe_exports"] = \
            ",".join(File(self.task.target).get_exported_functions())

        package, activity = File(self.task.target).get_apk_entry()
        self.task.options["apk_entry"] = "%s:%s" % (package, activity)

    options["id"] = self.task.id
    options["ip"] = self.machine.resultserver_ip
    options["port"] = self.machine.resultserver_port
    options["category"] = self.task.category
    options["target"] = self.task.target
    options["package"] = self.task.package
    options["options"] = emit_options(self.task.options)
    options["enforce_timeout"] = self.task.enforce_timeout
    options["clock"] = self.task.clock
    options["terminate_processes"] = self.cfg.cuckoo.terminate_processes

    # SW
    options["files_needed"] = self.cfg.dvasion.files_needed

    if not self.task.timeout:
        options["timeout"] = self.cfg.timeouts.default
    else:
        options["timeout"] = self.task.timeout

    # Copy in other analyzer specific options, TEMPORARY (most likely).
    vm_options = getattr(machinery.options, self.machine.name)
    for k in vm_options:
        if k.startswith("analyzer_"):
            options[k] = vm_options[k]

    return options

def build_options(self):
    """Generate analysis options.
    @return: options dict.
    """
    options = {}

    if self.task.category == "file":
        options["file_name"] = File(self.task.target).get_name()
        # Commenting out for now because these aren't populated.
        # options["file_type"] = File(self.task.target).get_type()
        # options["pe_exports"] = \
        #     ",".join(File(self.task.target).get_exported_functions())
        # package, activity = File(self.task.target).get_apk_entry()
        # self.task.options["apk_entry"] = "%s:%s" % (package, activity)

    options["id"] = self.task.id
    options["ip"] = self.machine.resultserver_ip
    options["port"] = self.machine.resultserver_port
    options["category"] = self.task.category
    options["target"] = self.task.target
    options["package"] = self.task.package
    options["options"] = emit_options(self.task.options)
    options["enforce_timeout"] = self.task.enforce_timeout
    options["clock"] = self.task.clock
    options["terminate_processes"] = self.cfg.cuckoo.terminate_processes
    options["docker_images"] = self.task.docker_images

    if not self.task.timeout:
        options["timeout"] = self.cfg.timeouts.default
    else:
        options["timeout"] = self.task.timeout

    # Copy in other analyzer specific options, TEMPORARY (most likely).
    vm_options = getattr(machinery.options, self.machine.name)
    for k in vm_options:
        if k.startswith("analyzer_"):
            options[k] = vm_options[k]

    return options

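# Illustrative only: the dict returned by build_options() is what ends up
# as the guest's analysis configuration. With a hypothetical file task it
# might look roughly like this (values depend entirely on the task and
# the machinery configuration):
example_options = {
    "id": 1,
    "ip": "192.168.56.1",          # resultserver_ip of the machine
    "port": 2042,                  # resultserver_port of the machine
    "category": "file",
    "target": "/tmp/upload/sample.exe",
    "package": "exe",
    "options": "procmemdump=yes,route=internet",
    "enforce_timeout": False,
    "clock": None,
    "terminate_processes": False,
    "timeout": 120,
}
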
def run(self):
    """Run information gathering.
    @return: information dict.
    """
    self.key = "info"

    db = Database()
    dbtask = db.view_task(self.task["id"], details=True)

    if dbtask:
        task = dbtask.to_dict()
    else:
        # Task is gone from the database.
        if os.path.isfile(self.taskinfo_path):
            # We've got task.json, so grab the info from there.
            task = json_decode(open(self.taskinfo_path).read())
        else:
            # We don't have any info on the task :(
            emptytask = Task()
            emptytask.id = self.task["id"]
            task = emptytask.to_dict()

    return dict(
        version=CUCKOO_VERSION,
        started=task["started_on"],
        ended=task.get("completed_on", "none"),
        duration=task.get("duration", -1),
        id=int(task["id"]),
        category=task["category"],
        custom=task["custom"],
        owner=task["owner"],
        machine=task["guest"],
        package=task["package"],
        platform=task["platform"],
        options=emit_options(task["options"]),
        route=task["route"],
    )

def index(request, task_id=None, sha1=None):
    if request.method == "GET":
        return render_index(request)

    package = request.POST.get("package", "")
    timeout = force_int(request.POST.get("timeout"))
    options = request.POST.get("options", "")
    priority = force_int(request.POST.get("priority"))
    machine = request.POST.get("machine", "")
    custom = request.POST.get("custom", "")
    memory = bool(request.POST.get("memory", False))
    enforce_timeout = bool(request.POST.get("enforce_timeout", False))
    tags = request.POST.get("tags", None)

    options = parse_options(options)

    # The following POST fields take precedence over the options field.
    if request.POST.get("route"):
        options["route"] = request.POST.get("route")

    if request.POST.get("free"):
        options["free"] = "yes"

    if request.POST.get("process_memory"):
        options["procmemdump"] = "yes"

    if request.POST.get("services"):
        options["services"] = "yes"

    db = Database()
    task_ids = []
    task_machines = []

    if machine.lower() == "all":
        for entry in db.list_machines():
            task_machines.append(entry.label)
    else:
        task_machines.append(machine)

    # In case of resubmitting a file.
    if request.POST.get("category") == "file":
        task = Database().view_task(task_id)
        for entry in task_machines:
            task_id = db.add_path(file_path=task.target,
                                  package=package,
                                  timeout=timeout,
                                  options=emit_options(options),
                                  priority=priority,
                                  machine=entry,
                                  custom=custom,
                                  memory=memory,
                                  enforce_timeout=enforce_timeout,
                                  tags=tags)
            if task_id:
                task_ids.append(task_id)
    elif request.FILES.getlist("sample"):
        samples = request.FILES.getlist("sample")
        for sample in samples:
            # Error if there was only one submitted sample and it's empty.
            # But if there are multiple and one was empty, just ignore it.
            if not sample.size:
                if len(samples) != 1:
                    continue

                return render_to_response("error.html",
                                          {"error": "You uploaded an empty file."},
                                          context_instance=RequestContext(request))
            elif sample.size > settings.MAX_UPLOAD_SIZE:
                return render_to_response("error.html",
                                          {"error": "You uploaded a file that exceeds the maximum allowed upload size."},
                                          context_instance=RequestContext(request))

            # Move the sample from the Django temporary file to Cuckoo's
            # temporary storage so it persists across reboots (if the user
            # has configured it that way).
            path = store_temp_file(sample.read(), sample.name)

            for entry in task_machines:
                task_id = db.add_path(file_path=path,
                                      package=package,
                                      timeout=timeout,
                                      options=emit_options(options),
                                      priority=priority,
                                      machine=entry,
                                      custom=custom,
                                      memory=memory,
                                      enforce_timeout=enforce_timeout,
                                      tags=tags)
                if task_id:
                    task_ids.append(task_id)
    # When submitting a dropped file.
    elif request.POST.get("category") == "dropped_file":
        filepath = dropped_filepath(task_id, sha1)
        for entry in task_machines:
            task_id = db.add_path(file_path=filepath,
                                  package=package,
                                  timeout=timeout,
                                  options=emit_options(options),
                                  priority=priority,
                                  machine=entry,
                                  custom=custom,
                                  memory=memory,
                                  enforce_timeout=enforce_timeout,
                                  tags=tags)
            if task_id:
                task_ids.append(task_id)
    else:
        url = request.POST.get("url").strip()
        if not url:
            return render_to_response("error.html",
                                      {"error": "You specified an invalid URL!"},
                                      context_instance=RequestContext(request))

        for entry in task_machines:
            task_id = db.add_url(url=url,
                                 package=package,
                                 timeout=timeout,
                                 options=emit_options(options),
                                 priority=priority,
                                 machine=entry,
                                 custom=custom,
                                 memory=memory,
                                 enforce_timeout=enforce_timeout,
                                 tags=tags)
            if task_id:
                task_ids.append(task_id)

    tasks_count = len(task_ids)
    if tasks_count > 0:
        return render_to_response("submission/complete.html",
                                  {"tasks": task_ids,
                                   "tasks_count": tasks_count,
                                   "baseurl": request.build_absolute_uri('/')[:-1]},
                                  context_instance=RequestContext(request))
    else:
        return render_to_response("error.html",
                                  {"error": "Error adding task to Cuckoo's database."},
                                  context_instance=RequestContext(request))

def launch_analysis(self):
    """Start analysis."""
    succeeded = False
    dead_machine = False

    target = self.task.target
    if self.task.category == "file":
        target = os.path.basename(target)

    log.info("Starting analysis of %s \"%s\" (task #%d, options \"%s\")",
             self.task.category.upper(), target, self.task.id,
             emit_options(self.task.options))

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    # Generate the analysis configuration file.
    options = self.build_options()

    try:
        unlocked = False

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)
        # Start the machine.
        machinery.start(self.machine.label, self.task)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        # Run and manage the components inside the guest.
        self.guest_manage(options)

        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
        dead_machine = True
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        if dead_machine:
            # Remove the guest from the database, so that we can assign a
            # new guest when the task is being analyzed with another
            # machine.
            self.db.guest_remove(guest_log)

            # Remove the analysis directory that has been created so
            # far, as launch_analysis() is going to be doing that again.
            shutil.rmtree(self.storage)

            # This machine has turned dead, so we throw an exception here
            # which informs the AnalysisManager that it should analyze
            # this task again with another available machine.
            raise CuckooDeadMachine()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually.",
                      self.machine.label, e)

    return succeeded

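# For context, a sketch (not the actual scheduler code) of how a caller
# could react to the CuckooDeadMachine exception raised above: retry the
# same task until launch_analysis() completes on a healthy machine.
# "analysis_manager" is a stand-in name for an instance of the class
# defining launch_analysis().
while True:
    try:
        succeeded = analysis_manager.launch_analysis()
    except CuckooDeadMachine:
        # The machine died mid-analysis and its guest entry plus the
        # partial storage were cleaned up; try again on another machine.
        continue
    break
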
def run(self):
    """Run information gathering.
    @return: information dict.
    """
    self.key = "info"

    db = Database()
    dbtask = db.view_task(self.task["id"], details=True)

    if dbtask:
        task = dbtask.to_dict()
    else:
        # Task is gone from the database.
        if os.path.isfile(self.taskinfo_path):
            # We've got task.json, so grab the info from there.
            task = json_decode(open(self.taskinfo_path).read())
        else:
            # We don't have any info on the task :(
            emptytask = Task()
            emptytask.id = self.task["id"]
            task = emptytask.to_dict()

    filepath = os.path.join(
        CUCKOO_ROOT, ".git", "refs", "heads", "master"
    )
    if os.path.exists(filepath) and os.access(filepath, os.R_OK):
        git_head = open(filepath, "rb").read().strip()
    else:
        git_head = None

    filepath = os.path.join(CUCKOO_ROOT, ".git", "FETCH_HEAD")
    if os.path.exists(filepath) and os.access(filepath, os.R_OK):
        git_fetch_head = open(filepath, "rb").read().strip()

        # Only obtain the hash.
        if git_fetch_head:
            git_fetch_head = git_fetch_head.split()[0]
    else:
        git_fetch_head = None

    monitor = os.path.join(
        CUCKOO_ROOT, "data", "monitor",
        task["options"].get("monitor", "latest")
    )
    if os.path.islink(monitor):
        monitor = os.readlink(monitor)
    elif os.path.isfile(monitor):
        monitor = open(monitor, "rb").read().strip()
    elif os.path.isdir(monitor):
        monitor = os.path.basename(monitor)
    else:
        monitor = None

    return dict(
        version=CUCKOO_VERSION,
        git={
            "head": git_head,
            "fetch_head": git_fetch_head,
        },
        monitor=monitor,
        started=task["started_on"],
        ended=task.get("completed_on", "none"),
        duration=task.get("duration", -1),
        id=int(task["id"]),
        category=task["category"],
        custom=task["custom"],
        owner=task["owner"],
        machine=task["guest"],
        package=task["package"],
        platform=task["platform"],
        options=emit_options(task["options"]),
        route=task["route"],
    )

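# Illustrative only: the dict returned above becomes the "info" section of
# the final report (self.key = "info"). A hypothetical rendering with
# made-up values:
example_info = {
    "version": "2.0-dev",
    "git": {"head": "0123abcd...", "fetch_head": "0123abcd..."},
    "monitor": "feedface...",
    "started": "2016-01-01 12:00:00",
    "ended": "2016-01-01 12:02:00",
    "duration": 120,
    "id": 1,
    "category": "file",
    "route": "none",
}
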
def index(request, task_id=None, sha1=None):
    if request.method == "GET":
        return render_index(request)

    package = request.POST.get("package", "")
    timeout = force_int(request.POST.get("timeout"))
    options = request.POST.get("options", "")
    priority = force_int(request.POST.get("priority"))
    machine = request.POST.get("machine", "")
    custom = request.POST.get("custom", "")
    memory = bool(request.POST.get("memory", False))
    enforce_timeout = bool(request.POST.get("enforce_timeout", False))
    tags = request.POST.get("tags", None)

    options = parse_options(options)

    # The following POST fields take precedence over the options field.
    if request.POST.get("route"):
        options["route"] = request.POST.get("route")

    if request.POST.get("free"):
        options["free"] = "yes"

    if request.POST.get("process_memory"):
        options["procmemdump"] = "yes"

    if request.POST.get("services"):
        options["services"] = "yes"

    if not request.POST.get("human"):
        options["human"] = "0"

    if request.POST.get("screenshots"):
        options["screenshots"] = force_int(request.POST.get("screenshots"))

    db = Database()
    task_ids = []
    task_machines = []

    if machine.lower() == "all":
        for entry in db.list_machines():
            task_machines.append(entry.label)
    else:
        task_machines.append(machine)

    # In case of resubmitting a file.
    if request.POST.get("category") == "file":
        task = Database().view_task(task_id)
        for entry in task_machines:
            task_id = db.add_path(file_path=task.target,
                                  package=package,
                                  timeout=timeout,
                                  options=emit_options(options),
                                  priority=priority,
                                  machine=entry,
                                  custom=custom,
                                  memory=memory,
                                  enforce_timeout=enforce_timeout,
                                  tags=tags)
            if task_id:
                task_ids.append(task_id)
    elif request.FILES.getlist("sample"):
        samples = request.FILES.getlist("sample")
        for sample in samples:
            # Error if there was only one submitted sample and it's empty.
            # But if there are multiple and one was empty, just ignore it.
            if not sample.size:
                if len(samples) != 1:
                    continue

                return render(request, "error.html", {
                    "error": "You uploaded an empty file.",
                })
            elif sample.size > settings.MAX_UPLOAD_SIZE:
                return render(request, "error.html", {
                    "error": "You uploaded a file that exceeds the "
                             "maximum allowed upload size.",
                })

            # Move the sample from the Django temporary file to Cuckoo's
            # temporary storage so it persists across reboots (if the user
            # has configured it that way).
            path = store_temp_file(sample.read(), sample.name)

            for entry in task_machines:
                task_id = db.add_path(file_path=path,
                                      package=package,
                                      timeout=timeout,
                                      options=emit_options(options),
                                      priority=priority,
                                      machine=entry,
                                      custom=custom,
                                      memory=memory,
                                      enforce_timeout=enforce_timeout,
                                      tags=tags)
                if task_id:
                    task_ids.append(task_id)
    # When submitting a dropped file.
    elif request.POST.get("category") == "dropped_file":
        filepath = dropped_filepath(task_id, sha1)
        for entry in task_machines:
            task_id = db.add_path(file_path=filepath,
                                  package=package,
                                  timeout=timeout,
                                  options=emit_options(options),
                                  priority=priority,
                                  machine=entry,
                                  custom=custom,
                                  memory=memory,
                                  enforce_timeout=enforce_timeout,
                                  tags=tags)
            if task_id:
                task_ids.append(task_id)
    else:
        url = request.POST.get("url").strip()
        if not url:
            return render(request, "error.html", {
                "error": "You specified an invalid URL!",
            })

        for entry in task_machines:
            task_id = db.add_url(url=url,
                                 package=package,
                                 timeout=timeout,
                                 options=emit_options(options),
                                 priority=priority,
                                 machine=entry,
                                 custom=custom,
                                 memory=memory,
                                 enforce_timeout=enforce_timeout,
                                 tags=tags)
            if task_id:
                task_ids.append(task_id)

    tasks_count = len(task_ids)
    if tasks_count > 0:
        return render(request, "submission/complete.html", {
            "tasks": task_ids,
            "tasks_count": tasks_count,
            "baseurl": request.build_absolute_uri('/')[:-1],
        })
    else:
        return render(request, "error.html", {
            "error": "Error adding task to Cuckoo's database.",
        })

def launch_analysis(self):
    """Start analysis."""
    succeeded = False

    target = self.task.target
    if self.task.category == "file":
        target = os.path.basename(target)

    log.info("Starting analysis of %s \"%s\" (task #%d, options \"%s\")",
             self.task.category.upper(), target, self.task.id,
             emit_options(self.task.options))

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    self.store_task_info()

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    # Generate the analysis configuration file.
    options = self.build_options()

    #####################################
    # THIS PART IS USED FOR PINDEMONIUM #
    #####################################
    if options["package"][0:6] == "exePIN":
        print(options)
        log.info(
            "\033[91m\n\n\t\t ###### Will now proceed with PINDemonium "
            "#######\n\n\033[0m"
        )
        options = Unpack(options)
        print(options)
    #####################################
    # THIS PART IS USED FOR PINDEMONIUM #
    #####################################

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    try:
        unlocked = False
        self.interface = None

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)
        # Start the machine.
        machinery.start(self.machine.label, self.task)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        # Run and manage the components inside the guest unless this
        # machine has the "noagent" option specified (please refer to the
        # wait_finish() function for more details on this function).
        if "noagent" not in self.machine.options:
            self.guest_manage(options)
        else:
            self.wait_finish()

        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
        log.critical(
            "A critical error has occurred trying to use the machine "
            "with name %s during an analysis due to which it is no "
            "longer in a working state, please report this issue and all "
            "of the related environment details to the developers so we "
            "can improve this situation. (Note that before we would "
            "simply remove this VM from doing any more analyses, but as "
            "all the VMs will eventually be depleted that way, hopefully "
            "we'll find a better solution now).",
            self.machine.name,
        )
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually.",
                      self.machine.label, e)

    return succeeded

def launch_analysis(self):
    """Start analysis."""
    succeeded = False

    target = self.task.target
    if self.task.category == "file":
        target = os.path.basename(target)

    log.info("Starting analysis of %s \"%s\" (task #%d, options \"%s\")",
             self.task.category.upper(), target, self.task.id,
             emit_options(self.task.options))

    # Initialize the analysis folders.
    if not self.init_storage():
        return False

    self.store_task_info()

    if self.task.category == "file":
        # Check whether the file has been changed for some unknown reason.
        # And fail this analysis if it has been modified.
        if not self.check_file():
            return False

        # Store a copy of the original file.
        if not self.store_file():
            return False

    # Acquire analysis machine.
    try:
        self.acquire_machine()
    except CuckooOperationalError as e:
        machine_lock.release()
        log.error("Cannot acquire machine: {0}".format(e))
        return False

    # At this point we can tell the ResultServer about it.
    try:
        ResultServer().add_task(self.task, self.machine)
    except Exception as e:
        machinery.release(self.machine.label)
        self.errors.put(e)

    aux = RunAuxiliary(task=self.task, machine=self.machine)
    aux.start()

    # Generate the analysis configuration file.
    options = self.build_options()

    try:
        unlocked = False
        self.interface = None

        # Mark the selected analysis machine in the database as started.
        guest_log = self.db.guest_start(self.task.id,
                                        self.machine.name,
                                        self.machine.label,
                                        machinery.__class__.__name__)
        # Start the machine.
        machinery.start(self.machine.label, self.task)

        # Enable network routing.
        self.route_network()

        # By the time start returns it will have fully started the Virtual
        # Machine. We can now safely release the machine lock.
        machine_lock.release()
        unlocked = True

        # Run and manage the components inside the guest unless this
        # machine has the "noagent" option specified (please refer to the
        # wait_finish() function for more details on this function).
        if "noagent" not in self.machine.options:
            self.guest_manage(options)
        else:
            self.wait_finish()

        succeeded = True
    except CuckooMachineError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
        log.critical(
            "A critical error has occurred trying to use the machine "
            "with name %s during an analysis due to which it is no "
            "longer in a working state, please report this issue and all "
            "of the related environment details to the developers so we "
            "can improve this situation. (Note that before we would "
            "simply remove this VM from doing any more analyses, but as "
            "all the VMs will eventually be depleted that way, hopefully "
            "we'll find a better solution now).",
            self.machine.name,
        )
    except CuckooGuestError as e:
        if not unlocked:
            machine_lock.release()
        log.error(str(e), extra={"task_id": self.task.id})
    finally:
        # Stop Auxiliary modules.
        aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                dump_path = os.path.join(self.storage, "memory.dmp")
                machinery.dump_memory(self.machine.label, dump_path)
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for the current machine manager.")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s",
                        self.machine.label, e)

        # Mark the machine in the database as stopped. Unless this machine
        # has been marked as dead, we just keep it as "started" in the
        # database so it'll not be used later on in this session.
        self.db.guest_stop(guest_log)

        # After all this, we can make the ResultServer forget about the
        # internal state for this analysis task.
        ResultServer().del_task(self.task, self.machine)

        # Drop the network routing rules if any.
        self.unroute_network()

        try:
            # Release the analysis machine. But only if the machine has
            # not turned dead yet.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually.",
                      self.machine.label, e)

    return succeeded