def handle(self):
    """Process one ResultServer client connection.

    Resolves the connecting IP address to its analysis task, starts
    per-task logging and storage folders, then consumes protocol events
    until the client disconnects or an error occurs. Always stops the
    per-task log before returning.
    """
    ip, port = self.client_address
    self.connect_time = datetime.datetime.now()

    self.storagepath = self.server.build_storage_path(ip)
    if not self.storagepath:
        # No analysis is registered for this IP; nothing to store.
        return

    task, _ = self.server.get_ctx_for_ip(ip)
    task_log_start(task.id)

    # Create all missing folders for this analysis.
    self.create_folders()

    try:
        # Initialize the protocol handler class for this connection.
        self.negotiate_protocol()

        for event in self.protocol:
            if isinstance(self.protocol, BsonParser) and event["type"] == "process":
                self.open_process_log(event)
    except CuckooResultError as e:
        log.warning(
            "ResultServer connection stopping because of "
            "CuckooResultError: %s.", e
        )
    except (Disconnect, socket.error):
        # The client went away; expected at the end of an analysis.
        pass
    except Exception:
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; restrict it to Exception so interpreter
        # shutdown signals propagate.
        log.exception("FIXME - exception in resultserver connection %s", self.client_address)

    task_log_stop(task.id)
def handle(self):
    """Handle a single ResultServer client connection.

    Maps the client IP to its task, sets up task logging and the storage
    folders, then iterates protocol events until disconnect or error.
    The per-task log is stopped before returning.
    """
    ip, port = self.client_address
    self.connect_time = datetime.datetime.now()

    self.storagepath = self.server.build_storage_path(ip)
    if not self.storagepath:
        # Unknown IP, no storage path could be derived - drop the client.
        return

    task, _ = self.server.get_ctx_for_ip(ip)
    task_log_start(task.id)

    # Create all missing folders for this analysis.
    self.create_folders()

    try:
        # Initialize the protocol handler class for this connection.
        self.negotiate_protocol()

        for event in self.protocol:
            if isinstance(self.protocol, BsonParser) and event["type"] == "process":
                self.open_process_log(event)
    except CuckooResultError as e:
        log.warning(
            "ResultServer connection stopping because of "
            "CuckooResultError: %s.", e)
    except (Disconnect, socket.error):
        # Normal termination: the guest side closed the connection.
        pass
    except Exception:
        # Narrowed from a bare "except:" so that SystemExit and
        # KeyboardInterrupt are no longer swallowed here.
        log.exception("FIXME - exception in resultserver connection %s", self.client_address)

    task_log_stop(task.id)
def run(self):
    """Run manager thread.

    Drives one analysis to completion: launches it (retrying if the
    machine turns out to be dead), marks the task completed, refreshes
    the task row from the database, and repoints the "latest" symlink.
    The finally clause guarantees status/bookkeeping cleanup.
    """
    global active_analysis_count
    active_analysis_count += 1
    try:
        # Keep retrying the launch until it runs on a live machine; a
        # dead machine is logged and the launch is attempted again.
        while True:
            try:
                status = self.launch_analysis()
                break
            except CuckooDeadMachine as exc:
                log.exception(exc)

        self.db.set_status(self.task.id, TASK_COMPLETED)

        # If the task is still available in the database, update our task
        # variable with what's in the database, as otherwise we're missing
        # out on the status and completed_on change. This would then in
        # turn thrown an exception in the analysisinfo processing module.
        self.task = self.db.view_task(self.task.id) or self.task

        log.debug("Task #%s: Released database task with status %s", self.task.id, status)

        # We make a symbolic link ("latest") which links to the latest
        # analysis - this is useful for debugging purposes. This is only
        # supported under systems that support symbolic links.
        if hasattr(os, "symlink"):
            symlink = os.path.join(CUCKOO_ROOT, "storage", "analyses", "latest")

            # First remove any existing symbolic link, then create the new
            # one; the lock avoids races between concurrent managers.
            with latest_symlink_lock:
                try:
                    # As per documentation, lexists() returns True for dead
                    # symbolic links.
                    if os.path.lexists(symlink):
                        os.remove(symlink)

                    os.symlink(self.storage, symlink)
                except OSError as exc:
                    log.warning(
                        "Task #%s: Error pointing latest analysis symlink: %s",
                        self.task.id, exc)

        log.info("Task #%s: analysis procedure completed", self.task.id)
    except Exception as exc:
        log.exception("Task #%s: Failure in AnalysisManager.run: %s", self.task.id, exc)
    finally:
        # Cleanup runs regardless of success or failure.
        self.db.set_status(self.task.id, TASK_COMPLETED)
        task_log_stop(self.task.id)
        active_analysis_count -= 1
def handle(self, sock, addr):
    """Handle the incoming connection.
    Gevent will close the socket when the function returns."""
    ipaddr = addr[0]

    # Look up the task registered for this client IP; connections from
    # unknown IPs are dropped immediately.
    with self.task_mgmt_lock:
        task_id = self.tasks.get(ipaddr)
        if not task_id:
            log.warning("ResultServer did not have a task for IP %s", ipaddr)
            return

    self.storagepath = os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task_id))

    # Create all missing folders for this analysis.
    self.create_folders()

    ctx = HandlerContext(task_id, self.storagepath, sock)
    task_log_start(task_id)
    try:
        try:
            protocol = self.negotiate_protocol(task_id, ctx)
        except EOFError:
            # Client disconnected before completing negotiation.
            return

        # Registering the context allows us to abort the handler by
        # shutting down its socket when the task is deleted; this should
        # prevent lingering sockets
        with self.task_mgmt_lock:
            # NOTE: the task may have been cancelled during the negotation
            # protocol and a different task for that IP address may have
            # been registered
            if self.tasks.get(ipaddr) != task_id:
                log.warning(
                    "Task #%s for IP %s was cancelled during "
                    "negotiation", task_id, ipaddr)
                return
            s = self.handlers.setdefault(task_id, set())
            s.add(ctx)

        try:
            with protocol:
                protocol.handle()
        except CuckooOperationalError as e:
            log.error(e)
        finally:
            # Deregister the context and cancel it so the socket is not
            # left lingering after the handler finishes.
            with self.task_mgmt_lock:
                s.discard(ctx)
            ctx.cancel()
            if ctx.buf:
                # This is usually not a good sign
                log.warning(
                    "Task #%s with protocol %s has unprocessed "
                    "data before getting disconnected", task_id, protocol)
    finally:
        task_log_stop(task_id)
def run(self):
    """Run manager thread.

    Launches the analysis, marks the task completed, optionally runs the
    processing/reporting pipeline, updates the "latest" symlink, and
    persists the final task info. Bookkeeping (task log, active counter)
    is always performed before returning.
    """
    global active_analysis_count
    active_analysis_count += 1
    try:
        self.launch_analysis()

        self.db.set_status(self.task.id, TASK_COMPLETED)

        log.debug("Released database task #%d", self.task.id)

        if self.cfg.cuckoo.process_results:
            # this updates self.task so processing gets the latest and greatest
            self.store_task_info()

            self.process_results()
            self.db.set_status(self.task.id, TASK_REPORTED)

        # We make a symbolic link ("latest") which links to the latest
        # analysis - this is useful for debugging purposes. This is only
        # supported under systems that support symbolic links.
        if hasattr(os, "symlink"):
            latest = os.path.join(CUCKOO_ROOT, "storage", "analyses", "latest")

            # First we have to remove the existing symbolic link, then we
            # have to create the new one.
            # Deal with race conditions using a lock.
            latest_symlink_lock.acquire()
            try:
                # As per documentation, lexists() returns True for dead
                # symbolic links.
                if os.path.lexists(latest):
                    os.remove(latest)

                os.symlink(self.storage, latest)
            except OSError as e:
                # Lazy %-style args instead of eager "%" interpolation.
                log.warning("Error pointing latest analysis symlink: %s", e)
            finally:
                latest_symlink_lock.release()

        # overwrite task.json so we have the latest data inside
        self.store_task_info()

        log.info("Task #%d: analysis procedure completed", self.task.id, extra={
            "action": "task",
            "status": "done"
        })
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not silently swallowed here.
        log.exception("Failure in AnalysisManager.run")

    task_log_stop(self.task.id)
    active_analysis_count -= 1
def run(self):
    """Run manager thread.

    Launches the analysis, marks the task completed, optionally runs the
    processing/reporting pipeline, updates the "latest" symlink, stores
    the final task info, and finally uploads results to AWS when that
    feature is enabled in the configuration.
    """
    global active_analysis_count
    active_analysis_count += 1
    try:
        self.launch_analysis()

        self.db.set_status(self.task.id, TASK_COMPLETED)

        log.debug("Released database task #%d", self.task.id)

        if self.cfg.cuckoo.process_results:
            # this updates self.task so processing gets the latest and greatest
            self.store_task_info()

            self.process_results()
            self.db.set_status(self.task.id, TASK_REPORTED)

        # We make a symbolic link ("latest") which links to the latest
        # analysis - this is useful for debugging purposes. This is only
        # supported under systems that support symbolic links.
        if hasattr(os, "symlink"):
            latest = os.path.join(CUCKOO_ROOT, "storage", "analyses", "latest")

            # First we have to remove the existing symbolic link, then we
            # have to create the new one.
            # Deal with race conditions using a lock.
            latest_symlink_lock.acquire()
            try:
                # As per documentation, lexists() returns True for dead
                # symbolic links.
                if os.path.lexists(latest):
                    os.remove(latest)

                os.symlink(self.storage, latest)
            except OSError as e:
                # Lazy %-style args instead of eager "%" interpolation.
                log.warning("Error pointing latest analysis symlink: %s", e)
            finally:
                latest_symlink_lock.release()

        # overwrite task.json so we have the latest data inside
        self.store_task_info()

        log.info("Task #%d: analysis procedure completed", self.task.id)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are not silently swallowed here.
        log.exception("Failure in AnalysisManager.run")

    task_log_stop(self.task.id)
    active_analysis_count -= 1

    # Lazy logging args instead of str.format; the "Uploading" message is
    # now only emitted when the AWS upload actually happens (previously it
    # was logged even with AWS disabled, which was misleading).
    log.debug("AWS Setting %s", self.cfg.cuckoo.aws)
    if self.cfg.cuckoo.aws:
        log.info("Uploading Task #%s to AWS", self.task.id)
        storeResultsAWS(self.task.id)