def run(self):
    """Worker main loop: dequeue samples and run the ruleset engine on them.

    Runs until a shutdown is requested via ``self.shutdown_requested``.
    Signals liveness to the owner through ``self.running_flag``. Jobs are
    marked done via ``self.job_queue.done()`` both on success and on
    unexpected failure, but deliberately NOT while a Cuckoo report is still
    pending, so the sample stays in flight until the report arrives.
    """
    self.running_flag.set()
    while not self.shutdown_requested.is_set():
        try:
            # wait blocking for next job (thread safe) with timeout so the
            # shutdown flag is re-checked periodically
            sample = self.job_queue.dequeue(self.dequeue_timeout)
        except Empty:
            continue

        # lazy %-args: the message is only formatted if the level is enabled
        logger.info('Worker %d: Processing sample %s',
                    self.worker_id, sample)
        sample.init()

        try:
            engine = RulesetEngine(sample, self.ruleset_config)
            engine.run()
            engine.report()
            self.job_queue.done(sample.sha256sum)
        except CuckooReportPendingException:
            # control-flow exception: analysis resumes when the report
            # arrives, so the job intentionally stays in flight
            logger.debug("Report for sample %s still pending", sample)
        except Exception as e:
            logger.exception(e)
            # it's no longer in-flight even though processing seems to have
            # failed
            self.job_queue.done(sample.sha256sum)

        logger.debug('Worker is ready')

    logger.info('Worker %d: Stopped', self.worker_id)
    self.running_flag.clear()
def run(self):
    """Worker main loop: blockingly fetch jobs and analyse them.

    Loops while ``self.active`` is true, pulling samples off the shared
    ``JobQueue.jobs`` queue and running the ruleset engine on each. A
    pending Cuckoo report is expected control flow and ignored; any other
    exception is logged so the worker thread survives.
    """
    while self.active:
        logger.debug('Worker is ready')
        # wait blocking for next job (thread safe)
        sample = JobQueue.jobs.get(True)
        # lazy %-args: formatted only if the info level is enabled
        logger.info('Worker %d: Processing sample %s',
                    self.worker_id, sample)
        sample.init()
        try:
            engine = RulesetEngine(sample)
            engine.run()
            engine.report()
        except CuckooReportPendingException:
            # control-flow exception: analysis resumes when the Cuckoo
            # report arrives — note it instead of swallowing silently
            logger.debug("Report for sample %s still pending", sample)
        except Exception as e:
            # keep the worker thread alive on unexpected errors
            logger.exception(e)
def run(self):
    """Worker main loop: dequeue samples, run the ruleset engine, persist
    results.

    Runs until a shutdown is requested via ``self.shutdown_requested``;
    liveness is signalled through ``self.running_flag``. Failed
    initialisation and deferred analyses are handled explicitly; results of
    successful analyses are saved to the database (best effort — a database
    error is logged but does not stop the worker).
    """
    self.running_flag.set()
    while not self.shutdown_requested.is_set():
        logger.debug('Worker %d: Ready', self.worker_id)
        try:
            # wait blocking for next job (thread safe) with timeout
            sample = self.job_queue.dequeue()
        except Empty:
            continue

        if sample is None:
            # we just got pinged
            continue

        logger.info('Worker %d: Processing sample %s',
                    self.worker_id, sample)

        # The following used to be one big try/except block catching any
        # exception. This got complicated because in the case of
        # CuckooReportPending we use exceptions for control flow as well
        # (which might be questionable in itself). Instead of catching,
        # logging and ignoring errors here if workers start to die again
        # because of uncaught exceptions we should improve error handling
        # in the subroutines causing it.
        if not sample.init():
            logger.error('Sample initialization failed')
            sample.add_rule_result(
                RuleResult("Worker", result=Result.failed,
                           reason=_("Sample initialization failed"),
                           further_analysis=False))
            # NOTE(review): this path hands done() the sha256sum while the
            # success path below hands it the sample object — confirm which
            # signature JobQueue.done() actually expects
            self.job_queue.done(sample.sha256sum)
            continue

        engine = RulesetEngine(self.ruleset_config, self.db_con)
        try:
            engine.run(sample)
        except PeekabooAnalysisDeferred:
            # control-flow exception: analysis resumes later, so the job
            # intentionally stays in flight and is not marked done
            logger.debug("Report for sample %s still pending", sample)
            continue

        if sample.result >= Result.failed:
            sample.dump_processing_info()

        if sample.result != Result.failed:
            logger.debug('Saving results to database')
            try:
                self.db_con.analysis_save(sample)
            except PeekabooDatabaseError as dberr:
                logger.error(
                    'Failed to save analysis result to '
                    'database: %s', dberr)
                # no showstopper, we can limp on without caching in DB
        else:
            logger.debug('Not saving results of failed analysis')

        sample.cleanup()
        self.job_queue.done(sample)

    # lazy %-args for consistency with every other log call in this method
    logger.info('Worker %d: Stopped', self.worker_id)
    self.running_flag.clear()