def __call__(self):
    while True:
        job = jobq.get()
        ID = int(random() * 10000000)
        TIMING_LOG.info("%s (%d) started" % (str(job.context_str), ID))
        try:
            if isinstance(job, DispatchableJob):
                # Package the dispatched command and its environment into a shared
                # list; the last slot is reserved for an error message coming back
                # from the process side.
                pa = job.start()
                shared_job_obj = [pa[0], pa[1], dict(os.environ), None]
                self.pqueue.put(shared_job_obj)
                _LOG.debug("Worker %d put a job tuple on queue %s" % (self.i, str(self.pqueue)))
                self.pqueue.join()
                _LOG.debug("Worker %d joined on queue %s" % (self.i, str(self.pqueue)))
                # Rebuild the job's outcome from the shared list and the shared
                # return-code value.
                plj = LightJobForProcess(shared_job_obj[0], shared_job_obj[1], shared_job_obj[2])
                plj.error = shared_job_obj[3]
                plj.return_code = self.err_shared_obj.value

                if plj.error is not None:
                    job.error = Exception(plj.error)
                job.return_code = plj.return_code
                if job.return_code is not None and job.return_code != 0:
                    raise Exception("Job:\n %s\n failed with error code: %d"
                                    % (' '.join(plj._invocation), job.return_code))
                job.results = job.result_processor()
                job.finished_event.set()
                job.get_results()
                job.postprocess()
            else:
                # Jobs that are not dispatched to an external process run entirely
                # in this worker thread.
                job.start()
                job.get_results()
                job.postprocess()
        except Exception as e:
            # traceback.print_exc writes text, so capture it in a StringIO
            # (a BytesIO would raise a TypeError here).
            err = StringIO()
            traceback.print_exc(file=err)
            _LOG.error("Worker dying. Error in job.start = %s" % err.getvalue())
            job.error = e
            job.return_code = -1
            job.finished_event.set()
            job.kill()
            kill_all_jobs()
            return
        TIMING_LOG.info("%s (%d) completed" % (str(job.context_str), ID))
        jobq.task_done()
    return
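
# The hand-off above implies a process-side consumer: something must take the
# shared list off self.pqueue, run the command, write any error message back
# into slot 3 and the exit status into the shared return-code value, and call
# task_done() so the pqueue.join() above returns. The sketch below is only an
# illustration of that implied protocol, not the project's actual worker
# function: the run() method and the function name are assumptions; only
# LightJobForProcess, the queue calls, and the shared objects come from the
# code above.
def pqueue_consumer(pqueue, err_shared_obj):
    while True:
        shared_job_obj = pqueue.get()
        plj = LightJobForProcess(shared_job_obj[0],
                                 shared_job_obj[1],
                                 shared_job_obj[2])
        plj.run()                               # assumed to execute plj._invocation
        shared_job_obj[3] = plj.error           # error slot read back by the thread
        err_shared_obj.value = plj.return_code  # shared int read back by the thread
        pqueue.task_done()                      # releases the thread's pqueue.join()

# Note: for shared_job_obj[3] to be visible to the thread after join(), the
# list must be genuinely shared state (e.g. a manager-backed proxy); a plain
# copy pickled through a multiprocessing queue would not propagate the
# assignment back.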
def put(self, job):
    TIMING_LOG.info("%s queued" % str(job.context_str))
    Queue.put(self, job)
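
# A minimal wiring sketch, assuming the first method belongs to a worker class
# whose instances are callable (here called WorkerCallable) and the second to a
# Queue subclass (here called LoggingQueue). Those two class names and
# start_worker_threads are illustrative assumptions; only jobq and the
# queue/worker behaviour come from the code above.
from threading import Thread

jobq = LoggingQueue()

def start_worker_threads(num_workers):
    for i in range(num_workers):
        worker = WorkerCallable(i)   # runs the __call__ loop above
        t = Thread(target=worker)
        t.daemon = True              # don't block interpreter exit on jobq.get()
        t.start()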