class JobManager(object):
    """
    Manages jobs on this node: tracks runners and abort handlers, and
    relays job status updates received via the pub/sub bus.

    State is persisted through a Redis-backed job backend; `prepare()`
    must be called (and its callback fired) before the manager is usable.
    """

    def __init__(self, configuration):
        logger.debug("Initialising JobManager.")
        self.configuration = configuration
        # Backend that persists job state (Redis).
        self.backend = RedisJobBackend(configuration)
        # In-memory bookkeeping for jobs on this node.
        self.runners = {}
        self.in_startup = {}
        self.abort_handlers = {}
        # Guards against processing the same status message twice.
        self.double_process_filter = {}

        logger.debug("Subscribing to job status updates.")
        pub.subscribe(self.job_status, 'job.status')

    def prepare(self, callback, error_callback):
        """
        Prepare the job manager, setting up the appropriate backend.
        Calls the supplied callbacks when ready.

        On startup, any jobs for this node still marked RUNNING are
        force-aborted. This handles jobs that did not finish due to an
        error that caused Paasmaker to crash, and frees up other nodes
        that might be waiting on those jobs.

        :arg callable callback: Called with a message once the backend
            is ready and any stale jobs have been aborted.
        :arg callable error_callback: Called if the backend setup fails.
        """
        def abort_existing_jobs(message):
            my_uuid = self.configuration.get_node_uuid()

            def got_running_jobs(joblist):
                if not joblist:
                    # Nothing to abort; we're ready.
                    callback(message)
                else:
                    # Abort the stale jobs one at a time; abort_one is
                    # passed as the completion callback of force_abort,
                    # so it chains itself until joblist is drained.
                    def abort_one(aborted_message):
                        try:
                            # NOTE(review): catching KeyError implies
                            # joblist is a set/dict view rather than a
                            # list (an empty list.pop() raises
                            # IndexError) — confirm against
                            # get_node_jobs().
                            job_id = joblist.pop()
                            self.force_abort(job_id, my_uuid, abort_one)
                        except KeyError:
                            # No more jobs to pop off; all aborted.
                            callback(message)
                        # end of abort_one()

                    # Start aborting.
                    abort_one('First job')
                # end of got_running_jobs()

            self.backend.get_node_jobs(
                my_uuid,
                got_running_jobs,
                state=constants.JOB.RUNNING
            )
            # end of abort_existing_jobs()

        self.backend.setup(abort_existing_jobs, error_callback)
        self.watchdog = JobManagerBackendWatchdog(self.configuration, self.backend)
def __init__(self, configuration): logger.debug("Initialising JobManager.") self.configuration = configuration self.backend = RedisJobBackend(configuration) self.runners = {} self.in_startup = {} self.abort_handlers = {} self.double_process_filter = {} logger.debug("Subscribing to job status updates.") pub.subscribe(self.job_status, 'job.status')