def send_job_from_queue(self, jobs2send):
    """Hand out up to *jobs2send* queued jobs to the requesting client.

    Selects jobs whose status_key is 'q' and whose possiblehosts allow this
    client, marks each as sent ('s') with this client as runninghost, and
    mirrors the change into the shared queue.

    Returns:
        (True, JobQueue of jobs handed out), or (False, None) when nothing
        is queued.
    """
    queued = Global.JobQueue()
    to_send = Global.JobQueue()
    client_name = get_client_name(self)
    with self.QueueLock:  # guard the shared queue against concurrent handlers
        queued.update(self.Queue.SelAttrVal(attr='status_key', value={'q'}))
        if not queued:  # nothing waiting to be dispatched
            return (False, None)
        for job_id, job in queued.items():
            if job.possiblehosts == 'all' or client_name in job.possiblehosts:
                job.status_key = 's'
                # Reuse the cached name (was re-computed via get_client_name).
                job.runninghost = client_name
                to_send.update({job_id: job})
                self.Queue.update({job_id: job})
                if len(to_send) >= jobs2send:
                    break
        return (True, to_send)
def print_queue_status(self, onlymine=False):
    """Report queue status as a queue of Global.JobView wrappers.

    With onlymine=True only jobs whose runninghost matches this client are
    selected; otherwise the whole queue is copied.  Returns (True, queue).
    """
    if onlymine:
        report = self.Queue.SelAttrVal(attr='runninghost', value={get_client_name(self)})
    else:
        report = Global.JobQueue(self.Queue)
    # Replace each job with a lightweight view; iterate a key snapshot so the
    # container is not walked while being updated.
    for job_id in list(report):
        report.update({job_id: Global.JobView(report[job_id])})
    return (True, report)
def main():
    """Entry point for the job-management server.

    Restores persisted state onto the handler class, serves requests until
    interrupted or failing, then persists state again on the way out.
    """
    # Class-level state shared by every RequestHandler instance.
    RequestHandler.Queue = load_existing_Queue() or Global.JobQueue()
    RequestHandler.IdGen = load_last_id() or 1  # fall back to the first id
    RequestHandler.Configs = load_existing_Configs() or dict()
    server = None
    try:
        server = ManageJobsServer(("", Address[1]), RequestHandler)
        server.serve_forever()
    except Exception as err:
        # Top-level boundary: report and fall through to cleanup.
        print("ERROR", err)
    finally:
        if server is not None:
            server.shutdown()
            # Release the listening socket; shutdown() alone leaks it.
            server.server_close()
        save()
def main():
    """Client-side worker loop.

    Fetches configuration and jobs from the server, keeps the number of
    running jobs within the allowed limit, reconciles server-side signals,
    reports results, and repeats until the server requests a shutdown.
    """
    load_jobs_from_last_run()
    deal_with_finished_jobs()
    global MyConfigs
    # Initial config fetch; if the server is down, wait before entering the loop.
    try:
        ok, data = handle_request('GIME_CONFIGS', MyConfigs)
        if ok:
            MyConfigs = data
    except Global.ServerDown:
        wait_for_server()
    while True:
        try:
            MyConfigs.totalJobs = len(MyQueue)
            num_running = check_running_jobs()
            deal_with_finished_jobs()
            num_allowed = deal_with_configs()
            # Positive: room for more jobs; negative: over the limit, stop some.
            jobs2Continue = num_allowed - num_running
            if jobs2Continue >= 0:
                continued = continue_stopped_jobs(jobs2Continue)
                jobs2Submit = jobs2Continue - continued
                if jobs2Submit > 0 and MyConfigs.MoreJobs:
                    ok, NewQueue = handle_request('GIME_JOBS', jobs2Submit)
                    if ok:
                        submit_jobs(NewQueue)
            elif jobs2Continue < 0:
                jobs2Stop = -jobs2Continue
                stop_jobs(jobs2Stop)
            time.sleep(WAIT_TIME)
            # Reconcile server-side signals, but only for jobs we actually own.
            # NOTE(review): Queue2Deal is used via .get() below even when
            # ok is False — confirm handle_request's failure payload supports
            # that, or this relies on the broad except clauses.
            ok, Queue2Deal = handle_request('STATUS_QUEUE', True)
            if ok:
                isbigger = set(Queue2Deal.keys()) - set(MyQueue.keys())
                for k in isbigger:
                    Queue2Deal.pop(k)
                deal_with_signals(Queue2Deal)
            # Just send the complete jobs if needed, otherwise send jobviews
            Queue2Send = Global.JobQueue()
            for k, v in MyQueue.items():
                if v.status_key in {'e', 't', 'q'}:
                    Queue2Send.update({k: v})
                else:
                    if Queue2Deal.get(k) is not None:
                        Queue2Send.update({k: Global.JobView(v)})
                    else:
                        Queue2Send.update({k: v})
            deal_with_finished_jobs()
            # Server tells us which jobs it has fully absorbed; drop them locally.
            ok, keys2remove = handle_request('UPDATE_JOBS', Queue2Send)
            if ok:
                for key in keys2remove:
                    jobid2proc.pop(key)
                    MyQueue.pop(key)
                    shutil.rmtree('/'.join([TEMPFOLDER, FOLDERFORMAT.format(key)]))
            ok, ResQueue = handle_request('GIME_RESULTS')
            if ok:
                deal_with_results(ResQueue)
            ok, data = handle_request('GIME_CONFIGS', MyConfigs)
            if ok:
                MyConfigs = data
            if MyConfigs.shutdown:
                shutdown()
        except psutil.NoSuchProcess:
            # Fixed: was psutil._error.NoSuchProcess — a private module removed
            # in psutil 3.0.  psutil.NoSuchProcess is the public name in all
            # versions.  Process vanished between checks; just retry the loop.
            continue
        except Global.ServerDown:
            wait_for_server()
# NOTE(review): this line is a whitespace-mangled chunk boundary.  Fused onto
# it are: (1) the TAIL of a signal-reconciliation routine (header not visible
# here) that SIGTERMs then SIGCONTs a job's process group and re-queues the
# job with status 'q', swallowing ProcessLookupError; (2) the
# wait_for_server() helper, which just sleeps WAIT_TIME before a retry;
# (3) the module-level state MyQueue / jobid2proc / MyConfigs shared by the
# worker loop; and (4) the START of main(), which is cut off mid-`try` at the
# chunk edge.  Kept byte-identical — reformat the whole file before editing
# this region.
os.killpg(jobid2proc[k].pid, signal.SIGTERM) os.killpg(jobid2proc[k].pid, signal.SIGCONT) v.runninghost = None v.status_key = 'q' MyQueue[k].update(v) elif v != MyQueue[k] and v.status_key == MyQueue[k].status_key: MyQueue[k].update(v) except ProcessLookupError: continue def wait_for_server(): time.sleep(WAIT_TIME) MyQueue = Global.JobQueue() jobid2proc = dict() MyConfigs = Global.Configs() def main(): load_jobs_from_last_run() deal_with_finished_jobs() global MyConfigs try: ok, data = handle_request('GIME_CONFIGS', MyConfigs) if ok: MyConfigs = data except Global.ServerDown: