def run_do(self):
    """One scheduler tick.

    While under the concurrency cap, take at most one waiting job from the
    queue and try to launch it: a successful launch marks the queue entry
    "ready"; a failed launch drops the entry. Finally sweep already-running
    jobs via ``check_job``. All errors are logged, never propagated.
    """
    logger.info("{} job are running.".format(running_job_amount()))
    try:
        # Only pull new work while we are under the concurrency cap.
        if running_job_amount() < MAX_CONCURRENT_JOB_RUN:
            waiting = get_job_from_queue(status="waiting", limit=1)
            if waiting:
                head = waiting[0]
                head_job_id = head.job_id
                try:
                    launched = self.run_job(job_id=head_job_id,
                                            config=json.loads(head.config))
                except Exception as e:
                    logger.exception(e)
                    launched = False
                if launched:
                    # Mark the entry so it is not picked up again next tick.
                    update_job_queue(job_id=head_job_id,
                                    role=head.role,
                                    party_id=head.party_id,
                                    save_data={"status": "ready"})
                else:
                    # Launch failed: drop the entry instead of retrying forever.
                    pop_from_job_queue(job_id=head_job_id)
            logger.info("check waiting jobs done.")
        self.check_job()
    except Exception as e:
        logger.exception(e)
def stop_workflow(job_id, role, party_id):
    """Terminate every process recorded for *job_id* and clean the job up.

    Scans the ``pids`` directory under the job's working directory; each
    ``*.pid`` file holds newline-separated pids whose whole process trees
    (children first, then the parent) are killed. Afterwards the job is
    marked failed, removed from the queue, and its artifacts cleaned.

    :param job_id: job to stop
    :param role: role of this party in the job
    :param party_id: id of this party
    :return: standard JSON result payload for the job
    """
    _job_dir = get_job_directory(job_id)
    task_pid_path = os.path.join(_job_dir, 'pids')
    if os.path.isdir(task_pid_path):
        for pid_file in os.listdir(task_pid_path):
            if not pid_file.endswith('.pid'):
                continue
            try:
                with open(os.path.join(task_pid_path, pid_file), 'r') as f:
                    pids = f.read().split('\n')
                for pid in pids:
                    # One bad entry must not abort the remaining pids of this
                    # file, so each pid gets its own error handling.
                    try:
                        pid = pid.strip()
                        if not pid:
                            continue
                        logger.debug(
                            "terminating process pid:{} {}".format(
                                pid, pid_file))
                        p = psutil.Process(int(pid))
                        # Kill children first so they cannot be reparented
                        # and leak past the parent's death.
                        for child in p.children(recursive=True):
                            child.kill()
                        p.kill()
                    except NoSuchProcess:
                        # Process already exited - nothing to do.
                        continue
                    except ValueError:
                        # Corrupt pid entry (non-numeric line); previously this
                        # escaped to the outer handler and skipped the rest of
                        # the file's pids. Log and keep going instead.
                        logger.warning(
                            "invalid pid entry in {}: {!r}".format(
                                pid_file, pid))
                        continue
            except Exception:
                logger.exception("error")
                continue
    set_job_failed(job_id=job_id, role=role, party_id=party_id)
    pop_from_job_queue(job_id=job_id)
    clean_job(job_id=job_id)
    return get_json_result(job_id=job_id)
def update_job(job_id):
    """Apply a status update to a job and its queue entry.

    Reads the new status from the request body, persists it on both the job
    record and the queue record, then reacts to terminal states: a
    failed/deleted job is actively stopped, and any terminal state removes
    the job from the queue.
    """
    status = request.json.get("status")
    update_job_by_id(job_id=job_id, update_data={"status": status})
    update_job_queue(job_id=job_id, update_data={"status": status})
    # A failed/deleted job must be actively stopped.
    if status in ("failed", "deleted"):
        stop_job(job_id=job_id)
    # Any terminal state removes the entry from the queue.
    if status in ("failed", "deleted", "success"):
        pop_from_job_queue(job_id=job_id)
    return get_json_result()
def update_job(job_id, role, party_id):
    """Record one party's job status update and coordinate follow-up.

    Persists the reported status on the local job record and queue entry.
    If this party is the job initiator, it inspects the status of all
    parties and stops the job once any party failed/deleted or every party
    succeeded. Otherwise the update is forwarded to the initiator exactly
    once (guarded by the ``initiatorUpdate`` flag).
    """
    request_data = request.json
    new_status = request_data.get('status')
    logger.info('job_id:{} role:{} party_id:{} status:{}'.format(
        job_id, role, party_id, new_status))
    job_info = save_job_info(job_id=job_id, role=role, party_id=party_id,
                             save_info={"status": new_status})
    if not job_info:
        # Unknown job: it was not launched through this Task Manager.
        logger.info(
            'job_id {} may not be started by the Task Manager.'.format(job_id))
        return get_json_result(
            job_id=job_id, status=101,
            msg='this task may not be started by the Task Manager.')
    update_job_queue(job_id=job_id, role=role, party_id=party_id,
                     save_data={"status": new_status})
    if new_status in ("success", "failed", "deleted"):
        pop_from_job_queue(job_id=job_id)
    if is_job_initiator(job_info.initiator, PARTY_ID):
        # I am job initiator
        logger.info('i am job {} initiator'.format(job_id))
        # Stop once any party failed/deleted, or every party succeeded.
        all_status = {job.status for job in query_job_by_id(job_id=job_id)}
        should_stop = ('failed' in all_status
                       or 'deleted' in all_status
                       or all_status == {'success'})
        if should_stop:
            stop_job(job_id=job_id)
    elif not request_data.get('initiatorUpdate', False):
        # Not the initiator: relay the status upstream, flagging the payload
        # so the initiator does not bounce it back.
        request_data['initiatorUpdate'] = True
        federated_api(job_id=job_id,
                      method='POST',
                      url='/job/jobStatus/{}/{}/{}'.format(
                          job_id, role, party_id),
                      party_id=job_info.initiator,
                      json_body=request_data)
    return get_json_result(job_id=job_id)