def update_job(job_id, role, party_id):
    """Record a status update for one (role, party) of a job and react to it.

    Reads the new status from the request JSON and persists it. When this
    party is the job initiator, checks all parties' statuses and stops the
    job if any failed/deleted or all succeeded; otherwise forwards the
    status update to the initiator party (once, guarded against echo).
    """
    request_data = request.json
    logger.info('job_id:{} role:{} party_id:{} status:{}'.format(
        job_id, role, party_id, request_data.get('status')))
    job_info = save_job_info(job_id=job_id,
                             role=role,
                             party_id=party_id,
                             save_info={"status": request_data.get("status")})
    if not job_info:
        # No stored record: this job was not started through the Task Manager.
        logger.info(
            'job_id {} may not be started by the Task Manager.'.format(job_id))
        return get_json_result(
            job_id=job_id,
            status=101,
            msg='this task may not be started by the Task Manager.')
    update_job_queue(job_id=job_id,
                     role=role,
                     party_id=party_id,
                     save_data={"status": request_data.get("status")})
    if request_data.get("status") in ["success", "failed", "deleted"]:
        # Terminal status: this job no longer needs to sit in the queue.
        pop_from_job_queue(job_id=job_id)
    if is_job_initiator(job_info.initiator, PARTY_ID):
        # I am job initiator
        logger.info('i am job {} initiator'.format(job_id))
        # check job status across every stored (role, party) record
        jobs = query_job_by_id(job_id=job_id)
        job_status = set([job.status for job in jobs])
        do_stop_job = False
        if 'failed' in job_status or 'deleted' in job_status:
            # Any failure or deletion aborts the whole job.
            do_stop_job = True
        elif len(job_status) == 1 and 'success' in job_status:
            # Every party reported success: the job is complete.
            do_stop_job = True
        if do_stop_job:
            stop_job(job_id=job_id)
    else:
        # send job status to initiator; the initiatorUpdate flag prevents
        # the initiator from forwarding the same update back again
        if not request_data.get('initiatorUpdate', False):
            request_data['initiatorUpdate'] = True
            federated_api(job_id=job_id,
                          method='POST',
                          url='/job/jobStatus/{}/{}/{}'.format(
                              job_id, role, party_id),
                          party_id=job_info.initiator,
                          json_body=request_data)
    return get_json_result(job_id=job_id)
def stop_job(job_id):
    """Broadcast a stop request (DELETE /workflow/...) to every party of a job.

    Participants are discovered by scanning each runtime_conf.json under the
    job directory and collecting the (role, party_id) pairs it declares.
    """
    job_dir = get_job_directory(job_id)
    conf_pattern = os.path.join(job_dir, '**', 'runtime_conf.json')
    # Deduplicated set of (role, party_id) pairs across all runtime confs.
    participants = set()
    for conf_path in glob.glob(conf_pattern, recursive=True):
        conf = file_utils.load_json_conf(os.path.abspath(conf_path))
        for role_name, party_ids in conf['role'].items():
            participants.update((role_name, pid) for pid in party_ids)
    logger.info('start send stop job to {}'.format(','.join(
        [i[0] for i in participants])))
    for role_name, pid in participants:
        federated_api(job_id=job_id,
                      method='DELETE',
                      url='/workflow/{}/{}/{}'.format(job_id, role_name, pid),
                      party_id=pid)
    return get_json_result(job_id=job_id)
def load_model():
    """Ask every party involved in the request to load the model.

    Broadcasts POST /model/load/do with the original request config to each
    distinct party_id found in the request's role map, and returns a JSON
    result keyed by a freshly generated job id.
    """
    request_config = request.json
    _job_id = generate_job_id()
    all_party = set()
    # Collect every distinct party id across all roles.
    for _party_ids in request_config.get('role').values():
        all_party.update(set(_party_ids))
    for _party_id in all_party:
        st, msg = federated_api(job_id=_job_id,
                                method='POST',
                                url='/model/load/do',
                                party_id=_party_id,
                                json_body=request_config)
        if st != 0:
            # Bug fix: per-party failures used to be silently discarded;
            # surface them in the log so operators can see which party failed.
            logger.error('load model failed on party {}: {}'.format(
                _party_id, msg))
    return get_json_result(job_id=_job_id)
def stop_workflow(job_id, role, party_id):
    """Kill every running process of a job on this party, report failure, clean up.

    Reads the .pid files under <job_dir>/pids, kills each listed process and
    all of its children, notifies the job-status endpoint with status
    'failed', then removes local job artifacts.
    """
    _job_dir = get_job_directory(job_id)
    task_pid_path = os.path.join(_job_dir, 'pids')
    if os.path.isdir(task_pid_path):
        for pid_file in os.listdir(task_pid_path):
            try:
                if not pid_file.endswith('.pid'):
                    continue
                with open(os.path.join(task_pid_path, pid_file), 'r') as f:
                    # One pid per line; empty lines are skipped below.
                    pids = f.read().split('\n')
                    for pid in pids:
                        try:
                            if len(pid) == 0:
                                continue
                            logger.debug(
                                "terminating process pid:{} {}".format(
                                    pid, pid_file))
                            p = psutil.Process(int(pid))
                            # Kill children first so they are not orphaned
                            # before the parent goes down.
                            for child in p.children(recursive=True):
                                child.kill()
                            p.kill()
                        except NoSuchProcess:
                            # Process already exited; nothing to do.
                            continue
            except Exception as e:
                # Best effort: one unreadable pid file must not stop the
                # cleanup of the remaining ones.
                logger.exception("error")
                continue
    federated_api(job_id=job_id,
                  method='POST',
                  url='/job/jobStatus/{}/{}/{}'.format(
                      job_id, role, party_id),
                  party_id=party_id,
                  json_body={
                      'status': 'failed',
                      'stopJob': True
                  })
    clean_job(job_id=job_id)
    return get_json_result(job_id=job_id)
def run_job(self, job_id, config):
    """Prepare per-party runtime configs for a job and dispatch them.

    Overrides the default runtime conf with the submitted config, then for
    every generated runtime_conf.json POSTs it to the owning party's
    /workflow endpoint and, on success, records a 'ready' job entry.

    Returns True when every federated dispatch succeeded, False otherwise.
    """
    default_runtime_dict = file_utils.load_json_conf(
        'workflow/conf/default_runtime_conf.json')
    setting_conf = file_utils.load_json_conf(
        'workflow/conf/setting_conf.json')
    _job_dir = get_job_directory(job_id=job_id)
    os.makedirs(_job_dir, exist_ok=True)
    ParameterOverride.override_parameter(default_runtime_dict, setting_conf,
                                         config, _job_dir)
    # Bug fix: the log previously formatted the whole config dict into the
    # job_id slot ('...'.format(config, _job_dir)); log the actual job id.
    logger.info('job_id {} parameters overrode {}'.format(job_id, _job_dir))
    run_job_success = True
    job_param = dict()
    job_param['job_id'] = job_id
    job_param['initiator'] = PARTY_ID
    for runtime_conf_path in glob.glob(os.path.join(
            _job_dir, '**', 'runtime_conf.json'), recursive=True):
        runtime_conf = file_utils.load_json_conf(
            os.path.abspath(runtime_conf_path))
        runtime_conf['JobParam'] = job_param
        _role = runtime_conf['local']['role']
        _party_id = runtime_conf['local']['party_id']
        _module = runtime_conf['module']
        st, msg = federated_api(job_id=job_id,
                                method='POST',
                                url='/workflow/{}/{}/{}'.format(
                                    job_id, _module, _role),
                                party_id=_party_id,
                                json_body=runtime_conf)
        if st == 0:
            save_job_info(job_id=job_id,
                          role=_role,
                          party_id=_party_id,
                          save_info={
                              "status": "ready",
                              "initiator": PARTY_ID
                          },
                          create=True)
        else:
            # Any failed dispatch marks the whole run as unsuccessful.
            run_job_success = False
    logger.info("run job done")
    return run_job_success
def load_model():
    """Ask every non-arbiter party to load the model with its locality set.

    Optionally generates model table info first, then POSTs /model/load/do to
    each party with request_config['local'] set to that party's role and id.
    Returns a JSON result keyed by a freshly generated job id.
    """
    request_config = request.json
    _job_id = generate_job_id()
    if request_config.get('gen_table_info', False):
        publish_model.generate_model_info(request_config)
    for role_name, role_partys in request_config.get("role").items():
        if role_name == 'arbiter':
            # The arbiter holds no model partition; skip it.
            continue
        for _party_id in role_partys:
            # Tell the remote party which (role, party_id) this load is for.
            request_config['local'] = {
                'role': role_name,
                'party_id': _party_id
            }
            st, msg = federated_api(job_id=_job_id,
                                    method='POST',
                                    url='/model/load/do',
                                    party_id=_party_id,
                                    json_body=request_config)
            if st != 0:
                # Bug fix: per-party failures used to be silently discarded;
                # surface them in the log.
                logger.error('load model failed on party {}: {}'.format(
                    _party_id, msg))
    return get_json_result(job_id=_job_id)