def submit_job(cls, job_data, job_id=None):
    """Validate, persist and enqueue a newly submitted job.

    :param job_data: raw job configuration dict from the caller.
    :param job_id: optional pre-generated id; a fresh one is created when absent.
    :return: ``(job_id, job_conf_path)`` on success, ``({}, {})`` on any failure.
    """
    try:
        job_id = job_id or job_utils.generate_job_id()
        stat_logger.info(f'Trying submit job, job_id {job_id}, body {job_data}')
        schedule_logger(job_id).info(f'Trying submit job, job_id {job_id}, body {job_data}')
        # Raises when the submitted configuration is malformed.
        job_utils.check_job_conf(job_data)
        # {'job_conf_path': 'xxx', 'job_runtime_conf_path': 'xxx'}
        job_conf_path = file_utils.save_job_conf(job_id=job_id, job_data=job_data)
        JobSaver.create_job(job_info={
            'job_id': job_id,
            'job_conf': job_data,
            'status': JobStatus.WAITING,
        })
        # Wake the queue consumer so the job gets picked up for scheduling.
        RuntimeConfig.JOB_QUEUE.put_event()
        schedule_logger(job_id).info(f"submit job successfully, job id is {job_id}")
        stat_logger.info(f"submit job successfully, job id is {job_id}")
    except Exception:
        stat_logger.error(f"Submit job fail, details: {traceback.format_exc()}")
        return {}, {}
    else:
        return job_id, job_conf_path
def v2_runner_on_skipped(self, result):
    """Mark the task SKIPPED in the DB, then render the default skip output."""
    task = result._task
    schedule_logger(self._job_id).info(
        f"<TASK SKIPPED> Host {result._host} executes task {task._uuid} {task._attributes.get('name')} skipped. Details: {json.dumps(result._result, indent=4)}"
    )
    update_info = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'task_id': task._uuid,
        'status': TaskStatus.SKIPPED,
        'end_time': current_timestamp(),
        'host': result._host
    }
    JobSaver.update_task(update_info)
    JobSaver.update_task_status(update_info)
    # Below mirrors ansible's stock skipped-host display behavior.
    if not self.display_skipped_hosts:
        return
    self._clean_results(result._result, task.action)
    if self._last_task_banner != task._uuid:
        self._print_task_banner(task)
    if task.loop and 'results' in result._result:
        self._process_items(result)
        return
    msg = "skipping: [%s]" % result._host.get_name()
    if self._run_is_verbose(result):
        msg += " => %s" % self._dump_results(result._result)
    self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_play_start(self, play):
    """Record play metadata in the DB, then print the standard play banner."""
    # TODO what if there is no play name
    name = play.get_name().strip()
    schedule_logger(self._job_id).info(
        f"<PLAY START> Play id: {self._play_id}, play name: {name}")
    try:
        play_info = {
            'job_id': self._job_id,
            'play_id': self._play_id,
            'play_name': name,
            'roles': str(play.get_roles()),
        }
        schedule_logger(self._job_id).info(
            f"play details: {json.dumps(play_info, indent=4)}")
        JobSaver.update_play(play_info)
    except Exception:
        # DB bookkeeping must never break the playbook run itself.
        schedule_logger(self._job_id).warning(
            f"In v2_playbook_on_play_start: {traceback.format_exc()}")
    schedule_logger(self._job_id).info(
        f"<<****<<**>>*****>> tasks name: uuid list: {play.get_tasks()}, roles: {play.get_roles()}, "
    )
    checkmsg = " [CHECK MODE]" if play.check_mode and self.check_mode_markers else ""
    msg = u"PLAY%s" % checkmsg if not name else u"PLAY [%s]%s" % (name, checkmsg)
    self._play = play
    self._display.banner(msg)
def create_play(job_id, play_id, play_conf, play_hosts):
    """Insert a fresh play record in WAITING state for the given job."""
    JobSaver.create_play(play_info={
        "job_id": job_id,
        "play_id": play_id,
        "play_conf": play_conf,
        # The play name simply mirrors its id.
        "play_name": play_id,
        "hosts": play_hosts,
        "status": PlayStatus.WAITING,
    })
def _task_start(self, task, prefix=None):
    """Create a WAITING task record, then mirror ansible's default
    task-start bookkeeping (banner caching / printing)."""
    schedule_logger(self._job_id).info(
        f"<CREATING TASK> Trying to create task. Task id: {task._uuid}, task name: {task._attributes.get('name')}, module: {task._role}."
    )
    if self._retry:
        # NOTE(review): the lookup result was never used in the original
        # (dead retry branch); the query is kept to preserve behavior.
        JobSaver.query_task(play_id=self._play_id)
    JobSaver.create_task({
        'job_id': self._job_id,
        'play_id': self._play_id,
        'task_id': task._uuid,
        'task_name': task._attributes.get('name'),
        'status': TaskStatus.WAITING,
        'create_time': current_timestamp(),
        'role': task._role,
    })
    schedule_logger(self._job_id).info(
        f"create task with id {task._uuid} successfully")
    # Cache output prefix for task if provided
    # This is needed to properly display 'RUNNING HANDLER' and similar
    # when hiding skipped/ok task results
    if prefix is not None:
        self._task_type_cache[task._uuid] = prefix
    # Preserve task name, as all vars may not be available for templating
    # when we need it later
    if self._play.strategy == 'free':
        # Explicitly set to None for strategy 'free' to account for any cached
        # task title from a previous non-free play
        self._last_task_name = None
    else:
        self._last_task_name = task.get_name().strip()
    # Display the task banner immediately if we're not doing any filtering
    # based on task result
    if self.display_skipped_hosts and self.display_ok_hosts:
        self._print_task_banner(task)
def check_job_status(job_id: str):
    """Return True iff the job has at least one play and every play succeeded."""
    plays = JobSaver.query_play(job_id=job_id)
    if not plays:
        # A job with no plays at all is not considered successful.
        return False
    return all(play.f_status == PlayStatus.SUCCESS for play in plays)
def query_play():
    """Return one play matching the request-body filters, plus its tasks."""
    plays = JobSaver.query_play(**request.json)
    if not plays:
        return get_json_result(retcode=101,
                               retmsg='Query play failed, no play found.')
    play_fields = [
        'job_id', 'play_id', 'create_time', 'start_time', 'end_time',
        'status', 'elapsed'
    ]
    data = plays[0].to_json(filters=play_fields)
    # Attach the play's tasks in creation order when any exist.
    tasks = JobSaver.query_task(reverse=False, **request.json)
    if tasks:
        task_fields = [
            'play_id', 'task_id', 'task_name', 'role', 'create_time',
            'start_time', 'end_time', 'status', 'elapsed'
        ]
        data['f_tasks'] = [t.to_json(task_fields) for t in tasks]
    return get_json_result(retmsg="Query play successfully.", data=data)
def get(self, block=True, timeout=None):
    """Block until a WAITING job exists, flip it to READY and return its id.

    Mirrors ``queue.Queue.get`` semantics but is backed by the job table.

    :param block: when False, fail immediately if no job is waiting.
    :param timeout: None = wait forever; >= 0 = wait at most that many seconds.
    :raises ValueError: if timeout is negative.
    :raises Exception: when no job is available (non-blocking / timed out) or
        when updating the picked job in the DB fails.
    :return: dict with the picked ``job_id``.
    """
    with self.not_empty:
        if not block:
            stat_logger.info("in queue get func, in if not block condition")
            if not self.query_waiting_jobs():
                raise Exception
        elif timeout is None:
            stat_logger.info("in queue get func, in timeout is none condition")
            while not self.query_waiting_jobs():
                stat_logger.info("in queue get func, in timeout is none condition, in while not loop")
                self.not_empty.wait()
        elif timeout < 0:
            stat_logger.info("in queue get func, in timeout < 0 condition")
            raise ValueError("'timeout' must be a non-negative number")
        else:
            stat_logger.info("in queue get func, in else condition")
            endtime = time() + timeout
            while not self.query_waiting_jobs():
                remaining = endtime - time()
                if remaining <= 0.0:
                    raise Exception
                self.not_empty.wait(remaining)
        stat_logger.info("in queue get func, ready to get in db")
        with DB.connection_context():
            error = None
            # Serialize pickers via a DB-level lock so two schedulers cannot
            # grab the same WAITING job.
            JobQueue.lock(DB, 'deploy_server_job_queue', 10)
            try:
                item = Job.select().where(Job.f_status == JobStatus.WAITING)[0]
                if item:
                    JobSaver.update_job_status({
                        'job_id': item.f_job_id,
                        'status': JobStatus.READY
                    })
            except Exception as e:
                error = e
            # Unlock unconditionally; the captured error is re-raised after.
            JobQueue.unlock(DB, 'deploy_server_job_queue')
            if error:
                # BUG FIX: the original did `raise Exception(e)`, but in
                # Python 3 the `as e` name is unbound once the except block
                # exits, so any DB failure surfaced as a NameError instead of
                # the real error. Re-raise the captured exception object.
                raise Exception(error)
        self.not_full.notify()
        return {
            'job_id': item.f_job_id,
        }
def query_task():
    """Return the first task matching the request-body filters."""
    tasks = JobSaver.query_task(**request.json)
    if not tasks:
        return get_json_result(retcode=101,
                               retmsg='Query task failed, no task found.')
    fields = [
        'play_id', 'task_id', 'task_name', 'role', 'create_time',
        'start_time', 'end_time', 'status', 'elapsed'
    ]
    return get_json_result(retmsg="Query task successfully.",
                           data=tasks[0].to_json(fields))
def query_job():
    """Return one job matching the request-body filters, plus its plays
    ordered by play_id."""
    jobs = JobSaver.query_job(**request.json)
    if not jobs:
        return get_json_result(retcode=101,
                               retmsg='Query job failed, no job found.')
    job_fields = [
        'job_id', 'create_time', 'start_time', 'end_time', 'status', 'elapsed'
    ]
    data = jobs[0].to_json(filters=job_fields)
    plays = JobSaver.query_play(reverse=False, order_by='play_id',
                                **request.json)
    if plays:
        play_fields = [
            'job_id', 'play_id', 'create_time', 'start_time', 'end_time',
            'status', 'elapsed'
        ]
        data['f_plays'] = [p.to_json(filters=play_fields) for p in plays]
    return get_json_result(retmsg="Query job successfully.", data=data)
def v2_playbook_on_no_hosts_matched(self):
    """No host matched the play: fail both play and job records, then show
    the default skip message."""
    schedule_logger(self._job_id).info(
        "<No host matched> Skipping: no hosts matched.")
    update_info = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'end_time': current_timestamp(),
        'status': PlayStatus.FAILED
    }
    JobSaver.update_play(update_info)
    JobSaver.update_play_status(update_info)
    JobSaver.update_job(update_info)
    JobSaver.update_job_status(update_info)
    self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def stop_play(job_id, play_id, status=PlayStatus.CANCELED):
    """Kill a play's worker process and record its terminal status.

    :return: True when the process was killed (statuses updated), False when
        the kill failed; implicitly None when the play is not in the DB
        (matching the original control flow).
    """
    plays = JobSaver.query_play(play_id=play_id)
    if not plays:
        # NOTE(review): falls through with None, as the original did.
        return None
    play = plays[0]
    if not job_utils.kill_play_process_execution(play):
        schedule_logger(job_id).info(f"cannot find and kill process of play {play_id}")
        return False
    if OngoingStatus.contains(play.f_status):
        play_info = {
            'job_id': job_id,
            'play_id': play_id,
            'end_time': current_timestamp(),
            'status': status,
        }
        JobSaver.update_play_status(play_info)
        # Standby plays only get their status flipped, not the full record.
        if not StandbyStatus.contains(play.f_status):
            JobSaver.update_play(play_info)
    return True
def v2_runner_on_start(self, host, task):
    """Flip the task to RUNNING in the DB, then honor show_per_host_start."""
    schedule_logger(self._job_id).info(
        f"<START TASK> Starting task. Host: {host}. task id: {task._uuid}, task name: {task._attributes.get('name')}"
    )
    update_info = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'task_id': task._uuid,
        'task_name': task._attributes.get('name'),
        'status': TaskStatus.RUNNING,
        'start_time': current_timestamp(),
        'role': task._role,
        'host': host
    }
    JobSaver.update_task(update_info)
    JobSaver.update_task_status(update_info)
    if self.get_option('show_per_host_start'):
        self._display.display(" [started %s on %s]" % (task, host),
                              color=C.COLOR_OK)
def run_do(self):
    """Detector loop body: stop plays whose worker process died, then stop
    the owning jobs that have not already finished."""
    try:
        running_plays = JobSaver.query_play(status='running')
        stop_job_ids = set()
        for play in running_plays:
            try:
                if not job_utils.check_job_process(int(play.f_pid)):
                    detect_logger.info(
                        'job {} play {} process does not exist'.format(
                            play.f_job_id, play.f_pid))
                    stop_job_ids.add(play.f_job_id)
                    detect_logger.info(f'start to stop play {play.f_play_id}')
                    JobController.stop_play(job_id=play.f_job_id,
                                            play_id=play.f_play_id)
            except Exception as e:
                # One broken play must not stop the detection sweep.
                detect_logger.exception(e)
        if stop_job_ids:
            schedule_logger().info(
                'start to stop jobs: {}'.format(stop_job_ids))
        for job_id in stop_job_ids:
            jobs = JobSaver.query_job(job_id=job_id)
            if jobs and not EndStatus.contains(jobs[0].f_status):
                JobController.stop_job(job_id=job_id)
    except Exception as e:
        detect_logger.exception(e)
    finally:
        detect_logger.info('finish detect running job')
def v2_runner_on_unreachable(self, result):
    """Record an unreachable host as a task and play failure, then render
    the default 'UNREACHABLE' output."""
    try:
        task = result._task
        schedule_logger(self._job_id).error(
            f"<HOST UNREACHABLE> Host {result._host} executes task {task._uuid} {task._attributes.get('name')} unreachable. \nDetails: {json.dumps(result._result, indent=4)}"
        )
        update_info = {
            'job_id': self._job_id,
            'play_id': self._play_id,
            'task_id': task._uuid,
            'end_time': current_timestamp(),
            'status': TaskStatus.FAILED,
        }
        JobSaver.update_task(update_info)
        JobSaver.update_task_status(update_info)
        JobSaver.update_play(update_info)
        JobSaver.update_play_status(update_info)
    except Exception:
        schedule_logger(self._job_id).warning(
            f"In v2_runner_on_unreachable, details: {traceback.format_exc()}")
    if self._last_task_banner != result._task._uuid:
        self._print_task_banner(result._task)
    delegated_vars = result._result.get('_ansible_delegated_vars', None)
    if delegated_vars:
        msg = "fatal: [%s -> %s]: UNREACHABLE! => %s" % (
            result._host.get_name(), delegated_vars['ansible_host'],
            self._dump_results(result._result))
    else:
        msg = "fatal: [%s]: UNREACHABLE! => %s" % (
            result._host.get_name(), self._dump_results(result._result))
    self._display.display(msg, color=C.COLOR_UNREACHABLE,
                          stderr=self.display_failed_stderr)
def check_ansible():
    """Report module status for a party, or a bare liveness payload.

    When a party_id is supplied, the stored module list is enriched with the
    live status reported by the party's supervisor host.
    """
    request_data = request.json
    if not request_data.get('party_id'):
        return get_json_result(data={'status': 'success'})
    # TODO consider case that there are more than one records of specified party
    parties = JobSaver.query_party(party_id=request_data.get('party_id'))
    if parties:
        party_info = parties[0].to_json(filters=['modules', 'version', 'role'])
        modules = party_info.get('f_modules', {}).get('data', [])
        status = {}
        host = None
        # Status checks are distributed through the party's supervisor host.
        for module in modules:
            if module.get('name') == 'supervisor':
                host = module.get('ips')[0]
                break
        if host:
            result = distribute_status_check_task(host=host)
            if result and result.get('list'):
                for item in result.get('list'):
                    status[item['name']] = item['status']
        if status:
            for module in modules:
                module['status'] = status.get(module['name'], None)
        return get_json_result(retmsg='Query party info successfully',
                               data={
                                   'party_id': request_data.get('party_id'),
                                   'list': modules,
                                   'fate_version': party_info.get('f_version'),
                                   "role": party_info.get('f_role')
                               })
    # NOTE(review): retcode=0 signals success even though the party was not
    # found — sibling endpoints use retcode=101 for "not found"; confirm intent.
    return get_json_result(
        retcode=0,
        retmsg=
        f"can not found info party {request_data.get('party_id')} in database."
    )
def do_status_check(party_id, module_name=None):
    """Ask a party's supervisor host for module status.

    :param party_id: party whose modules are to be checked.
    :param module_name: optional single module to restrict the check to.
    :return: ``(retcode, message, payload)``; retcode 0 on success, 100 otherwise.
    """
    parties = JobSaver.query_party(party_id=party_id, reverse=True)
    if not parties:
        return 100, f"can not find info of party with party id {party_id}", {}
    party_info = parties[0].to_json(filters=['modules', 'version'])
    modules = party_info.get('f_modules', {}).get('data', [])
    for module in modules:
        if module.get('name') == 'supervisor':
            host = module.get('ips')[0]
            break
    else:
        # for/else: no supervisor module recorded for this party.
        return 100, f"can not found module supervisor on party {party_id}", {}
    if module_name:
        result = distribute_status_check_task(host=host, module_name=module_name)
    else:
        result = distribute_status_check_task(host=host)
    if result and result.get('list'):
        # Stamp every entry (and the envelope) with the party's version.
        for item in result.get('list'):
            item.update({'version': party_info['f_version']})
        result['version'] = party_info['f_version']
        return 0, "check module status successfully", result
    return 100, "check module status failed, please check logs/ansible/ansible_stat.log", {}
def retry_play(job_id, play_id, test_mode=False):
    """Re-run a single play of a job.

    Resets play/job status to WAITING, clears old task records, and resubmits
    the play to the retry executor pool.

    :return: ``(retcode, message)``; retcode 0 when the retry was submitted.
    """
    plays = JobSaver.query_play(play_id=play_id)
    if not plays:
        return 100, f"Retry play {play_id} failed, can not find such play in database."
    # copy play conf into package dir
    play_conf_path_dict = file_utils.get_play_conf_path(play_id)
    with open(file_utils.get_job_conf_path(job_id), 'r') as f:
        job_conf = json.load(f)
    package_dir = get_package_dir_by_version(job_conf.get('version'))
    play_conf_path_dict['conf_path'] = shutil.copy2(
        src=play_conf_path_dict['conf_path'], dst=package_dir)
    reset_info = {
        'job_id': job_id,
        'play_id': play_id,
        'status': PlayStatus.WAITING,
    }
    JobSaver.update_play_status(reset_info)
    JobSaver.update_job_status(reset_info)
    # clean task records
    JobSaver.clean_task(play_id=play_id)
    # execute run_play method
    try:
        play_retry_executor_pool.submit(
            PlayController.run_play,
            job_id=job_id,
            play_id=play_id,
            play_conf_path=play_conf_path_dict['conf_path'],
            play_hosts_path=play_conf_path_dict['hosts_path'],
            test_mode=test_mode,
            retry_mode=True)
        return 0, f"Start retrying play {play_id}"
    except Exception as e:
        stat_logger.exception(e)
        return 100, f"Retry play {play_id} failed, details: {str(e)}"
def stop_job(job_id, status=JobStatus.CANCELED):
    """Stop every unfinished play of a job, then mark the job canceled.

    :return: ``(overall_success, per-play result map)``, or
        ``(False, {job_id: msg})`` when the job does not exist.
    """
    jobs = JobSaver.query_job(job_id=job_id)
    if not jobs:
        return False, {job_id: f"Cannot found job {job_id}"}
    stop_result = {}
    final_status = True
    pending = [p for p in JobSaver.query_play(job_id=job_id)
               if not EndStatus.contains(p.f_status)]
    for play in pending:
        stopped = JobController.stop_play(job_id=job_id,
                                          play_id=play.f_play_id,
                                          status=status)
        stop_result[play.f_play_id] = 'stopped successfully' if stopped else 'stopped failed'
        final_status = final_status & stopped
    if final_status:
        # NOTE(review): the job status is hard-coded to CANCELED here rather
        # than the `status` argument used for the plays — confirm intent.
        update_info = {
            'job_id': job_id,
            'end_time': current_timestamp(),
            'status': JobStatus.CANCELED
        }
        JobSaver.update_job(update_info)
        JobSaver.update_job_status(update_info)
    return final_status, stop_result
def v2_runner_on_ok(self, result):
    """Persist a successful task result, then render ansible's default
    ok/changed output."""
    try:
        task = result._task
        schedule_logger(self._job_id).info(
            f"<TASK SUCCESS> Host {result._host}, type of host: {type(result._host)} "
        )
        host_logger(self._job_id, result._host).info(
            f"<TASK SUCCESS> Task id: {task._uuid}, name: {task._attributes.get('name')} executed successfully on host {result._host}"
        )
        schedule_logger(self._job_id).info(
            f"<TASK SUCCESS> Host {result._host} executes task {task._uuid} {task._attributes.get('name')} successfully. Details: {json.dumps(result._result, indent=4)}"
        )
        schedule_logger(self._job_id).info(f"<RESULT INFO> f{result.__dict__}")
        update_info = {
            'job_id': self._job_id,
            'play_id': self._play_id,
            'task_id': task._uuid,
            'task_name': task._attributes.get('name'),
            'status': TaskStatus.SUCCESS,
            'end_time': current_timestamp(),
            'role': task._role
        }
        JobSaver.update_task(update_info)
        JobSaver.update_task_status(update_info)
    except Exception:
        # Never let DB bookkeeping break the callback itself.
        schedule_logger(self._job_id).warning(
            f"In v2_runner_on_ok: details: {traceback.format_exc()}")
    # --- default ansible display logic below ---
    delegated_vars = result._result.get('_ansible_delegated_vars', None)
    if isinstance(result._task, TaskInclude):
        return
    if result._result.get('changed', False):
        if self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)
        if delegated_vars:
            msg = "changed: [%s -> %s]" % (result._host.get_name(),
                                           delegated_vars['ansible_host'])
        else:
            msg = "changed: [%s]" % result._host.get_name()
        color = C.COLOR_CHANGED
    else:
        if not self.display_ok_hosts:
            return
        if self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)
        if delegated_vars:
            msg = "ok: [%s -> %s]" % (result._host.get_name(),
                                      delegated_vars['ansible_host'])
        else:
            msg = "ok: [%s]" % result._host.get_name()
        color = C.COLOR_OK
    self._handle_warnings(result._result)
    if result._task.loop and 'results' in result._result:
        self._process_items(result)
    else:
        self._clean_results(result._result, result._task.action)
        if self._run_is_verbose(result):
            msg += " => %s" % (self._dump_results(result._result), )
        self._display.display(msg, color=color)
def run_job(job_id):
    """Drive a job end to end: run every play in order, roll play statuses up
    to the job record, and on full success register the deployed modules in
    the party info table."""
    job_data = job_utils.get_job_configuration(job_id=job_id)
    stat_logger.info(
        f"in play controller run job func, get job data: {json.dumps(job_data, indent=4)}"
    )
    schedule_logger(job_id).info(
        f"in play controller, func run job: {json.dumps(job_data, indent=4)}"
    )
    play_conf_path_dict = PlayController.initialize_plays(job_id=job_id,
                                                          job_data=job_data)
    stat_logger.info(
        f"in play controller run job func after initialize play\n get play conf path dict: {play_conf_path_dict}"
    )
    # TODO get package dir by version
    package_dir = get_package_dir_by_version(job_data.get('version'))
    if not os.path.exists(package_dir) and not os.path.isdir(package_dir):
        raise Exception(f'Local package directory {package_dir} not exists.')
    job_info = {
        'job_id': job_id,
        'status': JobStatus.RUNNING,
        'start_time': current_timestamp()
    }
    JobSaver.update_job_status(job_info)
    JobSaver.update_job(job_info)
    for play_id, conf_dict in play_conf_path_dict.items():
        # The play conf must live inside the package dir while it runs.
        conf_dict['conf_path'] = shutil.copy2(src=conf_dict['conf_path'],
                                              dst=package_dir)
        PlayController.run_play(job_id=job_id,
                                play_id=play_id,
                                play_conf_path=conf_dict.get('conf_path'),
                                play_hosts_path=conf_dict.get('hosts_path'),
                                test_mode=TEST_MODE)
        if os.path.exists(conf_dict['conf_path']):
            os.remove(conf_dict['conf_path'])
        plays = JobSaver.query_play(play_id=play_id)
        if not plays:
            raise Exception(f'can not find play {play_id}')
        play = plays[0]
        status = play.f_status
        if status == PlayStatus.SUCCESS:
            JobSaver.update_play_status({
                'job_id': job_id,
                'play_id': play_id,
                'status': PlayStatus.SUCCESS,
                'end_time': current_timestamp()
            })
            JobSaver.update_play({
                'job_id': job_id,
                'play_id': play_id,
                'status': PlayStatus.SUCCESS,
                'end_time': current_timestamp()
            })
            continue
        # Play did not succeed: propagate a terminal status to the job and
        # stop launching further plays.
        if status in [PlayStatus.CANCELED, PlayStatus.FAILED, PlayStatus.TIMEOUT]:
            update_info = {
                'job_id': job_id,
                'play_id': play_id,
                'status': status,
                'end_time': current_timestamp()
            }
        else:
            update_info = {
                'job_id': job_id,
                'play_id': play_id,
                'status': PlayStatus.FAILED,
                'end_time': current_timestamp()
            }
            schedule_logger(job_id).error(
                f'Unexpected error occured on play {play_id}, job {job_id} failed, previous status of play: {play.f_status}'
            )
            stat_logger.error(
                f'Unexpected error occured on play {play_id}, job {job_id} failed, previous status of play: {play.f_status}'
            )
        JobSaver.update_play_status(update_info)
        JobSaver.update_play(update_info)
        JobSaver.update_job_status(update_info)
        JobSaver.update_job(update_info)
        schedule_logger(job_id).info(
            f"job {job_id} finished, status is {update_info.get('status')}"
        )
        break
    else:
        # for/else: the loop ran to completion, so every play succeeded.
        update_info = {
            'job_id': job_id,
            'status': JobStatus.SUCCESS,
            'end_time': current_timestamp()
        }
        JobSaver.update_job(update_info)
        JobSaver.update_job_status(update_info)
        schedule_logger(job_id).info(
            f"job {job_id} finished, status is {update_info.get('status')}"
        )
        if not TEST_MODE:
            # Register the successfully deployed modules with the party info.
            plays = JobSaver.query_play(job_id=job_id,
                                        status=PlayStatus.SUCCESS)
            modules = []
            module_names = []
            for play in plays:
                module_name = play.f_roles.strip('[]').replace('_', '')
                module_names.append(module_name)
                module_conf = job_data.get('modules', {}).get(module_name, {})
                modules.append({
                    'name': module_name,
                    'ips': module_conf.get('ips', []),
                    'port': module_conf.get('port', None)
                })
            # parties = PartyInfo.get_or_none(f_version=job_data.get('version'), f_party_id=job_data.get('party_id'))
            parties = PartyInfo.get_or_none(
                f_party_id=job_data.get('party_id'))
            if parties:
                # Merge the new modules into the stored list by name.
                module_mapping = dict(zip(module_names, modules))
                stored_modules = parties.f_modules.get("data", [])
                name_map = {
                    item.get('name'): offset
                    for offset, item in enumerate(stored_modules)
                }
                for key, value in module_mapping.items():
                    if key in name_map:
                        schedule_logger(job_id).info(
                            f"{key} in name map, in replace process")
                        stored_modules[name_map[key]] = value
                    else:
                        schedule_logger(job_id).info(
                            f"{key} not in name map, in append process ")
                        stored_modules.append(value)
                for key in ['role', 'version']:
                    if getattr(parties, f'f_{key}') != job_data[key]:
                        setattr(parties, f'f_{key}', job_data[key])
                parties.f_modules = {'data': stored_modules}
                parties.save()
                DB.commit()
            else:
                party_info = PartyInfo()
                party_info.f_role = job_data.get('role')
                party_info.f_version = job_data.get('version')
                party_info.f_party_id = job_data.get('party_id')
                party_info.f_modules = {'data': modules}
                party_info.save(force_insert=True)
def run_play(job_id, play_id, play_conf_path, play_hosts_path,
             test_mode=False, retry_mode=False):
    """Run one play in a child process and track its status in the DB.

    In retry mode the job's final status is recomputed once the play ends.
    """
    schedule_logger(job_id).info(
        f'Trying to start to run play with id: {play_id}')
    process_cmd = [
        'python3', sys.modules[PlayExecutor.__module__].__file__,
        '--job_id', job_id,
        '--play_id', play_id,
        '--conf_path', play_conf_path,
        '--hosts_path', play_hosts_path,
    ]
    if test_mode:
        process_cmd.append('--test')
    if retry_mode:
        process_cmd.append('--retry')
    schedule_logger(job_id).info(
        f"Trying to start job {job_id}, play {play_id} subprocess.")
    try:
        config_dir = file_utils.get_play_directory(play_id)
        std_dir = file_utils.get_job_log_directory(job_id)
        p = job_utils.run_subprocess(config_dir=config_dir,
                                     process_cmd=process_cmd,
                                     log_dir=std_dir)
        if not p:
            raise Exception(f'play {play_id} start subprocess failed')
        play_info = {
            'pid': p.pid,
            'job_id': job_id,
            'play_id': play_id,
            'status': PlayStatus.RUNNING,
            'start_time': current_timestamp()
        }
        JobSaver.update_play_status(play_info=play_info)
        JobSaver.update_play(play_info=play_info)
        p.wait()
    except Exception as e:
        failure_info = {
            'job_id': job_id,
            'play_id': play_id,
            'status': PlayStatus.FAILED,
            'end_time': current_timestamp()
        }
        JobSaver.update_play_status(failure_info)
        JobSaver.update_play(failure_info)
        schedule_logger(job_id).exception(e)
    finally:
        if retry_mode:
            # A retried play decides the whole job's final status.
            job_info = {
                'job_id': job_id,
                'play_id': play_id,
                'end_time': current_timestamp(),
                'status': JobStatus.SUCCESS
                if PlayController.check_job_status(job_id=job_id)
                else JobStatus.FAILED
            }
            JobSaver.update_job(job_info)
            JobSaver.update_job_status(job_info)
def v2_runner_on_failed(self, result, ignore_errors=False):
    """Propagate a task failure to task, play and job records, then render
    the default 'FAILED' output."""
    task = result._task
    schedule_logger(self._job_id).error(
        f"<TASK FAILED> Host {result._host} executes task {task._uuid} {task._attributes.get('name')} failed. Details: {json.dumps(result._result, indent=4)}"
    )
    update_info = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'task_id': task._uuid,
        'end_time': current_timestamp(),
        'status': TaskStatus.FAILED,
    }
    # A failed task fails its task, play and job records alike.
    for update in (JobSaver.update_task, JobSaver.update_task_status,
                   JobSaver.update_play, JobSaver.update_play_status,
                   JobSaver.update_job, JobSaver.update_job_status):
        update(update_info)
    delegated_vars = result._result.get('_ansible_delegated_vars', None)
    self._clean_results(result._result, task.action)
    if self._last_task_banner != task._uuid:
        self._print_task_banner(task)
    self._handle_exception(result._result,
                           use_stderr=self.display_failed_stderr)
    self._handle_warnings(result._result)
    if task.loop and 'results' in result._result:
        self._process_items(result)
    elif delegated_vars:
        self._display.display(
            "fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(),
                                                  delegated_vars['ansible_host'],
                                                  self._dump_results(result._result)),
            color=C.COLOR_ERROR,
            stderr=self.display_failed_stderr)
    else:
        self._display.display("fatal: [%s]: FAILED! => %s" %
                              (result._host.get_name(),
                               self._dump_results(result._result)),
                              color=C.COLOR_ERROR,
                              stderr=self.display_failed_stderr)
    if ignore_errors:
        self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_playbook_on_stats(self, stats):
    """Print the PLAY RECAP and mark the play SUCCESS when no host failed
    or was unreachable."""
    self._display.banner("PLAY RECAP")
    failed_count = 0
    for h in sorted(stats.processed.keys()):
        t = stats.summarize(h)
        schedule_logger(self._job_id).info(
            f"in playbook on stats: RECAP: {t}, host: {h}")
        failed_count += t['failures'] + t['unreachable']
        self._display.display(u"%s : %s %s %s %s %s %s %s" % (
            hostcolor(h, t, False),
            colorize(u'ok', t['ok'], None),
            colorize(u'changed', t['changed'], None),
            colorize(u'unreachable', t['unreachable'], None),
            colorize(u'failed', t['failures'], None),
            colorize(u'skipped', t['skipped'], None),
            colorize(u'rescued', t['rescued'], None),
            colorize(u'ignored', t['ignored'], None),
        ), log_only=True)
    if not failed_count:
        update_info = {
            'job_id': self._job_id,
            'play_id': self._play_id,
            'end_time': current_timestamp(),
            'status': PlayStatus.SUCCESS
        }
        JobSaver.update_play(update_info)
        JobSaver.update_play_status(update_info)
    self._display.display("", screen_only=True)
    # print custom stats if required
    if stats.custom and self.show_custom_stats:
        self._display.banner("CUSTOM STATS: ")
        # per host
        # TODO: come up with 'pretty format'
        for k in sorted(stats.custom.keys()):
            if k == '_run':
                continue
            self._display.display(
                '\t%s: %s' % (k, self._dump_results(
                    stats.custom[k], indent=1).replace('\n', '')))
        # print per run custom stats
        if '_run' in stats.custom:
            self._display.display("", screen_only=True)
            self._display.display('\tRUN: %s' % self._dump_results(
                stats.custom['_run'], indent=1).replace('\n', ''))
        self._display.display("", screen_only=True)
    if context.CLIARGS['check'] and self.check_mode_markers:
        self._display.banner("DRY RUN")