def initialize_plays(job_id, job_data):
    """Generate and persist play configurations for a job.

    Uses the version-specific adapter to build per-play conf dicts, writes
    each play's yml/hosts files to disk and records the play in the DB.

    :param job_id: id of the job the plays belong to
    :param job_data: submitted job configuration (must carry 'version')
    :return: OrderedDict mapping play_id -> {'conf_path': ..., 'hosts_path': ...};
             empty dict on any failure (errors are logged, not raised)
    """
    play_conf_path = OrderedDict()
    adapter = job_utils.get_adapter(job_data.get('version'))
    try:
        schedule_logger(job_id).info('Start initializing plays...')
        stat_logger.info('Start initializing plays...')
        play_conf_dict = adapter.generate_play_conf(job_id=job_id,
                                                    job_data=job_data)
        for play_id, conf in play_conf_dict.items():
            # fix: this message was logged twice in a row (copy-paste duplicate)
            schedule_logger(job_id).info(
                f'Start create and save play conf, play id: {play_id}')
            path_dict = file_utils.save_play_conf(job_id=job_id,
                                                  play_id=play_id,
                                                  play_conf=conf['yml'],
                                                  play_hosts=conf['hosts'])
            play_conf_path.update(path_dict)
            PlayController.create_play(job_id=job_id,
                                       play_id=play_id,
                                       play_conf=conf['yml'],
                                       play_hosts=conf['hosts'])
            schedule_logger(job_id).info(
                f'Initializing play successfully, play id: {play_id}')
    except Exception as e:
        stat_logger.exception(e)
        schedule_logger(job_id).exception(e)
        return {}
    else:
        return play_conf_path
def v2_runner_on_skipped(self, result):
    """Ansible callback: a host skipped a task.

    Records the SKIPPED status (with end time and host) for the task in the
    job DB, then reproduces the default stdout-callback display behaviour.
    """
    # stat_logger.info(f'>>>>>>>>>in runner on skipped: {json.dumps(result._result, indent=4)}')
    schedule_logger(self._job_id).info(
        f"<TASK SKIPPED> Host {result._host} executes task {result._task._uuid} {result._task._attributes.get('name')} skipped. Details: {json.dumps(result._result, indent=4)}"
    )
    update_info = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'task_id': result._task._uuid,
        'status': TaskStatus.SKIPPED,
        'end_time': current_timestamp(),
        # NOTE(review): stores the Host object itself, not its name —
        # presumably the DB layer stringifies it; confirm against JobSaver
        'host': result._host
    }
    # persist both the task row fields and the status transition
    JobSaver.update_task(update_info)
    JobSaver.update_task_status(update_info)
    # schedule_logger(self._job_id).info(f'>>>>>>>>>in runner on skipped: {json.dumps(result._result, indent=4)}')
    if self.display_skipped_hosts:
        self._clean_results(result._result, result._task.action)
        if self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)
        if result._task.loop and 'results' in result._result:
            # loop tasks delegate per-item rendering
            self._process_items(result)
        else:
            msg = "skipping: [%s]" % result._host.get_name()
            if self._run_is_verbose(result):
                msg += " => %s" % self._dump_results(result._result)
            self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_item_on_ok(self, result):
    """Ansible callback: one loop item finished OK on a host.

    Purely a display handler — no DB updates here; the parent task result
    is handled by the non-item callbacks.
    """
    schedule_logger(self._job_id).info(
        f'>>>>>>>>>>>>>>>> in runner item on ok: {json.dumps(result._result, indent=4)}'
    )
    delegated_vars = result._result.get('_ansible_delegated_vars', None)
    if isinstance(result._task, TaskInclude):
        # include tasks produce no per-item output
        return
    elif result._result.get('changed', False):
        if self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)
        msg = 'changed'
        color = C.COLOR_CHANGED
    else:
        if not self.display_ok_hosts:
            return
        if self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)
        msg = 'ok'
        color = C.COLOR_OK
    if delegated_vars:
        msg += ": [%s -> %s]" % (result._host.get_name(),
                                 delegated_vars['ansible_host'])
    else:
        msg += ": [%s]" % result._host.get_name()
    msg += " => (item=%s)" % (self._get_item_label(result._result), )
    self._clean_results(result._result, result._task.action)
    if self._run_is_verbose(result):
        msg += " => %s" % self._dump_results(result._result)
    self._display.display(msg, color=color)
def submit_job(cls, job_data, job_id=None):
    """Validate, persist and enqueue a newly submitted job.

    :param job_data: job configuration body from the caller
    :param job_id: optional pre-assigned id; generated when absent
    :return: (job_id, job_conf_path) on success, ({}, {}) on any failure
    """
    try:
        if not job_id:
            job_id = job_utils.generate_job_id()
        stat_logger.info(f'Trying submit job, job_id {job_id}, body {job_data}')
        schedule_logger(job_id).info(f'Trying submit job, job_id {job_id}, body {job_data}')
        job_utils.check_job_conf(job_data)
        # {'job_conf_path': 'xxx', 'job_runtime_conf_path': 'xxx'}
        job_conf_path = file_utils.save_job_conf(job_id=job_id, job_data=job_data)
        # record the job as WAITING and wake the scheduler queue
        JobSaver.create_job(job_info={
            'job_id': job_id,
            'job_conf': job_data,
            'status': JobStatus.WAITING
        })
        RuntimeConfig.JOB_QUEUE.put_event()
        schedule_logger(job_id).info(f"submit job successfully, job id is {job_id}")
        stat_logger.info(f"submit job successfully, job id is {job_id}")
    except Exception:
        stat_logger.error(f"Submit job fail, details: {traceback.format_exc()}")
        return {}, {}
    else:
        return job_id, job_conf_path
def v2_playbook_on_start(self, playbook):
    """Ansible callback: playbook execution begins.

    Logs the playbook file to the job log, then follows the stock display
    logic: banner above verbosity 1, CLI arguments above verbosity 3, and
    a DRY RUN banner in check mode.
    """
    schedule_logger(self._job_id).info(
        f"in playbook on start, playbook: {playbook._file_name}")
    if self._display.verbosity > 1:
        from os.path import basename
        self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
    # show CLI arguments
    if self._display.verbosity > 3:
        if context.CLIARGS.get('args'):
            self._display.display('Positional arguments: %s' %
                                  ' '.join(context.CLIARGS['args']),
                                  color=C.COLOR_VERBOSE,
                                  screen_only=True)
        for argument in (a for a in context.CLIARGS if a != 'args'):
            val = context.CLIARGS[argument]
            if val:
                self._display.display('%s: %s' % (argument, val),
                                      color=C.COLOR_VERBOSE,
                                      screen_only=True)
    if context.CLIARGS['check'] and self.check_mode_markers:
        self._display.banner("DRY RUN")
def v2_playbook_on_include(self, included_file):
    """Ansible callback: a file was included for a set of hosts."""
    msg = 'included: %s for %s' % (included_file._filename, ", ".join(
        [h.name for h in included_file._hosts]))
    schedule_logger(
        self._job_id).info(f"in v2 playbook on include, msg: {msg}")
    # NOTE(review): the item label is appended only after the log line above,
    # so the job log never sees it — confirm whether that is intentional
    if 'item' in included_file._args:
        msg += " => (item=%s)" % (self._get_item_label(
            included_file._args), )
    self._display.display(msg, color=C.COLOR_SKIP)
def update_job(cls, job_info):
    """Persist updated fields of a job record.

    :param job_info: dict carrying at least 'job_id' plus the fields to update
    :return: True if the row was updated, False otherwise
    """
    schedule_logger(job_info["job_id"]).info(
        f'Start to update info of job {job_info["job_id"]}')
    succeeded = cls.update_entity_table(entity_model=Job, entity_info=job_info)
    outcome = "success" if succeeded else "failed"
    schedule_logger(job_info["job_id"]).info(
        f'Update info of job {job_info["job_id"]} {outcome}')
    return succeeded
def update_task(cls, task_info):
    """Persist updated fields of a task record.

    :param task_info: dict carrying 'job_id' and 'task_id' plus fields to update
    :return: True if the row was updated, False otherwise
    """
    schedule_logger(task_info["job_id"]).info(
        f'Start to update info of task {task_info["task_id"]}')
    succeeded = cls.update_entity_table(entity_model=Task, entity_info=task_info)
    outcome = "success" if succeeded else "failed"
    schedule_logger(task_info["job_id"]).info(
        f'Update info of task {task_info["task_id"]} {outcome}')
    return succeeded
def update_play(cls, play_info):
    """Persist updated fields of a play record.

    :param play_info: dict carrying 'job_id' and 'play_id' plus fields to update
    :return: True if the row was updated, False otherwise
    """
    schedule_logger(play_info["job_id"]).info(
        f'Start to update info of play {play_info["play_id"]}, play_info: {play_info}'
    )
    succeeded = cls.update_entity_table(entity_model=Play, entity_info=play_info)
    outcome = "success" if succeeded else "failed"
    schedule_logger(play_info["job_id"]).info(
        f'Update info of play {play_info["play_id"]} {outcome}')
    return succeeded
def update_job_status(cls, job_info):
    """Transition a job record to the status carried in ``job_info``.

    :param job_info: dict carrying 'job_id' and the target 'status'
    :return: True if the status transition was recorded, False otherwise
    """
    schedule_logger(job_info["job_id"]).info(
        f'Start to update status of job {job_info["job_id"]} to <{job_info["status"]}>'
    )
    succeeded = cls.update_status(entity_model=Job, entity_info=job_info)
    outcome = "success" if succeeded else "failed"
    schedule_logger(job_info["job_id"]).info(
        f'Update status of job {job_info["job_id"]} to <{job_info["status"]}> {outcome}'
    )
    return succeeded
def update_play_status(cls, play_info):
    """Transition a play record to the status carried in ``play_info``.

    :param play_info: dict carrying 'job_id', 'play_id' and the target 'status'
    :return: True if the status transition was recorded, False otherwise
    """
    schedule_logger(play_info["job_id"]).info(
        f'Start to update status of play {play_info["play_id"]} to <{play_info["status"]}>'
    )
    succeeded = cls.update_status(entity_model=Play, entity_info=play_info)
    outcome = "success" if succeeded else "failed"
    schedule_logger(play_info["job_id"]).info(
        f'Update status of play {play_info["play_id"]} to <{play_info["status"]}> {outcome}'
    )
    return succeeded
def update_task_status(cls, task_info):
    """Transition a task record to the status carried in ``task_info``.

    :param task_info: dict carrying 'job_id', 'task_id' and the target 'status'
    :return: True if the status transition was recorded, False otherwise
    """
    schedule_logger(task_info["job_id"]).info(
        f'Start to update status of task {task_info["task_id"]} to <{task_info["status"]}>'
    )
    succeeded = cls.update_status(entity_model=Task, entity_info=task_info)
    outcome = "success" if succeeded else "failed"
    schedule_logger(task_info["job_id"]).info(
        f'Update status of task {task_info["task_id"]} to <{task_info["status"]}> {outcome}'
    )
    return succeeded
def v2_runner_on_failed(self, result, ignore_errors=False):
    """Ansible callback: a task failed on a host.

    Marks the task, its play and the whole job as FAILED in the DB (in that
    order), then renders the standard fatal/FAILED output.

    :param ignore_errors: when True, a trailing "...ignoring" is displayed
    """
    # schedule_logger(self._job_id).error(f"Run on failed, result: {result._task._uuid}")
    # schedule_logger(self._job_id).error(f"Run on failed, details: {result.__dict__}")
    # stat_logger.info(f'>>>>>>>>>run on ok failed: {json.dumps(result._result, indent=4)}')
    schedule_logger(self._job_id).error(
        f"<TASK FAILED> Host {result._host} executes task {result._task._uuid} {result._task._attributes.get('name')} failed. Details: {json.dumps(result._result, indent=4)}"
    )
    update_info = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'task_id': result._task._uuid,
        'end_time': current_timestamp(),
        'status': TaskStatus.FAILED,
    }
    # a failed task fails its task, play and job records alike
    # NOTE(review): unlike v2_runner_on_skipped, no 'host' key here — confirm
    JobSaver.update_task(update_info)
    JobSaver.update_task_status(update_info)
    JobSaver.update_play(update_info)
    JobSaver.update_play_status(update_info)
    JobSaver.update_job(update_info)
    JobSaver.update_job_status(update_info)
    delegated_vars = result._result.get('_ansible_delegated_vars', None)
    self._clean_results(result._result, result._task.action)
    if self._last_task_banner != result._task._uuid:
        self._print_task_banner(result._task)
    self._handle_exception(result._result,
                           use_stderr=self.display_failed_stderr)
    self._handle_warnings(result._result)
    if result._task.loop and 'results' in result._result:
        self._process_items(result)
    else:
        if delegated_vars:
            self._display.display(
                "fatal: [%s -> %s]: FAILED! => %s" %
                (result._host.get_name(), delegated_vars['ansible_host'],
                 self._dump_results(result._result)),
                color=C.COLOR_ERROR,
                stderr=self.display_failed_stderr)
        else:
            self._display.display("fatal: [%s]: FAILED! => %s" %
                                  (result._host.get_name(),
                                   self._dump_results(result._result)),
                                  color=C.COLOR_ERROR,
                                  stderr=self.display_failed_stderr)
    if ignore_errors:
        self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_matched(self):
    """Ansible callback: the inventory matched no hosts.

    Marks the play and the whole job FAILED, then shows the standard
    "skipping: no hosts matched" line.
    """
    schedule_logger(
        self._job_id).info("<No host matched> Skipping: no hosts matched.")
    failure = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'end_time': current_timestamp(),
        'status': PlayStatus.FAILED
    }
    # propagate the failure from play level up to job level, in order
    for persist in (JobSaver.update_play, JobSaver.update_play_status,
                    JobSaver.update_job, JobSaver.update_job_status):
        persist(failure)
    self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_play_start(self, play):
    """Ansible callback: a play begins.

    Best-effort records the play's name and roles in the DB (failures only
    warn), then prints the standard PLAY banner.
    """
    # TODO what if there is no play
    name = play.get_name().strip()
    # stat_logger.info(f"<<****<<**>>*****>> in playbook on play start, customise play_id: {self._play_id}")
    schedule_logger(self._job_id).info(
        f"<PLAY START> Play id: {self._play_id}, play name: {name}")
    try:
        play_info = {
            'job_id': self._job_id,
            'play_id': self._play_id,
            'play_name': name,
            # roles stored as the str() of the roles list
            'roles': str(play.get_roles()),
        }
        schedule_logger(self._job_id).info(
            f"play details: {json.dumps(play_info, indent=4)}")
        JobSaver.update_play(play_info)
    except Exception as e:
        # DB bookkeeping must not break the playbook run
        schedule_logger(self._job_id).warning(
            f"In v2_playbook_on_play_start: {traceback.format_exc()}")
    schedule_logger(self._job_id).info(
        f"<<****<<**>>*****>> tasks name: uuid list: {play.get_tasks()}, roles: {play.get_roles()}, "
    )
    if play.check_mode and self.check_mode_markers:
        checkmsg = " [CHECK MODE]"
    else:
        checkmsg = ""
    if not name:
        msg = u"PLAY%s" % checkmsg
    else:
        msg = u"PLAY [%s]%s" % (name, checkmsg)
    self._play = play
    self._display.banner(msg)
def v2_runner_item_on_skipped(self, result):
    """Ansible callback: one loop item was skipped on a host.

    Display-only handler; item-level results are not persisted to the DB.
    """
    # stat_logger.info(f'>>>>>>>>>>>>>>>> in runner item on skipped: {json.dumps(result._result, indent=4)}')
    schedule_logger(self._job_id).info(
        f'>>>>>>>>>>>>>>>> in runner item on skipped: {json.dumps(result._result, indent=4)}'
    )
    if self.display_skipped_hosts:
        if self._last_task_banner != result._task._uuid:
            self._print_task_banner(result._task)
        self._clean_results(result._result, result._task.action)
        msg = "skipping: [%s] => (item=%s) " % (
            result._host.get_name(), self._get_item_label(result._result))
        if self._run_is_verbose(result):
            msg += " => %s" % self._dump_results(result._result)
        self._display.display(msg, color=C.COLOR_SKIP)
def _task_start(self, task, prefix=None):
    """Record a WAITING task row for a task about to run, then do the stock
    stdout-callback bookkeeping (prefix cache, last-task-name, banner).

    :param task: ansible Task object about to execute
    :param prefix: optional display prefix ('TASK', 'RUNNING HANDLER', ...)
    """
    # Create task from this function
    # schedule_logger(self._job_id).info(f"<CREATING TASK> Trying to create task. Task id: {task._uuid}, task name: {task._attributes.get('name')}, prefix: {prefix}")
    schedule_logger(self._job_id).info(
        f"<CREATING TASK> Trying to create task. Task id: {task._uuid}, task name: {task._attributes.get('name')}, module: {task._role}."
    )
    if self._retry:
        tasks = JobSaver.query_task(play_id=self._play_id)
        # NOTE(review): query result is never used — dead branch; intent
        # (skip re-creating tasks on retry?) unclear, confirm before removing
        if tasks:
            pass
    task_info = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'task_id': task._uuid,
        'task_name': task._attributes.get('name'),
        'status': TaskStatus.WAITING,
        'create_time': current_timestamp(),
        'role': task._role,
    }
    JobSaver.create_task(task_info)
    schedule_logger(self._job_id).info(
        f"create task with id {task._uuid} successfully")
    # try:
    #     schedule_logger(self._job_id).info(f"task role: {task._role}")
    # except Exception:
    #     pass
    # stat_logger.info(f"<<<<<>>>>> in _task_start, task name: {task._attributes.get('name')}, task id: {task._uuid}")
    # Cache output prefix for task if provided
    # This is needed to properly display 'RUNNING HANDLER' and similar
    # when hiding skipped/ok task results
    if prefix is not None:
        self._task_type_cache[task._uuid] = prefix
    # Preserve task name, as all vars may not be available for templating
    # when we need it later
    if self._play.strategy == 'free':
        # Explicitly set to None for strategy 'free' to account for any cached
        # task title from a previous non-free play
        self._last_task_name = None
    else:
        self._last_task_name = task.get_name().strip()
    # Display the task banner immediately if we're not doing any filtering based on task result
    if self.display_skipped_hosts and self.display_ok_hosts:
        self._print_task_banner(task)
def kill_play_process_execution(play: Play):
    """Terminate the executor process of a play, if any.

    :param play: Play record; f_pid holds the executor's pid (may be empty)
    :return: True if the process was killed or is already gone / in a
             non-killable status; False when the pid no longer belongs to a
             play executor (pid reuse) or no pid exists and the status is
             not terminal
    """
    # fix: dropped the original `except Exception as e: raise e` wrapper —
    # it handled nothing and only re-raised with an extra traceback frame
    if play.f_pid:
        schedule_logger(play.f_job_id).info(
            f"try to stop play {play.f_play_id} of job {play.f_job_id}, pid: {play.f_pid}"
        )
        pid = int(play.f_pid)
        # refuse to kill a pid that is no longer our play executor (pid reuse)
        if not is_play_executor_process(play):
            return False
        if check_job_process(pid):
            psutil.Process(pid).kill()
            kill_status = True
        else:
            # process already gone counts as a successful stop
            schedule_logger(
                play.f_job_id).info(f'pid {play.f_pid} not exists')
            kill_status = True
    else:
        # no pid recorded: succeed only when the play's status needs no kill
        kill_status = NoneKillStatus.contains(play.f_status)
    schedule_logger(play.f_job_id).info(
        f"stop play {play.f_play_id} of job {play.f_job_id} {'success' if kill_status else 'failed'}"
    )
    return kill_status
def save_play_conf(job_id, play_id, play_conf, play_hosts) -> dict:
    """Write a play's yaml conf and hosts inventory to disk.

    :param job_id: owning job id (used for logging only)
    :param play_id: id used to derive the conf/hosts file paths
    :param play_conf: yaml-serialisable play configuration
    :param play_hosts: inventory file content (plain text)
    :return: {play_id: {'conf_path': ..., 'hosts_path': ...}}
    :raises: re-raises any failure after logging the traceback
    """
    try:
        # return {'play_id': {'conf_path': 'xxx', 'hosts_path': 'xxx'}}
        stat_logger.info(f"in save play conf func, play id: {play_id}")
        stat_logger.info(f"in save play conf func, play conf: {play_conf}")
        stat_logger.info(f"in save play conf func, play hosts: {play_hosts}")
        schedule_logger(job_id).info(
            f'Saving play {play_id} conf file and hosts file...')
        stat_logger.info(f'Saving play {play_id} conf file and hosts file...')
        play_conf_path = get_play_conf_path(play_id)
        os.makedirs(os.path.dirname(play_conf_path.get('conf_path')),
                    exist_ok=True)
        # RoundTripDumper — presumably ruamel.yaml (keeps formatting); confirm import
        with open(play_conf_path.get('conf_path'), 'w') as conf_fp:
            yaml.dump(play_conf, conf_fp, Dumper=yaml.RoundTripDumper)
        schedule_logger(job_id).info(
            f"Saving play {play_id} conf file success, file path {play_conf_path.get('conf_path')}"
        )
        with open(play_conf_path.get('hosts_path'), 'w') as hosts_fp:
            hosts_fp.write(play_hosts)
        schedule_logger(job_id).info(
            f"Saving play {play_id} hosts file success, file path {play_conf_path.get('hosts_path')}"
        )
        stat_logger.info(
            f"Saving play {play_id} hosts file success, file path {play_conf_path.get('hosts_path')}"
        )
        return {
            play_id: {
                "conf_path": play_conf_path.get('conf_path'),
                "hosts_path": play_conf_path.get('hosts_path')
            }
        }
    except Exception:
        stat_logger.error(traceback.format_exc())
        raise
def stop_play(job_id, play_id, status=PlayStatus.CANCELED):
    """Kill a play's executor process and record a terminal status.

    :param status: terminal status to record, defaults to CANCELED
    :return: True when the process was stopped (or already gone); False when
             the kill failed or the play cannot be found
    """
    plays = JobSaver.query_play(play_id=play_id)
    if plays:
        play = plays[0]
        kill_status = job_utils.kill_play_process_execution(play)
        if kill_status:
            # only plays still in an ongoing status get a status rewrite
            if OngoingStatus.contains(play.f_status):
                play_info = {
                    'job_id': job_id,
                    'play_id': play_id,
                    'end_time': current_timestamp(),
                    'status': status,
                }
                JobSaver.update_play_status(play_info)
                # full-record update skipped for standby plays — TODO confirm intent
                if not StandbyStatus.contains(play.f_status):
                    JobSaver.update_play(play_info)
            return True
        else:
            return False
    else:
        schedule_logger(job_id).info(f"cannot find and kill process of play {play_id}")
        return False
def v2_runner_on_unreachable(self, result):
    """Ansible callback: a host was unreachable for a task.

    Best-effort marks the task and its play FAILED in the DB (DB errors
    only warn), then renders the standard UNREACHABLE output.
    """
    try:
        schedule_logger(self._job_id).error(
            f"<HOST UNREACHABLE> Host {result._host} executes task {result._task._uuid} {result._task._attributes.get('name')} unreachable. \nDetails: {json.dumps(result._result, indent=4)}"
        )
        # schedule_logger(self._job_id).info(f'>>>>>>>>>in runner on unreachable: {json.dumps(result._result, indent=4)}')
        # stat_logger.info(f'>>>>>>>>>in runner on unreachable: {json.dumps(result._result, indent=4)}')
        update_info = {
            'job_id': self._job_id,
            'play_id': self._play_id,
            'task_id': result._task._uuid,
            'end_time': current_timestamp(),
            'status': TaskStatus.FAILED,
        }
        # fail the task and the play; the job itself is not updated here
        JobSaver.update_task(update_info)
        JobSaver.update_task_status(update_info)
        JobSaver.update_play(update_info)
        JobSaver.update_play_status(update_info)
    except Exception:
        schedule_logger(self._job_id).warning(
            f"In v2_runner_on_unreachable, details: {traceback.format_exc()}"
        )
    if self._last_task_banner != result._task._uuid:
        self._print_task_banner(result._task)
    delegated_vars = result._result.get('_ansible_delegated_vars', None)
    if delegated_vars:
        msg = "fatal: [%s -> %s]: UNREACHABLE! => %s" % (
            result._host.get_name(), delegated_vars['ansible_host'],
            self._dump_results(result._result))
    else:
        msg = "fatal: [%s]: UNREACHABLE! => %s" % (
            result._host.get_name(), self._dump_results(result._result))
    self._display.display(msg,
                          color=C.COLOR_UNREACHABLE,
                          stderr=self.display_failed_stderr)
def v2_runner_on_start(self, host, task):
    """Ansible callback: a task starts executing on a host.

    Transitions the task row to RUNNING with a start timestamp, then
    optionally shows the per-host start marker.
    """
    # schedule_logger(self._job_id).info(f"in v2 runner on start, host: {host}, task: {task}, task id: {task._uuid}")
    schedule_logger(self._job_id).info(
        f"<START TASK> Starting task. Host: {host}. task id: {task._uuid}, task name: {task._attributes.get('name')}"
    )
    update_info = {
        'job_id': self._job_id,
        'play_id': self._play_id,
        'task_id': task._uuid,
        'task_name': task._attributes.get('name'),
        'status': TaskStatus.RUNNING,
        'start_time': current_timestamp(),
        'role': task._role,
        'host': host
    }
    JobSaver.update_task(update_info)
    JobSaver.update_task_status(update_info)
    # # schedule_logger(self._job_id).info(f"<TASK ON START> host: {host}, task id: {task._uuid}, task name: {task._attribute.get('name')}")
    # schedule_logger(self._job_id).info(f"<TASK ON START> host: {host}, task id: {task._uuid}, task name: {task._attribute.get('name')}")
    if self.get_option('show_per_host_start'):
        self._display.display(" [started %s on %s]" % (task, host),
                              color=C.COLOR_OK)
def v2_runner_item_on_failed(self, result):
    """Ansible callback: one loop item failed on a host.

    Display-only handler; the task-level failure is persisted by
    v2_runner_on_failed.
    """
    # stat_logger.info(f'>>>>>>>>>>>>>>>> in runner item on failed: {json.dumps(result._result, indent=4)}')
    schedule_logger(self._job_id).info(
        f'>>>>>>>>>>>>>>>> in runner item on failed: {json.dumps(result._result, indent=4)}'
    )
    if self._last_task_banner != result._task._uuid:
        self._print_task_banner(result._task)
    delegated_vars = result._result.get('_ansible_delegated_vars', None)
    self._clean_results(result._result, result._task.action)
    self._handle_exception(result._result)
    msg = "failed: "
    if delegated_vars:
        msg += "[%s -> %s]" % (result._host.get_name(),
                               delegated_vars['ansible_host'])
    else:
        msg += "[%s]" % (result._host.get_name())
    self._handle_warnings(result._result)
    self._display.display(msg + " (item=%s) => %s" % (self._get_item_label(
        result._result), self._dump_results(result._result)),
                          color=C.COLOR_ERROR)
def run_do(self):
    """Detector pass: find 'running' plays whose executor process has died,
    stop those plays, then stop their owning jobs if not already ended.

    All failures are logged and swallowed so the detector keeps running.
    """
    try:
        running_plays = JobSaver.query_play(status='running')
        stop_job_ids = set()
        for play in running_plays:
            try:
                process_exist = job_utils.check_job_process(int(
                    play.f_pid))
                if not process_exist:
                    detect_logger.info(
                        'job {} play {} process does not exist'.format(
                            play.f_job_id, play.f_pid))
                    stop_job_ids.add(play.f_job_id)
                    detect_logger.info(
                        f'start to stop play {play.f_play_id}')
                    JobController.stop_play(job_id=play.f_job_id,
                                            play_id=play.f_play_id)
            except Exception as e:
                # one bad play must not abort the whole scan
                detect_logger.exception(e)
        # ready_plays = JobSaver.query_play(status='ready')
        # for play in ready_plays:
        #     try:
        if stop_job_ids:
            schedule_logger().info(
                'start to stop jobs: {}'.format(stop_job_ids))
        for job_id in stop_job_ids:
            jobs = JobSaver.query_job(job_id=job_id)
            if jobs:
                # only stop jobs that have not already reached a terminal status
                if not EndStatus.contains(jobs[0].f_status):
                    JobController.stop_job(job_id=job_id)
    except Exception as e:
        detect_logger.exception(e)
    finally:
        detect_logger.info('finish detect running job')
def run(self):
    """Scheduler main loop: pull job events off the queue and dispatch them
    to the executor pool, throttled to ``self.concurrent_num`` in-flight jobs.

    :return: False immediately if the queue is not ready; otherwise loops forever
    """
    if not self.queue.is_ready():
        schedule_logger().error('queue is not ready')
        return False
    all_jobs = []
    while True:
        try:
            schedule_logger().info("Starting in queue detecting loop...")
            if len(all_jobs) == self.concurrent_num:
                # at capacity: block until one future completes, free its slot
                for future in as_completed(all_jobs):
                    all_jobs.remove(future)
                    break
            stat_logger.info("Trying get event...")
            job_event = self.queue.get_event()
            stat_logger.info("get event success")
            schedule_logger(job_event['job_id']).info(
                'schedule job {}'.format(job_event))
            future = self.job_executor_pool.submit(
                JobScheduler.handle_event, job_event['job_id'])
            future.add_done_callback(JobScheduler.get_result)
            all_jobs.append(future)
        except Exception as e:
            schedule_logger().exception(e)
def run_play():
    """CLI entry point for the play executor subprocess.

    Parses job/play identifiers and file paths from argv, then runs the
    playbook via PlayBook.run_play. '--test' maps to ansible check mode
    ('-C'); '--retry' re-runs in retry mode. Exceptions are logged and
    re-raised so the subprocess exits non-zero.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--job_id', required=True, type=str, help='job id')
    parser.add_argument('--play_id', required=True, type=str, help='play id')
    parser.add_argument('--conf_path', required=True, type=str,
                        help='play conf path')
    parser.add_argument('--hosts_path', required=True, type=str,
                        help='play hosts file path')
    parser.add_argument('--test', required=False, action='store_true',
                        help='test mode')
    parser.add_argument('--retry', required=False, action='store_true',
                        help='retry mode')
    args = parser.parse_args()
    schedule_logger(args.job_id).info('enter play executor process')
    schedule_logger(args.job_id).info(args)
    play_args = ['ansible-playbook', '-i', args.hosts_path, args.conf_path]
    if args.test:
        play_args.append('-C')
    try:
        play = PlayBook(args=play_args)
        play.run_play(play_id=args.play_id, retry=args.retry)
    except Exception as e:
        # fix: log with the job id (was bare schedule_logger()), so the
        # failure lands in this job's log instead of the default log
        schedule_logger(args.job_id).exception(e)
        raise
def run_job(job_id):
    """Drive a job end to end: initialize plays, run each one in sequence,
    and roll statuses up to the job.

    A non-SUCCESS play stops the loop and fails the job; if every play
    succeeds (for-else) the job is marked SUCCESS and, outside test mode,
    the party's module registry (PartyInfo) is refreshed.

    :param job_id: id of the job to execute
    :raises Exception: when the local package dir is missing or a play
                       record cannot be found after execution
    """
    job_data = job_utils.get_job_configuration(job_id=job_id)
    stat_logger.info(
        f"in play controller run job func, get job data: {json.dumps(job_data, indent=4)}"
    )
    schedule_logger(job_id).info(
        f"in play controller, func run job: {json.dumps(job_data, indent=4)}"
    )
    play_conf_path_dict = PlayController.initialize_plays(
        job_id=job_id, job_data=job_data)
    stat_logger.info(
        f"in play controller run job func after initialize play\n get play conf path dict: {play_conf_path_dict}"
    )
    # TODO get package dir by version
    package_dir = get_package_dir_by_version(job_data.get('version'))
    # NOTE(review): `and` means only "neither exists nor is a dir" raises —
    # presumably `or` was intended; confirm before changing
    if not os.path.exists(package_dir) and not os.path.isdir(package_dir):
        raise Exception(
            f'Local package directory {package_dir} not exists.')
    job_info = {
        'job_id': job_id,
        'status': JobStatus.RUNNING,
        'start_time': current_timestamp()
    }
    JobSaver.update_job_status(job_info)
    JobSaver.update_job(job_info)
    for play_id, conf_dict in play_conf_path_dict.items():
        # stage the play conf inside the package dir; run; then clean it up
        conf_dict['conf_path'] = shutil.copy2(src=conf_dict['conf_path'],
                                              dst=package_dir)
        PlayController.run_play(
            job_id=job_id,
            play_id=play_id,
            play_conf_path=conf_dict.get('conf_path'),
            play_hosts_path=conf_dict.get('hosts_path'),
            test_mode=TEST_MODE)
        if os.path.exists(conf_dict['conf_path']):
            os.remove(conf_dict['conf_path'])
        plays = JobSaver.query_play(play_id=play_id)
        if plays:
            play = plays[0]
            status = play.f_status
            if status != PlayStatus.SUCCESS:
                if status in [
                        PlayStatus.CANCELED, PlayStatus.FAILED,
                        PlayStatus.TIMEOUT
                ]:
                    # known terminal failure: propagate it as-is
                    update_info = {
                        'job_id': job_id,
                        'play_id': play_id,
                        'status': status,
                        'end_time': current_timestamp()
                    }
                    JobSaver.update_play_status(update_info)
                    JobSaver.update_play(update_info)
                    JobSaver.update_job_status(update_info)
                    JobSaver.update_job(update_info)
                else:
                    # any other status is unexpected: force FAILED
                    update_info = {
                        'job_id': job_id,
                        'play_id': play_id,
                        'status': PlayStatus.FAILED,
                        'end_time': current_timestamp()
                    }
                    schedule_logger(job_id).error(
                        f'Unexpected error occured on play {play_id}, job {job_id} failed, previous status of play: {play.f_status}'
                    )
                    stat_logger.error(
                        f'Unexpected error occured on play {play_id}, job {job_id} failed, previous status of play: {play.f_status}'
                    )
                    JobSaver.update_play_status(update_info)
                    JobSaver.update_play(update_info)
                    JobSaver.update_job_status(update_info)
                    JobSaver.update_job(update_info)
                schedule_logger(job_id).info(
                    f"job {job_id} finished, status is {update_info.get('status')}"
                )
                # a failed play aborts the remaining plays
                break
            else:
                update_info = {
                    'job_id': job_id,
                    'play_id': play_id,
                    'status': PlayStatus.SUCCESS,
                    'end_time': current_timestamp()
                }
                JobSaver.update_play_status(update_info)
                JobSaver.update_play(update_info)
        else:
            raise Exception(f'can not find play {play_id}')
    else:
        # for-else: reached only when no play broke out, i.e. all succeeded
        update_info = {
            'job_id': job_id,
            'status': JobStatus.SUCCESS,
            'end_time': current_timestamp()
        }
        JobSaver.update_job(update_info)
        JobSaver.update_job_status(update_info)
        schedule_logger(job_id).info(
            f"job {job_id} finished, status is {update_info.get('status')}"
        )
        if not TEST_MODE:
            # refresh the party's installed-module registry from the
            # successful plays of this job
            plays = JobSaver.query_play(job_id=job_id,
                                        status=PlayStatus.SUCCESS)
            modules = []
            module_names = []
            for play in plays:
                # module name derived from the stored roles string, e.g. "[some_role]"
                module_name = play.f_roles.strip('[]').replace('_', '')
                module_names.append(module_name)
                modules.append({
                    'name': module_name,
                    'ips': job_data.get('modules', {}).get(module_name,
                                                           {}).get('ips', []),
                    'port': job_data.get('modules', {}).get(module_name,
                                                            {}).get('port', None)
                })
            # parties = PartyInfo.get_or_none(f_version=job_data.get('version'), f_party_id=job_data.get('party_id'))
            parties = PartyInfo.get_or_none(
                f_party_id=job_data.get('party_id'))
            if parties:
                # merge new modules into the stored list: replace by name,
                # append the rest
                module_mapping = dict(zip(module_names, modules))
                stored_modules = parties.f_modules.get("data", [])
                name_map = {}
                for offset, item in enumerate(stored_modules):
                    name_map[item.get('name')] = offset
                for key, value in module_mapping.items():
                    if key in name_map:
                        schedule_logger(job_id).info(
                            f"{key} in name map, in replace process")
                        stored_modules[name_map[key]] = value
                    else:
                        schedule_logger(job_id).info(
                            f"{key} not in name map, in append process ")
                        stored_modules.append(value)
                # update_status = False
                # for offset, module_info in enumerate(stored_modules):
                #     if module_info['name'] in module_mapping:
                #         stored_modules[offset] = module_mapping[module_info['name']]
                #         update_status = True
                for key in ['role', 'version']:
                    # if parties[key] != job_data[key]:
                    #     parties[key] = job_data[key]
                    if getattr(parties, f'f_{key}') != job_data[key]:
                        setattr(parties, f'f_{key}', job_data[key])
                    # update_status = True
                # if update_status:
                parties.f_modules = {'data': stored_modules}
                parties.save()
                DB.commit()
            else:
                # first job for this party: create the registry row
                party_info = PartyInfo()
                # party_info.f_job_id = job_id
                party_info.f_role = job_data.get('role')
                party_info.f_version = job_data.get('version')
                party_info.f_party_id = job_data.get('party_id')
                party_info.f_modules = {'data': modules}
                party_info.save(force_insert=True)
def run_play(job_id, play_id, play_conf_path, play_hosts_path, test_mode=False, retry_mode=False): schedule_logger(job_id).info( f'Trying to start to run play with id: {play_id}') # task_process_start_status = False process_cmd = [ 'python3', sys.modules[PlayExecutor.__module__].__file__, '--job_id', job_id, '--play_id', play_id, '--conf_path', play_conf_path, '--hosts_path', play_hosts_path, ] if test_mode: process_cmd.append('--test') if retry_mode: process_cmd.append('--retry') schedule_logger(job_id).info( f"Trying to start job {job_id}, play {play_id} subprocess.") try: config_dir = file_utils.get_play_directory(play_id) std_dir = file_utils.get_job_log_directory(job_id) p = job_utils.run_subprocess(config_dir=config_dir, process_cmd=process_cmd, log_dir=std_dir) if p: # task_process_start_status = True play_info = { 'pid': p.pid, 'job_id': job_id, 'play_id': play_id, 'status': PlayStatus.RUNNING, 'start_time': current_timestamp() } JobSaver.update_play_status(play_info=play_info) JobSaver.update_play(play_info=play_info) p.wait() else: raise Exception(f'play {play_id} start subprocess failed') except Exception as e: play_info = { 'job_id': job_id, 'play_id': play_id, 'status': PlayStatus.FAILED, 'end_time': current_timestamp() } JobSaver.update_play_status(play_info) JobSaver.update_play(play_info) schedule_logger(job_id).exception(e) finally: if retry_mode: job_info = { 'job_id': job_id, 'play_id': play_id, 'end_time': current_timestamp(), 'status': JobStatus.SUCCESS if PlayController.check_job_status( job_id=job_id) else JobStatus.FAILED } JobSaver.update_job(job_info) JobSaver.update_job_status(job_info)
def create_task(cls, task_info):
    """Insert a new Task row built from ``task_info`` (needs 'job_id', 'task_id')."""
    job_id = task_info['job_id']
    task_id = task_info['task_id']
    schedule_logger(job_id).info(f"Trying to create task {task_id}...")
    cls.create_job_family_entity(Task, task_info)
    schedule_logger(job_id).info(f"Creating task {task_id} successfully.")
def create_play(cls, play_info):
    """Insert a new Play row built from ``play_info`` (needs 'job_id', 'play_id')."""
    job_id = play_info['job_id']
    play_id = play_info['play_id']
    schedule_logger(job_id).info(f"Trying to create play {play_id}...")
    cls.create_job_family_entity(Play, play_info)
    schedule_logger(job_id).info(f"Creating play {play_id} successfully.")