def initialize_plays(job_id, job_data):
    """Generate, persist, and register the play configurations of a job.

    For each play produced by the version-specific adapter, the play conf
    (yml + hosts) is saved to disk and a play record is created via
    PlayController.

    :param job_id: identifier of the job the plays belong to
    :param job_data: raw job payload; its 'version' field selects the adapter
    :return: OrderedDict of saved conf/hosts file paths, or {} if anything failed
    """
    play_conf_path = OrderedDict()
    adapter = job_utils.get_adapter(job_data.get('version'))
    try:
        schedule_logger(job_id).info('Start initializing plays...')
        stat_logger.info('Start initializing plays...')
        play_conf_dict = adapter.generate_play_conf(job_id=job_id, job_data=job_data)
        for play_id, conf in play_conf_dict.items():
            # fixed: this message was previously logged twice in a row
            schedule_logger(job_id).info(
                f'Start create and save play conf, play id: {play_id}')
            path_dict = file_utils.save_play_conf(job_id=job_id,
                                                  play_id=play_id,
                                                  play_conf=conf['yml'],
                                                  play_hosts=conf['hosts'])
            play_conf_path.update(path_dict)
            PlayController.create_play(job_id=job_id,
                                       play_id=play_id,
                                       play_conf=conf['yml'],
                                       play_hosts=conf['hosts'])
            schedule_logger(job_id).info(
                f'Initializing play successfully, play id: {play_id}')
    except Exception as e:
        # log to both the per-job scheduler log and the global stat log,
        # then signal failure to the caller with an empty mapping
        stat_logger.exception(e)
        schedule_logger(job_id).exception(e)
        return {}
    else:
        return play_conf_path
def get_event(self):
    """Block until an event is available on the queue and return it.

    Returns None (after logging) if retrieval raises for any reason.
    """
    try:
        event = self.get(block=True)
    except Exception as e:
        stat_logger.error('get job from queue failed')
        stat_logger.exception(e)
        return None
    stat_logger.info(f'get event from queue successfully: {event}')
    return event
def do_download(data):
    """Download a package tarball, extract it, and record the outcome.

    Streams ``data['url']`` into a temp directory next to ``data['dir']``,
    extracts the archive, moves the extracted tree to ``data['dir']``, and
    persists a Package row tracking status ('running' -> 'success'/'failed')
    and elapsed time.

    :param data: dict with at least 'url', 'dir' and 'version' keys
    """
    path = os.path.abspath(os.path.join(data.get('dir'), os.pardir, f'temp-{data["version"]}'))
    os.makedirs(path, exist_ok=True)
    fp = os.path.join(path, "package.tar.gz")
    url = data.get('url')

    p = Package()
    p.f_status = 'running'
    p.f_version = data.get('version')
    p.f_start_time = current_timestamp()
    p.save(force_insert=True)

    try:
        stat_logger.info('Start downloading process')
        with requests.get(url, stream=True) as req:
            with open(fp, 'wb') as f:
                # stream in 5 KiB chunks to bound memory use
                for chunk in req.iter_content(chunk_size=1024*5):
                    if chunk:
                        f.write(chunk)
    except Exception as e:
        stat_logger.exception(e)
        # BUG FIX: the record previously stayed in 'running' forever when the
        # download itself failed; mark it failed and stamp the timing fields.
        p.f_end_time = current_timestamp()
        p.f_elapsed = p.f_end_time - p.f_start_time
        p.f_status = 'failed'
    else:
        end_time = current_timestamp()
        p.f_end_time = end_time
        p.f_elapsed = p.f_end_time - p.f_start_time
        p.f_status = 'success'
        tar = tarfile.open(fp)
        try:
            # the archive is assumed to contain a single top-level directory
            dir_name = tar.getmembers()[0].name
            tar.extractall(path=path)
            stat_logger.info(f"rename: src: {os.path.join(path, dir_name)}")
            dst = data.get('dir')
            stat_logger.info(f"rename: dst: {dst}")
            os.rename(src=os.path.join(path, dir_name), dst=dst)
            shutil.rmtree(path=path)
        except Exception as e:
            stat_logger.exception(e)
            p.f_status = 'failed'
        finally:
            tar.close()
    # persist the final status regardless of which branch was taken
    p.save()
    DB.commit()
def retry_play(job_id, play_id, test_mode=False):
    """Re-submit a previously executed play for another run.

    Resets the play/job status to WAITING, clears the play's task records,
    copies its conf file back into the versioned package directory, and
    schedules PlayController.run_play on the retry executor pool.

    :param job_id: identifier of the owning job
    :param play_id: identifier of the play to retry
    :param test_mode: forwarded to run_play; presumably a dry-run switch — TODO confirm
    :return: (retcode, message) tuple; retcode 0 on successful submission, 100 otherwise
    """
    plays = JobSaver.query_play(play_id=play_id)
    if not plays:
        return 100, f"Retry play {play_id} failed, can not find such play in database."

    # copy play conf into package dir
    play_conf_path_dict = file_utils.get_play_conf_path(play_id)
    with open(file_utils.get_job_conf_path(job_id), 'r') as f:
        # json.load reads the stream directly (idiomatic vs json.loads(f.read()))
        job_conf = json.load(f)
    package_dir = get_package_dir_by_version(job_conf.get('version'))
    play_conf_path_dict['conf_path'] = shutil.copy2(
        src=play_conf_path_dict['conf_path'], dst=package_dir)

    # reset play and job status so the scheduler treats the play as new
    update_info = {
        'job_id': job_id,
        'play_id': play_id,
        'status': PlayStatus.WAITING,
    }
    JobSaver.update_play_status(update_info)
    JobSaver.update_job_status(update_info)

    # clean task records
    JobSaver.clean_task(play_id=play_id)

    # execute run_play method asynchronously on the retry pool
    try:
        play_retry_executor_pool.submit(
            PlayController.run_play,
            job_id=job_id,
            play_id=play_id,
            play_conf_path=play_conf_path_dict['conf_path'],
            play_hosts_path=play_conf_path_dict['hosts_path'],
            test_mode=test_mode,
            retry_mode=True)
        return 0, f"Start retrying play {play_id}"
    except Exception as e:
        stat_logger.exception(e)
        return 100, f"Retry play {play_id} failed, details: {str(e)}"
def internal_server_error(e):
    """Top-level error handler: log the exception and return a JSON error payload."""
    stat_logger.exception(e)
    detail = str(e)
    return get_json_result(retcode=100, retmsg=detail)