Code Example #1
File: job_utils.py Project: cold-code/FATE-Cloud
def run_subprocess(config_dir, process_cmd, log_dir=None):
    stat_logger.info(f'Starting process command: {process_cmd}')
    stat_logger.info(' '.join(process_cmd))

    os.makedirs(config_dir, exist_ok=True)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    std_log = open(os.path.join(log_dir if log_dir else config_dir, 'std.log'),
                   'a')
    std_err_log = open(
        os.path.join(log_dir if log_dir else config_dir, 'std_err.log'), 'a')
    pid_path = os.path.join(config_dir, 'pid')

    if os.name == 'nt':
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = subprocess.SW_HIDE
    else:
        startupinfo = None
    p = subprocess.Popen(process_cmd,
                         stdout=std_log,
                         stderr=std_err_log,
                         startupinfo=startupinfo)
    with open(pid_path, 'w') as f:
        f.truncate()
        f.write(str(p.pid) + "\n")
        f.flush()
    return p
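
A minimal usage sketch for run_subprocess (the command and directories are hypothetical, not taken from FATE-Cloud):

# hypothetical call site: launch a worker and track its pid
proc = run_subprocess(config_dir='/tmp/demo_job/conf',
                      process_cmd=['python', 'worker.py', '--job-id', 'demo'],
                      log_dir='/tmp/demo_job/logs')
# stdout/stderr are appended to std.log / std_err.log under log_dir,
# and the pid is persisted to /tmp/demo_job/conf/pid for later cleanup
proc.wait()
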
Code Example #2
File: job_queue.py Project: cold-code/FATE-Cloud
 def __init__(self):
     self.ready = True
     self.mutex = threading.Lock()
     self.not_empty = threading.Condition(self.mutex)
     self.not_full = threading.Condition(self.mutex)
     self.maxsize = 0
     stat_logger.info('init queue')
Code Example #3
    def initialize_plays(job_id, job_data):
        play_conf_path = OrderedDict()
        adapter = job_utils.get_adapter(job_data.get('version'))
        try:
            schedule_logger(job_id).info('Start initializing plays...')
            stat_logger.info('Start initializing plays...')

            play_conf_dict = adapter.generate_play_conf(job_id=job_id,
                                                        job_data=job_data)

            for play_id, conf in play_conf_dict.items():
                schedule_logger(job_id).info(
                    f'Start creating and saving play conf, play id: {play_id}')
                path_dict = file_utils.save_play_conf(job_id=job_id,
                                                      play_id=play_id,
                                                      play_conf=conf['yml'],
                                                      play_hosts=conf['hosts'])
                play_conf_path.update(path_dict)
                PlayController.create_play(job_id=job_id,
                                           play_id=play_id,
                                           play_conf=conf['yml'],
                                           play_hosts=conf['hosts'])
                schedule_logger(job_id).info(
                    f'Initialized play successfully, play id: {play_id}')
        except Exception as e:
            stat_logger.exception(e)
            schedule_logger(job_id).exception(e)
            return {}
        else:
            return play_conf_path
Code Example #4
    def submit_job(cls, job_data, job_id=None):
        try:
            if not job_id:
                job_id = job_utils.generate_job_id()
            stat_logger.info(f'Trying to submit job, job_id {job_id}, body {job_data}')
            schedule_logger(job_id).info(f'Trying to submit job, job_id {job_id}, body {job_data}')

            job_utils.check_job_conf(job_data)
            # {'job_conf_path': 'xxx', 'job_runtime_conf_path': 'xxx'}
            job_conf_path = file_utils.save_job_conf(job_id=job_id, job_data=job_data)

            job_info = {
                'job_id': job_id,
                'job_conf': job_data,
                'status': JobStatus.WAITING
            }
            JobSaver.create_job(job_info=job_info)
            RuntimeConfig.JOB_QUEUE.put_event()
            schedule_logger(job_id).info(f"submit job successfully, job id is {job_id}")
            stat_logger.info(f"submit job successfully, job id is {job_id}")
        except Exception:
            stat_logger.error(f"Submit job fail, details: {traceback.format_exc()}")
            return {}, {}
        else:
            return job_id, job_conf_path
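
A hedged usage sketch for submit_job; the snippet above only shows a classmethod, so the JobController class name and the job body below are assumptions:

# hypothetical call site; JobController and the job body are illustrative only
job_data = {'version': '1.4.0', 'role': 'host', 'party_id': 10000, 'modules': {}}
job_id, conf_paths = JobController.submit_job(job_data)
if not job_id:
    print('submission failed, see the stat logger for details')
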
Code Example #5
def save_play_conf(job_id, play_id, play_conf, play_hosts) -> dict:
    try:
        # return {'play_id': {'conf_path': 'xxx', 'hosts_path': 'xxx'}}
        stat_logger.info(f"in save play conf func, play id: {play_id}")
        stat_logger.info(f"in save play conf func, play conf: {play_conf}")
        stat_logger.info(f"in save play conf func, play hosts: {play_hosts}")
        schedule_logger(job_id).info(
            f'Saving play {play_id} conf file and hosts file...')
        stat_logger.info(f'Saving play {play_id} conf file and hosts file...')

        play_conf_path = get_play_conf_path(play_id)
        os.makedirs(os.path.dirname(play_conf_path.get('conf_path')),
                    exist_ok=True)
        with open(play_conf_path.get('conf_path'), 'w') as conf_fp:
            yaml.dump(play_conf, conf_fp, Dumper=yaml.RoundTripDumper)
        schedule_logger(job_id).info(
            f"Saving play {play_id} conf file success, file path {play_conf_path.get('conf_path')}"
        )
        with open(play_conf_path.get('hosts_path'), 'w') as hosts_fp:
            hosts_fp.write(play_hosts)
        schedule_logger(job_id).info(
            f"Saving play {play_id} hosts file success, file path {play_conf_path.get('hosts_path')}"
        )
        stat_logger.info(
            f"Saving play {play_id} hosts file success, file path {play_conf_path.get('hosts_path')}"
        )
        return {
            play_id: {
                "conf_path": play_conf_path.get('conf_path'),
                "hosts_path": play_conf_path.get('hosts_path')
            }
        }
    except Exception:
        stat_logger.error(traceback.format_exc())
        raise
Code Example #6
def save_job_conf(job_id, job_data):
    fp = os.path.join(get_job_directory(job_id), 'job_conf.json')
    stat_logger.info(f"in save job conf, file path: {fp}")
    stat_logger.info(
        f"in save job conf, job data: {json.dumps(job_data, indent=4)}")
    with open(fp, 'w') as f:
        f.write(json.dumps(job_data, indent=4))
    return fp
Code Example #7
File: job_queue.py Project: cold-code/FATE-Cloud
 def get_event(self):
     try:
         job = self.get(block=True)
         stat_logger.info('get event from queue successfully: {}'.format(job))
         return job
     except Exception as e:
         stat_logger.error('get job from queue failed')
         stat_logger.exception(e)
         return None
Code Example #8
def get_play_conf_path(play_id):
    result = {
        'conf_path':
        os.path.join(get_play_directory(play_id), f'{play_id}_conf.yml'),
        'hosts_path':
        os.path.join(get_play_directory(play_id), f'{play_id}_hosts')
    }
    stat_logger.info(
        f"in get play conf path, path dict: {json.dumps(result, indent=4)}")
    return result
Code Example #9
File: job_queue.py Project: cold-code/FATE-Cloud
 def lock(db, lock_name, timeout):
     sql = "SELECT GET_LOCK('%s', %s)" % (lock_name, timeout)
     stat_logger.info(f"lock mysql database, name of lock: {lock_name}")
     ret = db.execute_sql(sql).fetchone()
     if ret[0] == 0:
         raise Exception(f"mysql lock {lock_name} is already used")
     elif ret[0] == 1:
         return True
     else:
         raise Exception(f'unknown mysql lock {lock_name} error occurred!')
Code Example #10
File: job_queue.py Project: cold-code/FATE-Cloud
 def unlock(db, lock_name):
     sql = "SELECT RELEASE_LOCK('%s')" % (lock_name)
     stat_logger.info('unlock mysql, lockname {}'.format(lock_name))
     cursor = db.execute_sql(sql)
     ret = cursor.fetchone()
     if ret[0] == 0:
         raise Exception('mysql lock {} is not released'.format(lock_name))
     elif ret[0] == 1:
         return True
     else:
         raise Exception('mysql lock {} did not exist.'.format(lock_name))
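
The two helpers above wrap MySQL's GET_LOCK/RELEASE_LOCK, so every lock call should be paired with an unlock. A minimal sketch (the try/finally is an addition for safety; the queue code in this project calls them back to back):

JobQueue.lock(DB, 'deploy_server_job_queue', 10)
try:
    pass  # critical section, e.g. pick and update a waiting job
finally:
    JobQueue.unlock(DB, 'deploy_server_job_queue')
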
Code Example #11
File: status_utils.py Project: cold-code/FATE-Cloud
def get_test_log(host, test_type, limit=LOG_LINE_LIMIT):
    shell_cmd = f'tail -{limit} {FATE_ROOT}/{test_type}_test.log'
    cmd = ANSIBLE_SHELL.format(host, shell_cmd)
    result = subprocess.getoutput(cmd)
    res_list = result.split('\n')
    if 'CHANGED' in res_list[0] or 'SUCCESS' in res_list[0]:
        stat_logger.info(
            f"queried log of {test_type} test on host {host} successfully."
        )
        return 0, 'success', res_list[1:]
    stat_logger.error(
        f"failed to query log of {test_type} test on host {host}. Details: {result}"
    )
    return 100, 'failed', []
Code Example #12
File: status_utils.py Project: cold-code/FATE-Cloud
def run_test():
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', required=True, type=str)
    parser.add_argument('--shell_cmd', required=True, type=str)
    parser.add_argument('--test_type', required=True, type=str)
    args = parser.parse_args()

    if not if_exists(args.host, f'{FATE_ROOT}/python/examples'):
        args.shell_cmd = args.shell_cmd.replace('/python/examples',
                                                '/examples')
    if if_exists(args.host, f'{FATE_ROOT}/init_env.sh'):
        shell_cmd = f"source {FATE_ROOT}/init_env.sh && " + args.shell_cmd
    else:
        shell_cmd = f"source {FATE_ROOT}/bin/init_env.sh && " + args.shell_cmd
    cmd = ANSIBLE_SHELL.format(args.host, shell_cmd)
    result = subprocess.getoutput(cmd)
    stat_logger.info(f'{args.test_type}_test response: {result}')
Code Example #13
File: job_utils.py Project: cold-code/FATE-Cloud
def wait_child_process(signum, frame):
    try:
        while True:
            child_pid, status = os.waitpid(-1, os.WNOHANG)
            if child_pid == 0:
                stat_logger.info('no child process was immediately available')
                break
            exitcode = status >> 8
            stat_logger.info('child process %s exit with exitcode %s',
                             child_pid, exitcode)
    except OSError as e:
        if e.errno == errno.ECHILD:
            stat_logger.warning(
                'current process has no existing unwaited-for child processes.'
            )
        else:
            raise
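
The signature matches a Python signal handler, so presumably this function is registered for SIGCHLD to reap finished child processes. A sketch of that registration (an assumption, not shown in the project code; SIGCHLD only exists on POSIX):

import signal

# assumed wiring: reap children spawned via run_subprocess so they do not
# linger as zombies
signal.signal(signal.SIGCHLD, wait_child_process)
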
Code Example #14
File: status_utils.py Project: cold-code/FATE-Cloud
def distribute_pre_check_task(
        host: str,
        task_name: str,
        script_dir: str = '/data/projects/check') -> dict:
    stat_logger.info(
        f"Trying to distribute pre check task to host {host}, task: {task_name}"
    )
    allow_check = PRE_CHECK_ITEM
    if task_name not in allow_check:
        raise Exception(
            f"check task {task_name} not in allow check list, allow list: {allow_check}"
        )
    cmd = f'ansible all -i "{host}," -m shell -a "{script_dir}/{task_name}.sh"'
    result = subprocess.getoutput(cmd)
    res_list = result.split('\n')
    data = []
    if 'CHANGED' in res_list[0] or 'SUCCESS' in res_list[0]:
        stat_logger.info(f"executed pre check on host {host} successfully.")
        for item in res_list[1:]:
            item_list = item.split(': ')
            data.append(dict(zip(['name', 'status', 'details'], item_list)))
        return {'ip': host, 'list': data}
    stat_logger.error(
        f"Failed to execute pre check on host {host}. Details: {result}")
    return {}
Code Example #15
 def run(self):
     if not self.queue.is_ready():
         schedule_logger().error('queue is not ready')
         return False
     all_jobs = []
     while True:
         try:
             schedule_logger().info("Starting in queue detecting loop...")
             if len(all_jobs) == self.concurrent_num:
                 for future in as_completed(all_jobs):
                     all_jobs.remove(future)
                     break
             stat_logger.info("Trying get event...")
             job_event = self.queue.get_event()
             stat_logger.info("get event success")
             schedule_logger(job_event['job_id']).info(
                 'schedule job {}'.format(job_event))
             future = self.job_executor_pool.submit(
                 JobScheduler.handle_event, job_event['job_id'])
             future.add_done_callback(JobScheduler.get_result)
             all_jobs.append(future)
         except Exception as e:
             schedule_logger().exception(e)
Code Example #16
File: status_utils.py Project: cold-code/FATE-Cloud
def distribute_pre_check_script(host: str,
                                dst_dir: str = '/data/projects',
                                mode: str = '0755') -> bool:
    stat_logger.info(
        f"Trying to distribute pre check scripts to host {host}, dst dir is: {dst_dir}"
    )
    tool_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir, 'tools'))
    cmd = f"ansible all -i '{host},' -m copy -a 'src={tool_dir}/check dest={dst_dir} mode={mode}'"
    result = subprocess.getoutput(cmd)
    stat_logger.info(f"subprocess result: {result}")
    stat_logger.info(
        f"Distribute pre check scripts to host {host} {'success' if 'CHANGED' in result or 'SUCCESS' in result else 'failed'}"
    )
    return 'CHANGED' in result or 'SUCCESS' in result
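
Script distribution and task execution are presumably used together: copy the check scripts to the host first, then run one of the allowed checks. A hedged sketch (the host and task name are placeholders, and the task must appear in PRE_CHECK_ITEM):

# hypothetical flow combining distribute_pre_check_script and
# distribute_pre_check_task; '192.168.0.1' and 'network' are placeholders
if distribute_pre_check_script(host='192.168.0.1'):
    report = distribute_pre_check_task(host='192.168.0.1',
                                       task_name='network',
                                       script_dir='/data/projects/check')
    print(report.get('list', []))
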
Code Example #17
File: v1_4_0.py Project: cold-code/FATE-Cloud
    def generate_variable_files(self, job_id: str, count: str, version,
                                conf: dict):
        stat_logger.info(
            f"in generate var file func, conf: {json_dumps(conf, indent=4)}")

        result = []
        job_dir = file_utils.get_job_directory(job_id)
        var_dir = os.path.join(job_dir, f'var_files_{count}')
        role = conf.get('role')
        if role not in ["guest", "host"]:
            raise ValueError(f"role {role} is not supported currently.")
        shutil.copytree(src=self.get_var_files_dir(), dst=var_dir)
        with open(os.path.join(var_dir, f"fate_{role}"), "r") as fin:
            data = yaml.safe_load(fin.read())
        stat_logger.info(
            f"in generate var file func, original data: {json_dumps(data, indent=4)}"
        )

        data[role]['partyid'] = conf.get('party_id')

        travel_key = set()
        for key, value in conf.get('modules').items():
            if key in data[role]:
                travel_key.add(key)
                for item, item_value in value.items():
                    if item in data[role][key]:
                        data[role][key][item] = item_value

        modify_key = set(data.get(role).keys()) - travel_key
        modify_key.remove('partyid')
        if 'eggroll' in modify_key:
            modify_key.remove('eggroll')
        for key in modify_key:
            data.get(role).get(key)['enable'] = False

        stat_logger.info(
            f"in generate var file func, edited data: {json_dumps(data, indent=4)}"
        )
        with open(os.path.join(var_dir, f"fate_{role}"), "w") as fout:
            yaml.dump(data, fout, Dumper=yaml.RoundTripDumper)
        for root, dirs, files in os.walk(var_dir):
            result.extend(
                [os.path.join(os.path.abspath(root), name) for name in files])
        return result
Code Example #18
File: package_app.py Project: cold-code/FATE-Cloud
def do_download(data):
    path = os.path.abspath(os.path.join(data.get('dir'), os.pardir, f'temp-{data["version"]}'))
    os.makedirs(path, exist_ok=True)
    fp = os.path.join(path, "package.tar.gz")
    url = data.get('url')

    p = Package()
    p.f_status = 'running'
    p.f_version = data.get('version')
    p.f_start_time = current_timestamp()
    p.save(force_insert=True)

    try:
        stat_logger.info('Start downloading process')
        with requests.get(url, stream=True) as req:
            with open(fp, 'wb') as f:
                for chunk in req.iter_content(chunk_size=1024*5):
                    if chunk:
                        f.write(chunk)
    except Exception as e:
        stat_logger.exception(e)
        # a failed download leaves nothing to extract, so record the failure
        # and stop instead of opening a missing or partial archive below
        p.f_status = 'failed'
        p.f_end_time = current_timestamp()
        p.save()
        DB.commit()
        return
    else:
        end_time = current_timestamp()
        p.f_end_time = end_time
        p.f_elapsed = p.f_end_time - p.f_start_time
        p.f_status = 'success'

    tar = tarfile.open(fp)
    try:
        dir_name = tar.getmembers()[0].name
        tar.extractall(path=path)
        stat_logger.info(f"rename: src: {os.path.join(path, dir_name)}")
        dst = data.get('dir')

        stat_logger.info(f"rename: dst: {dst}")
        os.rename(src=os.path.join(path, dir_name), dst=dst)
        shutil.rmtree(path=path)
    except Exception as e:
        stat_logger.exception(e)
        p.f_status = 'failed'
    finally:
        tar.close()
        p.save()
        DB.commit()
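
do_download expects a dict carrying at least 'url', 'version' and 'dir'; a hypothetical payload (all values are placeholders):

do_download({
    'url': 'https://example.com/fate-1.4.0.tar.gz',
    'version': '1.4.0',
    'dir': '/data/projects/packages/fate-1.4.0',
})
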
Code Example #19
File: status_utils.py Project: cold-code/FATE-Cloud
def distribute_status_check_task(
        host: str,
        module_name: str = None,
        supervisor_dir='/data/projects/common/supervisord') -> dict:
    if module_name:
        # TODO acquire by fate version
        allow_modules = [
            'fateflow', 'eggroll', 'clustermanager', 'rollsite', 'nodemanager',
            'fateboard', 'mysql'
        ]
        if module_name not in allow_modules:
            raise Exception(
                f"status check module {module_name} not in allow check list, allow list: {allow_modules}"
            )
    stat_logger.info(
        f"Trying to distribute status check task to host {host}, module: {module_name if module_name else 'all'}"
    )
    if module_name:
        cmd = f'ansible all -i "{host}," -m shell -a "sh {supervisor_dir}/service.sh status fate-{module_name}"'
    else:
        cmd = f'ansible all -i "{host}," -m shell -a "sh {supervisor_dir}/service.sh status all"'
    result = subprocess.getoutput(cmd)
    res_list = result.split('\n')
    data = []
    if 'CHANGED' in res_list[0] or 'SUCCESS' in res_list[0]:
        stat_logger.info(f"executed status check on host {host} successfully.")
        for item in res_list[1:]:
            item_list = item.split()
            data.append(
                dict(
                    zip(['name', 'status', 'uptime'], [
                        item_list[0].replace('fate-', ''),
                        item_list[1].lower(), ' '.join(item_list[2:])
                    ])))
        return {'ip': host, 'list': data}
    stat_logger.error(
        f"Failed to execute status check on host {host}. Details: {result}")
    return {}
Code Example #20
File: db_models.py Project: cold-code/FATE-Cloud
 def __init__(self):
     database_config = DATABASE.copy()
     db_name = database_config.pop("name")
     self.database_connection = PooledMySQLDatabase(db_name, **database_config)
     stat_logger.info('init mysql database on cluster mode successfully')
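
The constructor assumes a DATABASE settings dict whose 'name' key is the schema name and whose remaining keys are forwarded to playhouse.pool.PooledMySQLDatabase; a hypothetical shape:

# hypothetical DATABASE settings consumed by the constructor above
DATABASE = {
    'name': 'fate_cloud',
    'host': '127.0.0.1',
    'port': 3306,
    'user': 'root',
    'passwd': '******',
    'max_connections': 10,
}
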
Code Example #21
File: v1_4_0.py Project: cold-code/FATE-Cloud
    def generate_play_conf(self, job_id: str, job_data: dict) -> dict:
        '''
        Separate the job conf into different play confs.
        :param job_id:
        :param job_data:
        :return: {"play_id": (play_conf, hosts)}  OrderedDict
        '''
        result = OrderedDict()
        sequence = self.get_modules_sequence().get('sequence')
        roles_args = self.get_roles_args()
        # play_conf_list = job_data.get('conf_list', [])

        count = 1

        for offset, conf in enumerate([job_data]):
            stat_logger.info(f"in generate play conf:")
            stat_logger.info(f"offset: {offset}")
            stat_logger.info(
                f"No.{offset} conf in cutomize conf list: {json_dumps(conf, indent=4)}"
            )

            var_path_list = self.generate_variable_files(
                job_id, str(offset + 1), job_data.get('version'), conf)
            stat_logger.info(f"sequence: {sequence}")
            for module in sequence:
                stat_logger.info(f"in loop, module: {module}")
                if module in conf.get('modules'):
                    stat_logger.info(f"module {module} is in conf")
                    stat_logger.info(
                        f"result of conf.get(module).get('enable'): {conf.get('modules').get(module).get('enable')}"
                    )
                    # if conf.get('modules').get(module).get('enable'):
                    # generate play id
                    play_id = f"{job_id}_{count}"
                    stat_logger.info(f"play id: {play_id}")
                    # generate template config yaml
                    template_yml = self.get_proj_yml()
                    template_yml[0]['hosts'] = play_id
                    template_yml[0]['vars_files'] = var_path_list
                    template_yml[0]['roles'] = [roles_args.get(module)]
                    stat_logger.info(
                        f"template yaml content: {json_dumps(template_yml, indent=4)}"
                    )
                    # generate template hosts
                    hosts_str = self.get_hosts_template()
                    hosts_str = hosts_str.replace('init', play_id)
                    stat_logger.info(
                        f"in generate play conf, conf.get('modules').get(module).get('ips'): {conf.get('modules').get(module).get('ips')}"
                    )
                    hosts_str = hosts_str.replace(
                        '127.0.0.1',
                        '\n'.join(conf.get('modules').get(module).get('ips')))
                    stat_logger.info(f"hosts string: {hosts_str}")
                    result.update(
                        {play_id: {
                            'yml': template_yml,
                            'hosts': hosts_str,
                        }})
                    stat_logger.info(
                        f"current result dict: {json_dumps(result, indent=4)}")
                    count += 1

        stat_logger.info(
            f'Generating play conf successfully, result is: {json_dumps(result, indent=4)}'
        )
        return result
Code Example #22
    def run_job(job_id):
        job_data = job_utils.get_job_configuration(job_id=job_id)
        stat_logger.info(
            f"in play controller run job func, get job data: {json.dumps(job_data, indent=4)}"
        )
        schedule_logger(job_id).info(
            f"in play controller, func run job: {json.dumps(job_data, indent=4)}"
        )

        play_conf_path_dict = PlayController.initialize_plays(
            job_id=job_id, job_data=job_data)
        stat_logger.info(
            f"in play controller run job func after initialize play\n get play conf path dict: {play_conf_path_dict}"
        )

        # TODO get package dir by version
        package_dir = get_package_dir_by_version(job_data.get('version'))
        # os.path.isdir already returns False for missing paths, so a single
        # check covers both "does not exist" and "is not a directory"
        if not os.path.isdir(package_dir):
            raise Exception(
                f'Local package directory {package_dir} does not exist.')

        job_info = {
            'job_id': job_id,
            'status': JobStatus.RUNNING,
            'start_time': current_timestamp()
        }
        JobSaver.update_job_status(job_info)
        JobSaver.update_job(job_info)

        for play_id, conf_dict in play_conf_path_dict.items():
            conf_dict['conf_path'] = shutil.copy2(src=conf_dict['conf_path'],
                                                  dst=package_dir)
            PlayController.run_play(
                job_id=job_id,
                play_id=play_id,
                play_conf_path=conf_dict.get('conf_path'),
                play_hosts_path=conf_dict.get('hosts_path'),
                test_mode=TEST_MODE)
            if os.path.exists(conf_dict['conf_path']):
                os.remove(conf_dict['conf_path'])
            plays = JobSaver.query_play(play_id=play_id)
            if plays:
                play = plays[0]
                status = play.f_status
                if status != PlayStatus.SUCCESS:
                    if status in [
                            PlayStatus.CANCELED, PlayStatus.FAILED,
                            PlayStatus.TIMEOUT
                    ]:
                        update_info = {
                            'job_id': job_id,
                            'play_id': play_id,
                            'status': status,
                            'end_time': current_timestamp()
                        }
                        JobSaver.update_play_status(update_info)
                        JobSaver.update_play(update_info)
                        JobSaver.update_job_status(update_info)
                        JobSaver.update_job(update_info)
                    else:
                        update_info = {
                            'job_id': job_id,
                            'play_id': play_id,
                            'status': PlayStatus.FAILED,
                            'end_time': current_timestamp()
                        }
                        schedule_logger(job_id).error(
                            f'Unexpected error occurred on play {play_id}, job {job_id} failed, previous status of play: {play.f_status}'
                        )
                        stat_logger.error(
                            f'Unexpected error occurred on play {play_id}, job {job_id} failed, previous status of play: {play.f_status}'
                        )

                        JobSaver.update_play_status(update_info)
                        JobSaver.update_play(update_info)
                        JobSaver.update_job_status(update_info)
                        JobSaver.update_job(update_info)

                        schedule_logger(job_id).info(
                            f"job {job_id} finished, status is {update_info.get('status')}"
                        )
                    break
                else:
                    update_info = {
                        'job_id': job_id,
                        'play_id': play_id,
                        'status': PlayStatus.SUCCESS,
                        'end_time': current_timestamp()
                    }
                    JobSaver.update_play_status(update_info)
                    JobSaver.update_play(update_info)
            else:
                raise Exception(f'can not find play {play_id}')
        # for/else: this branch runs only when the loop finished without a
        # break, i.e. every play succeeded
        else:
            update_info = {
                'job_id': job_id,
                'status': JobStatus.SUCCESS,
                'end_time': current_timestamp()
            }
            JobSaver.update_job(update_info)
            JobSaver.update_job_status(update_info)
            schedule_logger(job_id).info(
                f"job {job_id} finished, status is {update_info.get('status')}"
            )

        if not TEST_MODE:
            plays = JobSaver.query_play(job_id=job_id,
                                        status=PlayStatus.SUCCESS)
            modules = []
            module_names = []
            for play in plays:
                module_name = play.f_roles.strip('[]').replace('_', '')
                module_names.append(module_name)
                modules.append({
                    'name':
                    module_name,
                    'ips':
                    job_data.get('modules', {}).get(module_name,
                                                    {}).get('ips', []),
                    'port':
                    job_data.get('modules', {}).get(module_name,
                                                    {}).get('port', None)
                })

            # parties = PartyInfo.get_or_none(f_version=job_data.get('version'), f_party_id=job_data.get('party_id'))
            parties = PartyInfo.get_or_none(
                f_party_id=job_data.get('party_id'))
            if parties:
                module_mapping = dict(zip(module_names, modules))
                stored_modules = parties.f_modules.get("data", [])

                name_map = {}
                for offset, item in enumerate(stored_modules):
                    name_map[item.get('name')] = offset

                for key, value in module_mapping.items():
                    if key in name_map:
                        schedule_logger(job_id).info(
                            f"{key} in name map, in replace process")
                        stored_modules[name_map[key]] = value
                    else:
                        schedule_logger(job_id).info(
                            f"{key} not in name map, in append process ")
                        stored_modules.append(value)

                # update_status = False
                # for offset, module_info in enumerate(stored_modules):
                #     if module_info['name'] in module_mapping:
                #         stored_modules[offset] = module_mapping[module_info['name']]
                #         update_status = True
                for key in ['role', 'version']:
                    # if parties[key] != job_data[key]:
                    #     parties[key] = job_data[key]
                    if getattr(parties, f'f_{key}') != job_data[key]:
                        setattr(parties, f'f_{key}', job_data[key])
                        # update_status = True
                # if update_status:
                parties.f_modules = {'data': stored_modules}
                parties.save()
                DB.commit()
            else:
                party_info = PartyInfo()
                # party_info.f_job_id = job_id
                party_info.f_role = job_data.get('role')
                party_info.f_version = job_data.get('version')
                party_info.f_party_id = job_data.get('party_id')
                party_info.f_modules = {'data': modules}
                party_info.save(force_insert=True)
Code Example #23
File: job_queue.py Project: cold-code/FATE-Cloud
 def get(self, block=True, timeout=None):
     with self.not_empty:
         if not block:
             stat_logger.info("in queue get func, in if not block condition")
             if not self.query_waiting_jobs():
                 raise Exception
         elif timeout is None:
             stat_logger.info("in queue get func, in timeout is none condition")
             while not self.query_waiting_jobs():
                 stat_logger.info("in queue get func, in timeout is none condition, in while not loop")
                 self.not_empty.wait()
         elif timeout < 0:
             stat_logger.info("in queue get func, in timeout < 0 condition")
             raise ValueError("'timeout' must be a non-negative number")
         else:
             stat_logger.info("in queue get func, in else condition")
             endtime = time() + timeout
             while not self.query_waiting_jobs():
                 remaining = endtime - time()
                 if remaining <= 0.0:
                     raise Exception
                 self.not_empty.wait(remaining)
         stat_logger.info("in queue get func, ready to get in db")
         with DB.connection_context():
             error = None
             JobQueue.lock(DB, 'deploy_server_job_queue', 10)
             try:
                 item = Job.select().where(Job.f_status == JobStatus.WAITING)[0]
                 if item:
                     update_info = {
                         'job_id': item.f_job_id,
                         'status': JobStatus.READY
                     }
                     JobSaver.update_job_status(update_info)
             except Exception as e:
                 error = e
             JobQueue.unlock(DB, 'deploy_server_job_queue')
             if error:
                 raise error
             self.not_full.notify()
             return {
                 'job_id': item.f_job_id,
             }
Code Example #24
    def run_play(self, play_id, retry=False):
        super(PlayBook, self).run()

        # Note: slightly wrong, this is written so that implicit localhost
        # manages passwords
        sshpass = None
        becomepass = None
        passwords = {}

        # initial error check, to make sure all specified playbooks are accessible
        # before we start running anything through the playbook executor

        b_playbook_dirs = []
        # import json
        stat_logger.info(f'context CLIARGS: {context.CLIARGS}')
        # try:
        #     stat_logger.info(f'context cliargs: {json.dumps(context.CLIARGS, indent=4)}')
        # except Exception:
        #     pass
        for playbook in context.CLIARGS['args']:
            stat_logger.info(f'in for loop, playbook: {playbook}')
            if not os.path.exists(playbook):
                raise AnsibleError("the playbook: %s could not be found" % playbook)
            if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
                raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)

            b_playbook_dir = os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict')))
            stat_logger.info(f'b_playbook_dir: {b_playbook_dir}')
            # load plugins from all playbooks in case they add callbacks/inventory/etc
            add_all_plugin_dirs(b_playbook_dir)

            b_playbook_dirs.append(b_playbook_dir)
        stat_logger.info(f'playbook_dirs: {b_playbook_dirs}')

        set_collection_playbook_paths(b_playbook_dirs)

        playbook_collection = get_collection_name_from_path(b_playbook_dirs[0])
        stat_logger.info(f'playbook_collection: {playbook_collection}')

        if playbook_collection:
            display.warning("running playbook inside collection {0}".format(playbook_collection))
            AnsibleCollectionLoader().set_default_collection(playbook_collection)

        # don't deal with privilege escalation or passwords when we don't need to
        if not (context.CLIARGS['listhosts'] or context.CLIARGS['listtasks'] or
                context.CLIARGS['listtags'] or context.CLIARGS['syntax']):
            (sshpass, becomepass) = self.ask_passwords()
            passwords = {'conn_pass': sshpass, 'become_pass': becomepass}

        # create base objects
        loader, inventory, variable_manager = self._play_prereqs()

        # (which is not returned in list_hosts()) is taken into account for
        # warning if inventory is empty.  But it can't be taken into account for
        # checking if limit doesn't match any hosts.  Instead we don't worry about
        # limit if only implicit localhost was in inventory to start with.
        #
        # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
        CLI.get_host_list(inventory, context.CLIARGS['subset'])

        # flush fact cache if requested
        if context.CLIARGS['flush_cache']:
            self._flush_cache(inventory, variable_manager)

        # create the playbook executor, which manages running the plays via a task queue manager
        pbex = PlayBookExecutorEdit(playbooks=context.CLIARGS['args'], inventory=inventory,
                                    variable_manager=variable_manager, loader=loader,
                                    passwords=passwords, play_id=play_id, retry=retry)

        results = pbex.run()

        if isinstance(results, list):
            for p in results:

                display.display('\nplaybook: %s' % p['playbook'])
                for idx, play in enumerate(p['plays']):
                    if play._included_path is not None:
                        loader.set_basedir(play._included_path)
                    else:
                        pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
                        loader.set_basedir(pb_dir)

                    msg = "\n  play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
                    mytags = set(play.tags)
                    msg += '\tTAGS: [%s]' % (','.join(mytags))

                    if context.CLIARGS['listhosts']:
                        playhosts = set(inventory.get_hosts(play.hosts))
                        msg += "\n    pattern: %s\n    hosts (%d):" % (play.hosts, len(playhosts))
                        for host in playhosts:
                            msg += "\n      %s" % host

                    display.display(msg)

                    all_tags = set()
                    if context.CLIARGS['listtags'] or context.CLIARGS['listtasks']:
                        taskmsg = ''
                        if context.CLIARGS['listtasks']:
                            taskmsg = '    tasks:\n'

                        def _process_block(b):
                            taskmsg = ''
                            for task in b.block:
                                if isinstance(task, Block):
                                    taskmsg += _process_block(task)
                                else:
                                    if task.action == 'meta':
                                        continue

                                    all_tags.update(task.tags)
                                    if context.CLIARGS['listtasks']:
                                        cur_tags = list(mytags.union(set(task.tags)))
                                        cur_tags.sort()
                                        if task.name:
                                            taskmsg += "      %s" % task.get_name()
                                        else:
                                            taskmsg += "      %s" % task.action
                                        taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)

                            return taskmsg

                        all_vars = variable_manager.get_vars(play=play)
                        for block in play.compile():
                            block = block.filter_tagged_tasks(all_vars)
                            if not block.has_tasks():
                                continue
                            taskmsg += _process_block(block)

                        if context.CLIARGS['listtags']:
                            cur_tags = list(mytags.union(all_tags))
                            cur_tags.sort()
                            taskmsg += "      TASK TAGS: [%s]\n" % ', '.join(cur_tags)

                        display.display(taskmsg)

            return 0
        else:
            return results