Example #1
def pipeline_dag_dependency(job_info):
    try:
        detect_utils.check_config(job_info, required_arguments=["party_id", "role"])
        component_need_run = {}
        if job_info.get('job_id'):
            jobs = JobSaver.query_job(job_id=job_info["job_id"], party_id=job_info["party_id"], role=job_info["role"])
            if not jobs:
                raise Exception('query job {} failed'.format(job_info.get('job_id', '')))
            job = jobs[0]
            dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl,
                                                           runtime_conf=job.f_runtime_conf_on_party,
                                                           train_runtime_conf=job.f_train_runtime_conf)
            tasks = JobSaver.query_task(job_id=job_info["job_id"], party_id=job_info["party_id"], role=job_info["role"], only_latest=True)
            for task in tasks:
                need_run = task.f_component_parameters.get("ComponentParam", {}).get("need_run", True)
                component_need_run[task.f_component_name] = need_run
        else:
            dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job_info.get('job_dsl', {}),
                                                           runtime_conf=job_info.get('job_runtime_conf', {}),
                                                           train_runtime_conf=job_info.get('job_train_runtime_conf', {}))
        dependency = dsl_parser.get_dependency()
        dependency["component_need_run"] = component_need_run
        return dependency
    except Exception as e:
        stat_logger.exception(e)
        raise e
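Every example here funnels its input through detect_utils.check_config. Judging from the call sites, a plain string in required_arguments means the key must be present, while a (key, expected) tuple also constrains the value to a single allowed value or a collection of allowed values. A minimal sketch of that contract follows, assuming this behaviour; it is not the actual fate_flow implementation:

def check_config(config, required_arguments):
    # Sketch only: plain entries must be present and non-None,
    # (key, expected) tuples must also match one of the expected values.
    missing, invalid = [], []
    for argument in required_arguments:
        if isinstance(argument, tuple):
            key, expected = argument
            allowed = expected if isinstance(expected, (tuple, list, set)) else (expected,)
            if config.get(key) not in allowed:
                invalid.append((key, config.get(key), expected))
        elif config.get(argument) is None:
            missing.append(argument)
    if missing or invalid:
        raise Exception('missing required arguments: {}, invalid arguments: {}'.format(
            missing, invalid))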
Example #2
def submit_job():
    work_mode = request.json.get('job_runtime_conf',
                                 {}).get('job_parameters',
                                         {}).get('work_mode', None)
    detect_utils.check_config({'work_mode': work_mode},
                              required_arguments=[('work_mode',
                                                   (WorkMode.CLUSTER,
                                                    WorkMode.STANDALONE))])
    if work_mode == RuntimeConfig.WORK_MODE:
        job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = JobController.submit_job(
            request.json)
        return get_json_result(retcode=0,
                               retmsg='success',
                               job_id=job_id,
                               data={
                                   'job_dsl_path': job_dsl_path,
                                   'job_runtime_conf_path':
                                   job_runtime_conf_path,
                                   'model_info': model_info,
                                   'board_url': board_url,
                                   'logs_directory': logs_directory
                               })
    else:
        if RuntimeConfig.WORK_MODE == WorkMode.CLUSTER and work_mode == WorkMode.STANDALONE:
            # use cluster standalone job server to execute standalone job
            return request_execute_server(
                request=request,
                execute_host='{}:{}'.format(
                    request.remote_addr, CLUSTER_STANDALONE_JOB_SERVER_PORT))
        else:
            raise Exception(
                'server run on standalone can not support cluster mode job')
Example #3
def download_upload(access_module):
    request_config = request.json
    required_arguments = ['work_mode', 'namespace', 'table_name']
    if access_module == 'upload':
        required_arguments.extend(['file', 'head', 'partition'])
    elif access_module == 'download':
        required_arguments.extend(['output_path'])
    else:
        raise Exception(
            'can not support this operating: {}'.format(access_module))
    detect_utils.check_config(request_config,
                              required_arguments=required_arguments)
    data = {}
    if access_module == "upload":
        data['table_name'] = request_config["table_name"]
        data['namespace'] = request_config["namespace"]
    job_dsl, job_runtime_conf = gen_data_access_job_config(
        request_config, access_module)
    job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = JobController.submit_job(
        {
            'job_dsl': job_dsl,
            'job_runtime_conf': job_runtime_conf
        })
    data.update({
        'job_dsl_path': job_dsl_path,
        'job_runtime_conf_path': job_runtime_conf_path,
        'board_url': board_url,
        'logs_directory': logs_directory
    })
    return get_json_result(job_id=job_id, data=data)
Example #4
def get_component_summary():
    request_data = request.json
    try:
        required_params = ["job_id", "component_name", "role", "party_id"]
        detect_utils.check_config(request_data, required_params)
        tracker = Tracker(job_id=request_data["job_id"],
                          component_name=request_data["component_name"],
                          role=request_data["role"],
                          party_id=request_data["party_id"],
                          task_id=request_data.get("task_id", None),
                          task_version=request_data.get("task_version", None))
        summary = tracker.read_summary_from_db()
        if summary:
            if request_data.get("filename"):
                temp_filepath = os.path.join(TEMP_DIRECTORY,
                                             request_data.get("filename"))
                with open(temp_filepath, "w") as fout:
                    fout.write(json.dumps(summary, indent=4))
                return send_file(
                    open(temp_filepath, "rb"),
                    as_attachment=True,
                    attachment_filename=request_data.get("filename"))
            else:
                return get_json_result(data=summary)
        return error_response(
            210,
            "No component summary found, please check if arguments are specified correctly."
        )
    except Exception as e:
        stat_logger.exception(e)
        return error_response(210, str(e))
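For reference, a hypothetical request body for this endpoint; the key names mirror required_params above, the values are purely illustrative:

request_data = {
    "job_id": "202101011200001234567",   # illustrative
    "component_name": "hetero_lr_0",     # illustrative
    "role": "guest",
    "party_id": 9999,
    # optional: with "filename" set, the summary is written under TEMP_DIRECTORY
    # and returned as a file attachment instead of inline JSON
    "filename": "hetero_lr_0_summary.json"
}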
Example #5
def bind_model_service():
    request_config = request.json
    if request_config.get('job_id', None):
        with DB.connection_context():
            model = MLModel.get_or_none(
                MLModel.f_job_id == request_config.get("job_id"),
                MLModel.f_role == 'guest'
            )
        if model:
            model_info = model.to_json()
            request_config['initiator'] = {}
            request_config['initiator']['party_id'] = str(model_info.get('f_initiator_party_id'))
            request_config['initiator']['role'] = model_info.get('f_initiator_role')
            request_config['job_parameters'] = model_info.get('f_runtime_conf').get('job_parameters')
            request_config['role'] = model_info.get('f_runtime_conf').get('role')
            for key, value in request_config['role'].items():
                for i, v in enumerate(value):
                    value[i] = str(v)
            request_config.pop('job_id')
        else:
            return get_json_result(retcode=101,
                                   retmsg="model {} can not be found in database. "
                                          "Please check if the model version is valid.".format(request_config.get('job_id')))
    if not request_config.get('servings'):
        # get my party all servings
        adapter_servings_config(request_config)
    service_id = request_config.get('service_id')
    if not service_id:
        return get_json_result(retcode=101, retmsg='no service id')
    check_config(request_config, ['initiator', 'role', 'job_parameters'])
    bind_status, retmsg = publish_model.bind_model_service(config_data=request_config)
    operation_record(request_config, "bind", "success" if not bind_status else "failed")
    return get_json_result(retcode=bind_status, retmsg='service id is {}'.format(service_id) if not retmsg else retmsg)
Example #6
def pipeline_dag_dependency(job_info):
    try:
        detect_utils.check_config(job_info,
                                  required_arguments=["party_id", "role"])
        if job_info.get('job_id'):
            jobs = JobSaver.query_job(job_id=job_info["job_id"],
                                      party_id=job_info["party_id"],
                                      role=job_info["role"])
            if not jobs:
                raise Exception('query job {} failed'.format(
                    job_info.get('job_id', '')))
            job = jobs[0]
            job_dsl_parser = schedule_utils.get_job_dsl_parser(
                dsl=job.f_dsl,
                runtime_conf=job.f_runtime_conf,
                train_runtime_conf=job.f_train_runtime_conf)
        else:
            job_dsl_parser = schedule_utils.get_job_dsl_parser(
                dsl=job_info.get('job_dsl', {}),
                runtime_conf=job_info.get('job_runtime_conf', {}),
                train_runtime_conf=job_info.get('job_train_runtime_conf', {}))
        return job_dsl_parser.get_dependency(role=job_info["role"],
                                             party_id=int(
                                                 job_info["party_id"]))
    except Exception as e:
        stat_logger.exception(e)
        raise e
Example #7
def operate_model(model_operation):
    request_config = request.json or request.form.to_dict()
    job_id = generate_job_id()
    if model_operation not in [ModelOperation.STORE, ModelOperation.RESTORE, ModelOperation.EXPORT, ModelOperation.IMPORT]:
        raise Exception('Can not support this operating now: {}'.format(model_operation))
    required_arguments = ["model_id", "model_version", "role", "party_id"]
    check_config(request_config, required_arguments=required_arguments)
    request_config["model_id"] = gen_party_model_id(model_id=request_config["model_id"], role=request_config["role"], party_id=request_config["party_id"])
    if model_operation in [ModelOperation.EXPORT, ModelOperation.IMPORT]:
        if model_operation == ModelOperation.IMPORT:
            file = request.files.get('file')
            file_path = os.path.join(TEMP_DIRECTORY, file.filename)
            try:
                os.makedirs(os.path.dirname(file_path), exist_ok=True)
                file.save(file_path)
            except Exception as e:
                shutil.rmtree(file_path)
                raise e
            request_config['file'] = file_path
            model = pipelined_model.PipelinedModel(model_id=request_config["model_id"], model_version=request_config["model_version"])
            model.unpack_model(file_path)
            return get_json_result()
        else:
            model = pipelined_model.PipelinedModel(model_id=request_config["model_id"], model_version=request_config["model_version"])
            archive_file_path = model.packaging_model()
            return send_file(archive_file_path, attachment_filename=os.path.basename(archive_file_path), as_attachment=True)
    else:
        data = {}
        job_dsl, job_runtime_conf = gen_model_operation_job_config(request_config, model_operation)
        job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = JobController.submit_job(
            {'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}, job_id=job_id)
        data.update({'job_dsl_path': job_dsl_path, 'job_runtime_conf_path': job_runtime_conf_path,
                     'board_url': board_url, 'logs_directory': logs_directory})
        return get_json_result(job_id=job_id, data=data)
Example #8
def validate_component_param():
    if not request.json or not isinstance(request.json, dict):
        return error_response(400, 'bad request')

    required_keys = [
        'component_name',
        'component_module_name',
    ]
    config_keys = ['role']

    dsl_version = int(request.json.get('dsl_version', 0))
    if dsl_version == 1:
        config_keys += ['role_parameters', 'algorithm_parameters']
        parser_class = DSLParser
    elif dsl_version == 2:
        config_keys += ['component_parameters']
        parser_class = DSLParserV2
    else:
        return error_response(400, 'unsupported dsl_version')

    try:
        check_config(request.json, required_keys + config_keys)
    except Exception as e:
        return error_response(400, str(e))

    try:
        parser_class.validate_component_param(
            get_federatedml_setting_conf_directory(),
            {i: request.json[i]
             for i in config_keys}, *[request.json[i] for i in required_keys])
    except Exception as e:
        return error_response(400, str(e))

    return get_json_result()
Example #9
def check_job_runtime_conf(runtime_conf: typing.Dict):
    detect_utils.check_config(runtime_conf, ['initiator', 'job_parameters', 'role'])
    detect_utils.check_config(runtime_conf['initiator'], ['role', 'party_id'])
    # deal party id
    runtime_conf['initiator']['party_id'] = int(runtime_conf['initiator']['party_id'])
    for r in runtime_conf['role'].keys():
        for i in range(len(runtime_conf['role'][r])):
            runtime_conf['role'][r][i] = int(runtime_conf['role'][r][i])
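A hypothetical runtime_conf that passes these checks; the keys follow the validation above, while the roles and party ids are made up. Note that the function mutates its argument, casting every party id to int:

runtime_conf = {
    "initiator": {"role": "guest", "party_id": "9999"},
    "job_parameters": {"work_mode": 1},
    "role": {"guest": ["9999"], "host": ["10000"]}
}
check_job_runtime_conf(runtime_conf)
# afterwards the party ids are plain integers:
# runtime_conf["initiator"]["party_id"] == 9999
# runtime_conf["role"] == {"guest": [9999], "host": [10000]}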
Example #10
def component_output_data_table():
    request_data = request.json
    detect_utils.check_config(config=request_data, required_arguments=['job_id', 'role', 'party_id', 'component_name'])
    jobs = JobSaver.query_job(job_id=request_data.get('job_id'))
    if jobs:
        job = jobs[0]
        return jsonify(FederatedScheduler.tracker_command(job, request_data, 'output/table'))
    else:
        return get_json_result(retcode=100, retmsg='No found job')
Example #11
def get_job_table_list():
    detect_utils.check_config(config=request.json, required_arguments=['job_id', 'role', 'party_id'])
    jobs = JobSaver.query_job(**request.json)
    if jobs:
        job = jobs[0]
        tables = get_job_all_table(job)
        return get_json_result(data=tables)
    else:
        return get_json_result(retcode=101, retmsg='no find job')
Example #12
    def submit_job(job_data):
        job_id = generate_job_id()
        schedule_logger.info('submit job, job_id {}, body {}'.format(job_id, job_data))
        job_dsl = job_data.get('job_dsl', {})
        job_runtime_conf = job_data.get('job_runtime_conf', {})
        job_utils.check_pipeline_job_runtime_conf(job_runtime_conf)
        job_parameters = job_runtime_conf['job_parameters']
        job_initiator = job_runtime_conf['initiator']
        job_type = job_parameters.get('job_type', '')
        if job_type != 'predict':
            # generate job model info
            job_parameters['model_id'] = '#'.join([dtable_utils.all_party_key(job_runtime_conf['role']), 'model'])
            job_parameters['model_version'] = job_id
            train_runtime_conf = {}
        else:
            detect_utils.check_config(job_parameters, ['model_id', 'model_version'])
            # get inference dsl from pipeline model as job dsl
            job_tracker = Tracking(job_id=job_id, role=job_initiator['role'], party_id=job_initiator['party_id'],
                                   model_id=job_parameters['model_id'], model_version=job_parameters['model_version'])
            pipeline_model = job_tracker.get_output_model('pipeline')
            job_dsl = json_loads(pipeline_model['Pipeline'].inference_dsl)
            train_runtime_conf = json_loads(pipeline_model['Pipeline'].train_runtime_conf)
        job_dsl_path, job_runtime_conf_path = save_job_conf(job_id=job_id,
                                                            job_dsl=job_dsl,
                                                            job_runtime_conf=job_runtime_conf)

        job = Job()
        job.f_job_id = job_id
        job.f_roles = json_dumps(job_runtime_conf['role'])
        job.f_work_mode = job_parameters['work_mode']
        job.f_initiator_party_id = job_initiator['party_id']
        job.f_dsl = json_dumps(job_dsl)
        job.f_runtime_conf = json_dumps(job_runtime_conf)
        job.f_train_runtime_conf = json_dumps(train_runtime_conf)
        job.f_run_ip = ''
        job.f_status = JobStatus.WAITING
        job.f_progress = 0
        job.f_create_time = current_timestamp()

        # save job info
        TaskScheduler.distribute_job(job=job, roles=job_runtime_conf['role'], job_initiator=job_initiator)

        # push into queue
        RuntimeConfig.JOB_QUEUE.put_event({
            'job_id': job_id,
            "initiator_role": job_initiator['role'],
            "initiator_party_id": job_initiator['party_id']
        })
        schedule_logger.info(
            'submit job successfully, job id is {}, model id is {}'.format(job.f_job_id, job_parameters['model_id']))
        board_url = BOARD_DASHBOARD_URL.format(job_id, job_initiator['role'], job_initiator['party_id'])
        return job_id, job_dsl_path, job_runtime_conf_path, {'model_id': job_parameters['model_id'],
                                                             'model_version': job_parameters[
                                                                 'model_version']}, board_url
Example #13
def download_upload(access_module):
    job_id = job_utils.generate_job_id()
    if access_module == "upload" and UPLOAD_DATA_FROM_CLIENT and not (request.json and request.json.get("use_local_data") == 0):
        file = request.files['file']
        filename = os.path.join(job_utils.get_job_directory(job_id), 'fate_upload_tmp', file.filename)
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        try:
            file.save(filename)
        except Exception as e:
            shutil.rmtree(os.path.join(job_utils.get_job_directory(job_id), 'fate_upload_tmp'))
            raise e
        job_config = request.args.to_dict()
        if "namespace" in job_config and "table_name" in job_config:
            pass
        else:
            # higher than version 1.5.1, support eggroll run parameters
            job_config = json_loads(list(job_config.keys())[0])
        job_config['file'] = filename
    else:
        job_config = request.json
    required_arguments = ['work_mode', 'namespace', 'table_name']
    if access_module == 'upload':
        required_arguments.extend(['file', 'head', 'partition'])
    elif access_module == 'download':
        required_arguments.extend(['output_path'])
    else:
        raise Exception('can not support this operating: {}'.format(access_module))
    detect_utils.check_config(job_config, required_arguments=required_arguments)
    data = {}
    # compatibility
    if "table_name" in job_config:
        job_config["name"] = job_config["table_name"]
    if "backend" not in job_config:
        job_config["backend"] = 0
    for _ in ["work_mode", "backend", "head", "partition", "drop"]:
        if _ in job_config:
            job_config[_] = int(job_config[_])
    if access_module == "upload":
        if job_config.get('drop', 0) == 1:
            job_config["destroy"] = True
        else:
            job_config["destroy"] = False
        data['table_name'] = job_config["table_name"]
        data['namespace'] = job_config["namespace"]
        data_table_meta = storage.StorageTableMeta(name=job_config["table_name"], namespace=job_config["namespace"])
        if data_table_meta and not job_config["destroy"]:
            return get_json_result(retcode=100,
                                   retmsg='The data table already exists.'
                                          'If you still want to continue uploading, please add the parameter -drop.'
                                          ' 0 means not to delete and continue uploading, '
                                          '1 means to upload again after deleting the table')
    job_dsl, job_runtime_conf = gen_data_access_job_config(job_config, access_module)
    submit_result = DAGScheduler.submit({'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}, job_id=job_id)
    data.update(submit_result)
    return get_json_result(job_id=job_id, data=data)
Example #14
def submit_job():
    work_mode = JobRuntimeConfigAdapter(
        request.json.get('job_runtime_conf', {})).get_job_work_mode()
    detect_utils.check_config({'work_mode': work_mode},
                              required_arguments=[('work_mode',
                                                   (WorkMode.CLUSTER,
                                                    WorkMode.STANDALONE))])
    submit_result = DAGScheduler.submit(request.json)
    return get_json_result(retcode=0,
                           retmsg='success',
                           job_id=submit_result.get("job_id"),
                           data=submit_result)
Example #15
def bind_model_service():
    request_config = request.json
    if request_config.get('job_id', None):
        retcode, retmsg, res_data = model_utils.query_model_info(
            request_config['job_id'], 'guest')
        if res_data:
            model_info = res_data[0]
            request_config['initiator'] = {}
            request_config['initiator']['party_id'] = str(
                model_info.get('f_initiator_party_id'))
            request_config['initiator']['role'] = model_info.get(
                'f_initiator_role')

            runtime_conf = model_info.get(
                'f_runtime_conf', {}) if model_info.get(
                    'f_runtime_conf', {}) else model_info.get(
                        'f_train_runtime_conf', {})
            adapter = JobRuntimeConfigAdapter(runtime_conf)
            job_parameters = adapter.get_common_parameters().to_dict()
            request_config[
                'job_parameters'] = job_parameters if job_parameters else model_info.get(
                    'f_train_runtime_conf', {}).get('job_parameters')

            roles = runtime_conf.get('role')
            request_config['role'] = roles if roles else model_info.get(
                'f_train_runtime_conf', {}).get('role')

            for key, value in request_config['role'].items():
                for i, v in enumerate(value):
                    value[i] = str(v)
            request_config.pop('job_id')
        else:
            return get_json_result(
                retcode=101,
                retmsg="model {} can not be found in database. "
                "Please check if the model version is valid.".format(
                    request_config.get('job_id')))
    if not request_config.get('servings'):
        # get my party all servings
        request_config['servings'] = RuntimeConfig.SERVICE_DB.get_urls(
            'servings')
    service_id = request_config.get('service_id')
    if not service_id:
        return get_json_result(retcode=101, retmsg='no service id')
    detect_utils.check_config(request_config,
                              ['initiator', 'role', 'job_parameters'])
    bind_status, retmsg = publish_model.bind_model_service(request_config)
    operation_record(request_config, "bind",
                     "success" if not bind_status else "failed")
    return get_json_result(
        retcode=bind_status,
        retmsg='service id is {}'.format(service_id) if not retmsg else retmsg)
Example #16
def load_checkpoints():
    required_args = [
        'role', 'party_id', 'model_id', 'model_version', 'component_name'
    ]
    try:
        check_config(request.json, required_args)
    except Exception as e:
        abort(error_response(400, str(e)))

    checkpoint_manager = CheckpointManager(
        **{i: request.json[i] for i in required_args},
        mkdir=False)
    checkpoint_manager.load_checkpoints_from_disk()
    return checkpoint_manager
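A hypothetical JSON body for this endpoint; the keys come from required_args above and the identifiers are illustrative only:

{
    "role": "guest",
    "party_id": 9999,
    "model_id": "guest-9999#host-10000#model",
    "model_version": "202101011200001234567",
    "component_name": "hetero_lr_0"
}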
Example #17
def get_url():
    request_data = request.json
    detect_utils.check_config(
        config=request_data, required_arguments=['job_id', 'role', 'party_id'])
    jobs = JobSaver.query_job(job_id=request_data.get('job_id'),
                              role=request_data.get('role'),
                              party_id=request_data.get('party_id'))
    if jobs:
        board_urls = []
        for job in jobs:
            board_url = job_utils.get_board_url(job.f_job_id, job.f_role,
                                                job.f_party_id)
            board_urls.append(board_url)
        return get_json_result(data={'board_url': board_urls})
    else:
        return get_json_result(retcode=101, retmsg='no found job')
Example #18
def table_add():
    request_data = request.json
    detect_utils.check_config(request_data,
                              required_arguments=[
                                  "engine", "address", "namespace", "name",
                                  ("head", (0, 1)), "id_delimiter"
                              ])
    address_dict = request_data.get('address')
    engine = request_data.get('engine')
    name = request_data.get('name')
    namespace = request_data.get('namespace')
    address = storage.StorageTableMeta.create_address(
        storage_engine=engine, address_dict=address_dict)
    in_serialized = request_data.get(
        "in_serialized", 1 if engine in {
            storage.StorageEngine.STANDALONE, storage.StorageEngine.EGGROLL,
            storage.StorageEngine.MYSQL
        } else 0)
    destroy = (int(request_data.get("drop", 0)) == 1)
    data_table_meta = storage.StorageTableMeta(name=name, namespace=namespace)
    if data_table_meta:
        if destroy:
            data_table_meta.destroy_metas()
        else:
            return get_json_result(
                retcode=100,
                retmsg='The data table already exists.'
                'If you still want to continue uploading, please add the parameter -drop.'
                '1 means to add again after deleting the table')
    id_name = request_data.get("id_name")
    feature_name = request_data.get("feature_name")
    schema = None
    if id_name and feature_name:
        schema = {'header': feature_name, 'sid': id_name}
    with storage.Session.build(
            storage_engine=engine,
            options=request_data.get("options")) as storage_session:
        storage_session.create_table(
            address=address,
            name=name,
            namespace=namespace,
            partitions=request_data.get('partitions', None),
            hava_head=request_data.get("head"),
            id_delimiter=request_data.get("id_delimiter"),
            in_serialized=in_serialized,
            schema=schema)
    return get_json_result(data={"table_name": name, "namespace": namespace})
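A hypothetical table_add request; the top-level keys match the required_arguments list above, while the address fields are engine-specific and shown here for illustration only:

{
    "engine": "MYSQL",
    "address": {"user": "fate", "passwd": "fate", "host": "127.0.0.1",
                "port": 3306, "db": "experiment", "name": "breast_hetero_guest"},
    "namespace": "experiment",
    "name": "breast_hetero_guest",
    "head": 1,
    "id_delimiter": ",",
    "drop": 0
}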
Example #19
def get_predict_conf():
    request_data = request.json
    required_parameters = ['model_id', 'model_version']
    check_config(request_data, required_parameters)
    model_dir = os.path.join(get_project_base_directory(), 'model_local_cache')
    model_fp_list = glob.glob(
        model_dir +
        f"/guest#*#{request_data['model_id']}/{request_data['model_version']}")
    if model_fp_list:
        fp = model_fp_list[0]
        pipeline_model = PipelinedModel(model_id=fp.split('/')[-2],
                                        model_version=fp.split('/')[-1])
        pipeline = pipeline_model.read_component_model('pipeline',
                                                       'pipeline')['Pipeline']
        predict_dsl = json_loads(pipeline.inference_dsl)

        train_runtime_conf = json_loads(pipeline.train_runtime_conf)
        parser = schedule_utils.get_dsl_parser_by_version(
            train_runtime_conf.get('dsl_version', '1'))
        predict_conf = parser.generate_predict_conf_template(
            predict_dsl=predict_dsl,
            train_conf=train_runtime_conf,
            model_id=request_data['model_id'],
            model_version=request_data['model_version'])
    else:
        predict_conf = ''
    if predict_conf:
        if request_data.get("filename"):
            os.makedirs(TEMP_DIRECTORY, exist_ok=True)
            temp_filepath = os.path.join(TEMP_DIRECTORY,
                                         request_data.get("filename"))
            with open(temp_filepath, "w") as fout:
                fout.write(json_dumps(predict_conf, indent=4))
            return send_file(open(temp_filepath, "rb"),
                             as_attachment=True,
                             attachment_filename=request_data.get("filename"))
        else:
            return get_json_result(data=predict_conf)
    return error_response(
        210,
        "No model found, please check if arguments are specified correctly.")
Example #20
def submit_job():
    work_mode = request.json.get('job_runtime_conf',
                                 {}).get('job_parameters',
                                         {}).get('work_mode', None)
    detect_utils.check_config({'work_mode': work_mode},
                              required_arguments=[('work_mode',
                                                   (WorkMode.CLUSTER,
                                                    WorkMode.STANDALONE))])
    job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = DAGScheduler.submit(
        request.json)
    return get_json_result(retcode=0,
                           retmsg='success',
                           job_id=job_id,
                           data={
                               'job_dsl_path': job_dsl_path,
                               'job_runtime_conf_path': job_runtime_conf_path,
                               'model_info': model_info,
                               'board_url': board_url,
                               'logs_directory': logs_directory
                           })
Example #21
def download_upload(data_func):
    request_config = request.json
    _job_id = generate_job_id()
    stat_logger.info('generated job_id {}, body {}'.format(_job_id, request_config))
    _job_dir = get_job_directory(_job_id)
    os.makedirs(_job_dir, exist_ok=True)
    module = data_func
    required_arguments = ['work_mode', 'namespace', 'table_name']
    if module == 'upload':
        required_arguments.extend(['file', 'head', 'partition'])
    elif module == 'download':
        required_arguments.extend(['output_path'])
    else:
        raise Exception('can not support this operating: {}'.format(module))
    detect_utils.check_config(request_config, required_arguments=required_arguments)
    if module == "upload":
        if not os.path.isabs(request_config['file']):
            request_config["file"] = os.path.join(file_utils.get_project_base_directory(), request_config["file"])
    try:
        conf_file_path = new_runtime_conf(job_dir=_job_dir, method=data_func, module=module,
                                          role=request_config.get('local', {}).get("role"),
                                          party_id=request_config.get('local', {}).get("party_id", ''))
        file_utils.dump_json_conf(request_config, conf_file_path)
        progs = ["python3",
                 os.path.join(file_utils.get_project_base_directory(), JOB_MODULE_CONF[module]["module_path"]),
                 "-j", _job_id,
                 "-c", conf_file_path
                 ]
        try:
            p = run_subprocess(config_dir=_job_dir, process_cmd=progs)
        except Exception as e:
            stat_logger.exception(e)
            p = None
        return get_json_result(retcode=(0 if p else 101), job_id=_job_id,
                               data={'table_name': request_config['table_name'],
                                     'namespace': request_config['namespace'], 'pid': p.pid if p else ''})
    except Exception as e:
        stat_logger.exception(e)
        return get_json_result(retcode=-104, retmsg="failed", job_id=_job_id)
Example #22
def check_pipeline_job_runtime_conf(runtime_conf: typing.Dict):
    detect_utils.check_config(runtime_conf,
                              ['initiator', 'job_parameters', 'role'])
    detect_utils.check_config(runtime_conf['initiator'], ['role', 'party_id'])
    detect_utils.check_config(runtime_conf['job_parameters'],
                              [('work_mode', RuntimeConfig.WORK_MODE)])
    # deal party id
    runtime_conf['initiator']['party_id'] = int(
        runtime_conf['initiator']['party_id'])
    for r in runtime_conf['role'].keys():
        for i in range(len(runtime_conf['role'][r])):
            runtime_conf['role'][r][i] = int(runtime_conf['role'][r][i])
Example #23
def deploy():
    request_data = request.json
    require_parameters = ['model_id', 'model_version']
    check_config(request_data, require_parameters)
    model_id = request_data.get("model_id")
    model_version = request_data.get("model_version")
    retcode, retmsg, model_info = model_utils.query_model_info_from_file(
        model_id=model_id, model_version=model_version, to_dict=True)
    if not model_info:
        raise Exception(
            f'Deploy model failed, no model {model_id} {model_version} found.')
    else:
        for key, value in model_info.items():
            version_check = model_utils.compare_version(
                value.get('f_fate_version'), '1.5.0')
            if version_check == 'lt':
                continue
            else:
                init_role = key.split('/')[-2].split('#')[0]
                init_party_id = key.split('/')[-2].split('#')[1]
                model_init_role = value.get('f_initiator_role') if value.get(
                    'f_initiator_role') else value.get(
                        'f_train_runtime_conf', {}).get('initiator', {}).get(
                            'role', '')
                model_init_party_id = value.get(
                    'f_initiator_role_party_id') if value.get(
                        'f_initiator_role_party_id') else value.get(
                            'f_train_runtime_conf', {}).get(
                                'initiator', {}).get('party_id', '')
                if (init_role
                        == model_init_role) and (init_party_id
                                                 == str(model_init_party_id)):
                    break
        else:
            raise Exception(
                "Deploy model failed, can not found model of initiator role or the fate version of model is older than 1.5.0"
            )

        # distribute federated deploy task
        _job_id = job_utils.generate_job_id()
        request_data['child_model_version'] = _job_id

        initiator_party_id = model_init_party_id
        initiator_role = model_init_role
        request_data['initiator'] = {
            'role': initiator_role,
            'party_id': initiator_party_id
        }
        deploy_status = True
        deploy_status_info = {}
        deploy_status_msg = 'success'
        deploy_status_info['detail'] = {}

        for role_name, role_partys in value.get("f_train_runtime_conf",
                                                {}).get('role', {}).items():
            if role_name not in ['arbiter', 'host', 'guest']:
                continue
            deploy_status_info[role_name] = deploy_status_info.get(
                role_name, {})
            deploy_status_info['detail'][role_name] = {}
            adapter = JobRuntimeConfigAdapter(
                value.get("f_train_runtime_conf", {}))
            work_mode = adapter.get_job_work_mode()

            for _party_id in role_partys:
                request_data['local'] = {
                    'role': role_name,
                    'party_id': _party_id
                }
                try:
                    response = federated_api(
                        job_id=_job_id,
                        method='POST',
                        endpoint='/model/deploy/do',
                        src_party_id=initiator_party_id,
                        dest_party_id=_party_id,
                        src_role=initiator_role,
                        json_body=request_data,
                        federated_mode=FederatedMode.MULTIPLE
                        if work_mode else FederatedMode.SINGLE)
                    deploy_status_info[role_name][_party_id] = response[
                        'retcode']
                    detail = {_party_id: {}}
                    detail[_party_id]['retcode'] = response['retcode']
                    detail[_party_id]['retmsg'] = response['retmsg']
                    deploy_status_info['detail'][role_name].update(detail)
                    if response['retcode']:
                        deploy_status = False
                        deploy_status_msg = 'failed'
                except Exception as e:
                    stat_logger.exception(e)
                    deploy_status = False
                    deploy_status_msg = 'failed'
                    deploy_status_info[role_name][_party_id] = 100

        deploy_status_info['model_id'] = request_data['model_id']
        deploy_status_info['model_version'] = _job_id
        return get_json_result(retcode=(0 if deploy_status else 101),
                               retmsg=deploy_status_msg,
                               data=deploy_status_info)
Example #24
def operate_model(model_operation):
    request_config = request.json or request.form.to_dict()
    job_id = job_utils.generate_job_id()
    if model_operation not in [
            ModelOperation.STORE, ModelOperation.RESTORE,
            ModelOperation.EXPORT, ModelOperation.IMPORT
    ]:
        raise Exception(
            'Can not support this operating now: {}'.format(model_operation))
    required_arguments = ["model_id", "model_version", "role", "party_id"]
    check_config(request_config, required_arguments=required_arguments)
    request_config["model_id"] = gen_party_model_id(
        model_id=request_config["model_id"],
        role=request_config["role"],
        party_id=request_config["party_id"])
    if model_operation in [ModelOperation.EXPORT, ModelOperation.IMPORT]:
        if model_operation == ModelOperation.IMPORT:
            try:
                file = request.files.get('file')
                file_path = os.path.join(TEMP_DIRECTORY, file.filename)
                # if not os.path.exists(file_path):
                #     raise Exception('The file is obtained from the fate flow client machine, but it does not exist, '
                #                     'please check the path: {}'.format(file_path))
                try:
                    os.makedirs(os.path.dirname(file_path), exist_ok=True)
                    file.save(file_path)
                except Exception as e:
                    shutil.rmtree(file_path)
                    raise e
                request_config['file'] = file_path
                model = pipelined_model.PipelinedModel(
                    model_id=request_config["model_id"],
                    model_version=request_config["model_version"])
                model.unpack_model(file_path)

                pipeline = model.read_component_model('pipeline',
                                                      'pipeline')['Pipeline']
                train_runtime_conf = json_loads(pipeline.train_runtime_conf)
                permitted_party_id = []
                for key, value in train_runtime_conf.get('role', {}).items():
                    for v in value:
                        permitted_party_id.extend([v, str(v)])
                if request_config["party_id"] not in permitted_party_id:
                    shutil.rmtree(model.model_path)
                    raise Exception(
                        "party id {} is not in model roles, please check if the party id is valid.".format(
                            request_config["party_id"]))
                try:
                    adapter = JobRuntimeConfigAdapter(train_runtime_conf)
                    job_parameters = adapter.get_common_parameters().to_dict()
                    with DB.connection_context():
                        db_model = MLModel.get_or_none(
                            MLModel.f_job_id == job_parameters.get(
                                "model_version"),
                            MLModel.f_role == request_config["role"])
                    if not db_model:
                        model_info = model_utils.gather_model_info_data(model)
                        model_info['imported'] = 1
                        model_info['job_id'] = model_info['f_model_version']
                        model_info['size'] = model.calculate_model_file_size()
                        model_info['role'] = request_config["model_id"].split(
                            '#')[0]
                        model_info['party_id'] = request_config[
                            "model_id"].split('#')[1]
                        if model_utils.compare_version(
                                model_info['f_fate_version'], '1.5.1') == 'lt':
                            model_info['roles'] = model_info.get(
                                'f_train_runtime_conf', {}).get('role', {})
                            model_info['initiator_role'] = model_info.get(
                                'f_train_runtime_conf',
                                {}).get('initiator', {}).get('role')
                            model_info['initiator_party_id'] = model_info.get(
                                'f_train_runtime_conf',
                                {}).get('initiator', {}).get('party_id')
                            model_info[
                                'work_mode'] = adapter.get_job_work_mode()
                            model_info['parent'] = False if model_info.get(
                                'f_inference_dsl') else True
                        model_utils.save_model_info(model_info)
                    else:
                        stat_logger.info(
                            f'job id: {job_parameters.get("model_version")}, '
                            f'role: {request_config["role"]} model info already existed in database.'
                        )
                except peewee.IntegrityError as e:
                    stat_logger.exception(e)
                operation_record(request_config, "import", "success")
                return get_json_result()
            except Exception:
                operation_record(request_config, "import", "failed")
                raise
        else:
            try:
                model = pipelined_model.PipelinedModel(
                    model_id=request_config["model_id"],
                    model_version=request_config["model_version"])
                if model.exists():
                    archive_file_path = model.packaging_model()
                    operation_record(request_config, "export", "success")
                    return send_file(archive_file_path,
                                     attachment_filename=os.path.basename(
                                         archive_file_path),
                                     as_attachment=True)
                else:
                    operation_record(request_config, "export", "failed")
                    res = error_response(
                        response_code=210,
                        retmsg="Model {} {} is not exist.".format(
                            request_config.get("model_id"),
                            request_config.get("model_version")))
                    return res
            except Exception as e:
                operation_record(request_config, "export", "failed")
                stat_logger.exception(e)
                return error_response(response_code=210, retmsg=str(e))
    else:
        data = {}
        job_dsl, job_runtime_conf = gen_model_operation_job_config(
            request_config, model_operation)
        submit_result = DAGScheduler.submit(
            {
                'job_dsl': job_dsl,
                'job_runtime_conf': job_runtime_conf
            },
            job_id=job_id)
        data.update(submit_result)
        operation_record(data=job_runtime_conf,
                         oper_type=model_operation,
                         oper_status='')
        return get_json_result(job_id=job_id, data=data)
Example #25
def migrate_model_process():
    request_config = request.json
    _job_id = job_utils.generate_job_id()
    initiator_party_id = request_config['migrate_initiator']['party_id']
    initiator_role = request_config['migrate_initiator']['role']
    if not request_config.get("unify_model_version"):
        request_config["unify_model_version"] = _job_id
    migrate_status = True
    migrate_status_info = {}
    migrate_status_msg = 'success'
    migrate_status_info['detail'] = {}

    require_arguments = [
        "migrate_initiator", "role", "migrate_role", "model_id",
        "model_version", "execute_party", "job_parameters"
    ]
    check_config(request_config, require_arguments)

    try:
        if compare_roles(request_config.get("migrate_role"),
                         request_config.get("role")):
            return get_json_result(
                retcode=100,
                retmsg=
                "The config of previous roles is the same with that of migrate roles. "
                "There is no need to migrate model. Migration process aborting."
            )
    except Exception as e:
        return get_json_result(retcode=100, retmsg=str(e))

    local_template = {"role": "", "party_id": "", "migrate_party_id": ""}

    res_dict = {}

    for role_name, role_partys in request_config.get("migrate_role").items():
        for offset, party_id in enumerate(role_partys):
            local_res = deepcopy(local_template)
            local_res["role"] = role_name
            local_res["party_id"] = request_config.get("role").get(
                role_name)[offset]
            local_res["migrate_party_id"] = party_id
            if not res_dict.get(role_name):
                res_dict[role_name] = {}
            res_dict[role_name][local_res["party_id"]] = local_res

    for role_name, role_partys in request_config.get("execute_party").items():
        migrate_status_info[role_name] = migrate_status_info.get(role_name, {})
        migrate_status_info['detail'][role_name] = {}
        for party_id in role_partys:
            request_config["local"] = res_dict.get(role_name).get(party_id)
            try:
                response = federated_api(
                    job_id=_job_id,
                    method='POST',
                    endpoint='/model/migrate/do',
                    src_party_id=initiator_party_id,
                    dest_party_id=party_id,
                    src_role=initiator_role,
                    json_body=request_config,
                    federated_mode=request_config['job_parameters']
                    ['federated_mode'])
                migrate_status_info[role_name][party_id] = response['retcode']
                detail = {party_id: {}}
                detail[party_id]['retcode'] = response['retcode']
                detail[party_id]['retmsg'] = response['retmsg']
                migrate_status_info['detail'][role_name].update(detail)
            except Exception as e:
                stat_logger.exception(e)
                migrate_status = False
                migrate_status_msg = 'failed'
                migrate_status_info[role_name][party_id] = 100
    return get_json_result(job_id=_job_id,
                           retcode=(0 if migrate_status else 101),
                           retmsg=migrate_status_msg,
                           data=migrate_status_info)
Example #26
def call_fun(func, config_data, dsl_path, config_path):
    ip = server_conf.get(SERVERS).get(ROLE).get('host')
    if ip in ['localhost', '127.0.0.1']:
        ip = get_lan_ip()
    http_port = server_conf.get(SERVERS).get(ROLE).get('http.port')
    server_url = "http://{}:{}/{}".format(ip, http_port, API_VERSION)

    if func in JOB_OPERATE_FUNC:
        if func == 'submit_job':
            if not config_path:
                raise Exception('the following arguments are required: {}'.format('runtime conf path'))
            dsl_data = {}
            if dsl_path or config_data.get('job_parameters', {}).get('job_type', '') == 'predict':
                if dsl_path:
                    dsl_path = os.path.abspath(dsl_path)
                    with open(dsl_path, 'r') as f:
                        dsl_data = json.load(f)
            else:
                raise Exception('the following arguments are required: {}'.format('dsl path'))
            post_data = {'job_dsl': dsl_data,
                         'job_runtime_conf': config_data}
            response = requests.post("/".join([server_url, "job", func.rstrip('_job')]), json=post_data)
            try:
                if response.json()['retcode'] == 999:
                    start_cluster_standalone_job_server()
                    response = requests.post("/".join([server_url, "job", func.rstrip('_job')]), json=post_data)
            except:
                pass
        elif func == 'data_view_query' or func == 'clean_queue':
            response = requests.post("/".join([server_url, "job", func.replace('_', '/')]), json=config_data)
        else:
            if func != 'query_job':
                detect_utils.check_config(config=config_data, required_arguments=['job_id'])
            post_data = config_data
            response = requests.post("/".join([server_url, "job", func.rstrip('_job')]), json=post_data)
            if func == 'query_job':
                response = response.json()
                if response['retcode'] == 0:
                    for i in range(len(response['data'])):
                        del response['data'][i]['f_runtime_conf']
                        del response['data'][i]['f_dsl']
    elif func in JOB_FUNC:
        if func == 'job_config':
            detect_utils.check_config(config=config_data, required_arguments=['job_id', 'role', 'party_id', 'output_path'])
            response = requests.post("/".join([server_url, func.replace('_', '/')]), json=config_data)
            response_data = response.json()
            if response_data['retcode'] == 0:
                job_id = response_data['data']['job_id']
                download_directory = os.path.join(config_data['output_path'], 'job_{}_config'.format(job_id))
                os.makedirs(download_directory, exist_ok=True)
                for k, v in response_data['data'].items():
                    if k == 'job_id':
                        continue
                    with open('{}/{}.json'.format(download_directory, k), 'w') as fw:
                        json.dump(v, fw, indent=4)
                del response_data['data']['dsl']
                del response_data['data']['runtime_conf']
                response_data['directory'] = download_directory
                response_data['retmsg'] = 'download successfully, please check {} directory'.format(download_directory)
                response = response_data
        elif func == 'job_log':
            detect_utils.check_config(config=config_data, required_arguments=['job_id', 'output_path'])
            job_id = config_data['job_id']
            tar_file_name = 'job_{}_log.tar.gz'.format(job_id)
            extract_dir = os.path.join(config_data['output_path'], 'job_{}_log'.format(job_id))
            with closing(requests.get("/".join([server_url, func.replace('_', '/')]), json=config_data,
                                      stream=True)) as response:
                if response.status_code == 200:
                    download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
                    response = {'retcode': 0,
                                'directory': extract_dir,
                                'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
                else:
                    response = response.json()
    elif func in TASK_OPERATE_FUNC:
        response = requests.post("/".join([server_url, "job", "task", func.rstrip('_task')]), json=config_data)
    elif func in TRACKING_FUNC:
        if func != 'component_metric_delete':
            detect_utils.check_config(config=config_data,
                                      required_arguments=['job_id', 'component_name', 'role', 'party_id'])
        if func == 'component_output_data':
            detect_utils.check_config(config=config_data, required_arguments=['output_path'])
            tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(config_data['job_id'],
                                                                        config_data['component_name'],
                                                                        config_data['role'],
                                                                        config_data['party_id'])
            extract_dir = os.path.join(config_data['output_path'], tar_file_name.replace('.tar.gz', ''))
            with closing(requests.get("/".join([server_url, "tracking", func.replace('_', '/'), 'download']),
                                      json=config_data,
                                      stream=True)) as response:
                if response.status_code == 200:
                    try:
                        download_from_request(http_response=response, tar_file_name=tar_file_name, extract_dir=extract_dir)
                        response = {'retcode': 0,
                                    'directory': extract_dir,
                                    'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
                    except:
                        response = {'retcode': 100,
                                    'retmsg': 'download failed, please check if the parameters are correct'}
                else:
                    response = response.json()

        else:
            response = requests.post("/".join([server_url, "tracking", func.replace('_', '/')]), json=config_data)
    elif func in DATA_FUNC:
        if func == 'upload' and config_data.get('use_local_data', 1) != 0:
            file_name = config_data.get('file')
            if not os.path.isabs(file_name):
                file_name = os.path.join(file_utils.get_project_base_directory(), file_name)
            if os.path.exists(file_name):
                with open(file_name, 'rb') as fp:
                    data = MultipartEncoder(
                        fields={'file': (os.path.basename(file_name), fp, 'application/octet-stream')}
                    )
                    tag = [0]

                    def read_callback(monitor):
                        if config_data.get('verbose') == 1:
                            sys.stdout.write("\r UPLOADING:{0}{1}".format("|" * (monitor.bytes_read * 100 // monitor.len), '%.2f%%' % (monitor.bytes_read * 100 // monitor.len)))
                            sys.stdout.flush()
                            if monitor.bytes_read / monitor.len == 1:
                                tag[0] += 1
                                if tag[0] == 2:
                                    sys.stdout.write('\n')
                    data = MultipartEncoderMonitor(data, read_callback)
                    response = requests.post("/".join([server_url, "data", func.replace('_', '/')]), data=data,
                                             params=config_data,
                                             headers={'Content-Type': data.content_type})
            else:
                raise Exception('The file is obtained from the fate flow client machine, but it does not exist, '
                                'please check the path: {}'.format(file_name))
        else:
            response = requests.post("/".join([server_url, "data", func.replace('_', '/')]), json=config_data)
        try:
            if response.json()['retcode'] == 999:
                start_cluster_standalone_job_server()
                response = requests.post("/".join([server_url, "data", func]), json=config_data)
        except:
            pass
    elif func in TABLE_FUNC:
        if func == "table_info":
            detect_utils.check_config(config=config_data, required_arguments=['namespace', 'table_name'])
            response = requests.post("/".join([server_url, "table", func]), json=config_data)
        else:
            response = requests.post("/".join([server_url, "table", func.lstrip('table_')]), json=config_data)
    elif func in MODEL_FUNC:
        if func == "import":
            file_path = config_data["file"]
            if not os.path.isabs(file_path):
                file_path = os.path.join(file_utils.get_project_base_directory(), file_path)
            if os.path.exists(file_path):
                with open(file_path, 'rb') as fp:
                    files = {'file': fp}
                    response = requests.post("/".join([server_url, "model", func]), data=config_data, files=files)
            else:
                raise Exception('The file should be available on the fate flow client machine, but it does not exist; '
                                'please check the path: {}'.format(file_path))
        elif func == "export":
            with closing(requests.get("/".join([server_url, "model", func]), json=config_data, stream=True)) as response:
                if response.status_code == 200:
                    archive_file_name = re.findall("filename=(.+)", response.headers["Content-Disposition"])[0]
                    os.makedirs(config_data["output_path"], exist_ok=True)
                    archive_file_path = os.path.join(config_data["output_path"], archive_file_name)
                    with open(archive_file_path, 'wb') as fw:
                        for chunk in response.iter_content(1024):
                            if chunk:
                                fw.write(chunk)
                    response = {'retcode': 0,
                                'file': archive_file_path,
                                'retmsg': 'download successfully, please check {}'.format(archive_file_path)}
                else:
                    response = response.json()
        else:
            response = requests.post("/".join([server_url, "model", func]), json=config_data)
    elif func in PERMISSION_FUNC:
        detect_utils.check_config(config=config_data, required_arguments=['src_party_id', 'src_role'])
        response = requests.post("/".join([server_url, "permission", func.replace('_', '/')]), json=config_data)
    return response.json() if isinstance(response, requests.models.Response) else response
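The branches above delegate streamed archive handling to a download_from_request helper that is not included in this listing. A minimal sketch of what such a helper might look like, assuming it streams the tar.gz response to disk, extracts it into the target directory, and removes the temporary archive (consistent with the inline logic shown in Example No. 28 below); this is an illustration, not the project's actual implementation:

import os
import tarfile


def download_from_request(http_response, tar_file_name, extract_dir):
    # Stream the HTTP response body to a temporary tar.gz file on disk.
    with open(tar_file_name, 'wb') as fw:
        for chunk in http_response.iter_content(1024):
            if chunk:
                fw.write(chunk)
    # Extract every member into the target directory, then drop the archive.
    with tarfile.open(tar_file_name, "r:gz") as tar:
        for file_name in tar.getnames():
            tar.extract(file_name, extract_dir)
    os.remove(tar_file_name)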
Example No. 27
def call_fun(func, config_data, dsl_path, config_path):
    ip = server_conf.get(SERVERS).get(ROLE).get('host')
    if ip in ['localhost', '127.0.0.1']:
        ip = get_lan_ip()
    http_port = server_conf.get(SERVERS).get(ROLE).get('http.port')
    server_url = "http://{}:{}/{}".format(ip, http_port, API_VERSION)

    if func in JOB_OPERATE_FUNC:
        if func == 'submit_job':
            if not config_path:
                raise Exception(
                    'the following arguments are required: {}'.format(
                        'runtime conf path'))
            dsl_data = {}
            if dsl_path or config_data.get('job_parameters', {}).get(
                    'job_type', '') == 'predict':
                if dsl_path:
                    dsl_path = os.path.abspath(dsl_path)
                    with open(dsl_path, 'r') as f:
                        dsl_data = json.load(f)
            else:
                raise Exception(
                    'the following arguments are required: {}'.format(
                        'dsl path'))
            post_data = {'job_dsl': dsl_data, 'job_runtime_conf': config_data}
            response = requests.post("/".join(
                [server_url, "job", func.rstrip('_job')]),
                                     json=post_data)
            try:
                if response.json()['retcode'] == 999:
                    start_cluster_standalone_job_server()
                    response = requests.post("/".join(
                        [server_url, "job",
                         func.rstrip('_job')]),
                                             json=post_data)
            except Exception:
                pass
        elif func == 'data_view_query':
            response = requests.post("/".join(
                [server_url, "job", func.replace('_', '/')]),
                                     json=config_data)
        else:
            if func != 'query_job':
                detect_utils.check_config(config=config_data,
                                          required_arguments=['job_id'])
            post_data = config_data
            response = requests.post("/".join(
                [server_url, "job", func.rstrip('_job')]),
                                     json=post_data)
            if func == 'query_job':
                response = response.json()
                if response['retcode'] == 0:
                    for i in range(len(response['data'])):
                        del response['data'][i]['f_runtime_conf']
                        del response['data'][i]['f_dsl']
    elif func in JOB_FUNC:
        if func == 'job_config':
            detect_utils.check_config(config=config_data,
                                      required_arguments=[
                                          'job_id', 'role', 'party_id',
                                          'output_path'
                                      ])
            response = requests.post("/".join(
                [server_url, func.replace('_', '/')]),
                                     json=config_data)
            response_data = response.json()
            if response_data['retcode'] == 0:
                job_id = response_data['data']['job_id']
                download_directory = os.path.join(
                    config_data['output_path'], 'job_{}_config'.format(job_id))
                os.makedirs(download_directory, exist_ok=True)
                for k, v in response_data['data'].items():
                    if k == 'job_id':
                        continue
                    with open('{}/{}.json'.format(download_directory, k),
                              'w') as fw:
                        json.dump(v, fw, indent=4)
                del response_data['data']['dsl']
                del response_data['data']['runtime_conf']
                response_data['directory'] = download_directory
                response_data['retmsg'] = 'download successfully, please check {} directory'.format(download_directory)
                response = response_data
        elif func == 'job_log':
            detect_utils.check_config(
                config=config_data,
                required_arguments=['job_id', 'output_path'])
            job_id = config_data['job_id']
            tar_file_name = 'job_{}_log.tar.gz'.format(job_id)
            extract_dir = os.path.join(config_data['output_path'],
                                       'job_{}_log'.format(job_id))
            with closing(
                    requests.get("/".join([server_url,
                                           func.replace('_', '/')]),
                                 json=config_data,
                                 stream=True)) as response:
                if response.status_code == 200:
                    download_from_request(http_response=response,
                                          tar_file_name=tar_file_name,
                                          extract_dir=extract_dir)
                    response = {'retcode': 0,
                                'directory': extract_dir,
                                'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
                else:
                    response = response.json()
    elif func in TASK_OPERATE_FUNC:
        response = requests.post("/".join(
            [server_url, "job", "task",
             func.rstrip('_task')]),
                                 json=config_data)
    elif func in TRACKING_FUNC:
        if func != 'component_metric_delete':
            detect_utils.check_config(config=config_data,
                                      required_arguments=[
                                          'job_id', 'component_name', 'role',
                                          'party_id'
                                      ])
        if func == 'component_output_data':
            detect_utils.check_config(config=config_data,
                                      required_arguments=['output_path'])
            tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(
                config_data['job_id'], config_data['component_name'],
                config_data['role'], config_data['party_id'])
            extract_dir = os.path.join(config_data['output_path'],
                                       tar_file_name.replace('.tar.gz', ''))
            with closing(
                    requests.get("/".join([
                        server_url, "tracking",
                        func.replace('_', '/'), 'download'
                    ]),
                                 json=config_data,
                                 stream=True)) as response:
                if response.status_code == 200:
                    download_from_request(http_response=response,
                                          tar_file_name=tar_file_name,
                                          extract_dir=extract_dir)
                    response = {'retcode': 0,
                                'directory': extract_dir,
                                'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
                else:
                    response = response.json()

        else:
            response = requests.post("/".join(
                [server_url, "tracking",
                 func.replace('_', '/')]),
                                     json=config_data)
    elif func in DATA_FUNC:
        response = requests.post("/".join(
            [server_url, "data", func.replace('_', '/')]),
                                 json=config_data)
        try:
            if response.json()['retcode'] == 999:
                start_cluster_standalone_job_server()
                response = requests.post("/".join([server_url, "data", func]),
                                         json=config_data)
        except Exception:
            pass
    elif func in TABLE_FUNC:
        if func == "table_info":
            detect_utils.check_config(
                config=config_data,
                required_arguments=['namespace', 'table_name'])
            response = requests.post("/".join([server_url, "table", func]),
                                     json=config_data)
        else:
            response = requests.post("/".join(
                [server_url, "table",
                 func.lstrip('table_')]),
                                     json=config_data)
    elif func in MODEL_FUNC:
        if func == "version":
            detect_utils.check_config(config=config_data,
                                      required_arguments=['namespace'])
        response = requests.post("/".join([server_url, "model", func]),
                                 json=config_data)
    elif func in PERMISSION_FUNC:
        detect_utils.check_config(
            config=config_data,
            required_arguments=['src_party_id', 'src_role'])
        response = requests.post("/".join(
            [server_url, "permission",
             func.replace('_', '/')]),
                                 json=config_data)
    return response.json() if isinstance(
        response, requests.models.Response) else response
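Both variants of call_fun lean on detect_utils.check_config to validate the request payload before posting it. The real implementation is not part of this listing; a minimal sketch, under the assumption that each required argument is either a plain key name or a (key, allowed_values) tuple:

def check_config(config, required_arguments):
    # Hypothetical validator: collect missing keys and values outside the allowed set.
    missing, invalid = [], []
    for argument in required_arguments:
        if isinstance(argument, tuple):
            key, allowed_values = argument
            if config.get(key) is None:
                missing.append(key)
            elif config[key] not in allowed_values:
                invalid.append(key)
        elif config.get(argument) is None:
            missing.append(argument)
    if missing or invalid:
        raise Exception('required arguments are missing or invalid: {} {}'.format(missing, invalid))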
Example No. 28
def call_fun(func, config_data, dsl_path, config_path):
    ip = server_conf.get(SERVERS).get(ROLE).get('host')
    http_port = server_conf.get(SERVERS).get(ROLE).get('http.port')
    local_url = "http://{}:{}/{}".format(ip, http_port, API_VERSION)

    if func in JOB_OPERATE_FUNC:
        if func == 'submit_job':
            if not config_path:
                raise Exception(
                    'the following arguments are required: {}'.format(
                        'runtime conf path'))
            dsl_data = {}
            if dsl_path or config_data.get('job_parameters', {}).get(
                    'job_type', '') == 'predict':
                if dsl_path:
                    dsl_path = os.path.abspath(dsl_path)
                    with open(dsl_path, 'r') as f:
                        dsl_data = json.load(f)
            else:
                raise Exception(
                    'the following arguments are required: {}'.format(
                        'dsl path'))
            post_data = {'job_dsl': dsl_data, 'job_runtime_conf': config_data}
        else:
            if func != 'query_job':
                detect_utils.check_config(config=config_data,
                                          required_arguments=['job_id'])
            post_data = config_data
        response = requests.post("/".join(
            [local_url, "job", func.rstrip('_job')]),
                                 json=post_data)
        if func == 'query_job':
            response = response.json()
            if response['retcode'] == 0:
                for i in range(len(response['data'])):
                    del response['data'][i]['f_runtime_conf']
                    del response['data'][i]['f_dsl']
    elif func in JOB_FUNC:
        if func == 'job_config':
            detect_utils.check_config(config=config_data,
                                      required_arguments=[
                                          'job_id', 'role', 'party_id',
                                          'output_path'
                                      ])
            response = requests.post("/".join(
                [local_url, func.replace('_', '/')]),
                                     json=config_data)
            response_data = response.json()
            if response_data['retcode'] == 0:
                job_id = response_data['data']['job_id']
                download_directory = os.path.join(
                    config_data['output_path'], 'job_{}_config'.format(job_id))
                os.makedirs(download_directory, exist_ok=True)
                for k, v in response_data['data'].items():
                    if k == 'job_id':
                        continue
                    with open('{}/{}.json'.format(download_directory, k),
                              'w') as fw:
                        json.dump(v, fw, indent=4)
                del response_data['data']['dsl']
                del response_data['data']['runtime_conf']
                response_data['directory'] = download_directory
                response_data['retmsg'] = 'download successfully, please check {} directory'.format(download_directory)
                response = response_data
        elif func == 'job_log':
            detect_utils.check_config(
                config=config_data,
                required_arguments=['job_id', 'output_path'])
            with closing(
                    requests.get("/".join([local_url,
                                           func.replace('_', '/')]),
                                 json=config_data,
                                 stream=True)) as response:
                job_id = config_data['job_id']
                tar_file_name = 'job_{}_log.tar.gz'.format(job_id)
                with open(tar_file_name, 'wb') as fw:
                    for chunk in response.iter_content(1024):
                        if chunk:
                            fw.write(chunk)
                extract_dir = os.path.join(config_data['output_path'],
                                           'job_{}_log'.format(job_id))
                with tarfile.open(tar_file_name, "r:gz") as tar:
                    for file_name in tar.getnames():
                        tar.extract(file_name, extract_dir)
                os.remove(tar_file_name)
            response = {'retcode': 0,
                        'directory': extract_dir,
                        'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
    elif func in TASK_OPERATE_FUNC:
        response = requests.post("/".join(
            [local_url, "job", "task",
             func.rstrip('_task')]),
                                 json=config_data)
    elif func in TRACKING_FUNC:
        detect_utils.check_config(config=config_data,
                                  required_arguments=[
                                      'job_id', 'component_name', 'role',
                                      'party_id'
                                  ])
        if func == 'component_output_data':
            detect_utils.check_config(config=config_data,
                                      required_arguments=['output_path'])
            tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(
                config_data['job_id'], config_data['component_name'],
                config_data['role'], config_data['party_id'])
            extract_dir = os.path.join(config_data['output_path'],
                                       tar_file_name.replace('.tar.gz', ''))
            with closing(
                    requests.get("/".join([
                        local_url, "tracking",
                        func.replace('_', '/'), 'download'
                    ]),
                                 json=config_data,
                                 stream=True)) as res:
                if res.status_code == 200:
                    with open(tar_file_name, 'wb') as fw:
                        for chunk in res.iter_content(1024):
                            if chunk:
                                fw.write(chunk)
                    with tarfile.open(tar_file_name, "r:gz") as tar:
                        for file_name in tar.getnames():
                            tar.extract(file_name, extract_dir)
                    os.remove(tar_file_name)
                    response = {'retcode': 0,
                                'directory': extract_dir,
                                'retmsg': 'download successfully, please check {} directory'.format(extract_dir)}
                else:
                    response = res.json()

        else:
            response = requests.post("/".join(
                [local_url, "tracking",
                 func.replace('_', '/')]),
                                     json=config_data)
    elif func in DATA_FUNC:
        response = requests.post("/".join([local_url, "data", func]),
                                 json=config_data)
    elif func in TABLE_FUNC:
        detect_utils.check_config(
            config=config_data, required_arguments=['namespace', 'table_name'])
        response = requests.post("/".join([local_url, "table", func]),
                                 json=config_data)
    elif func in MODEL_FUNC:
        if func == "version":
            detect_utils.check_config(config=config_data,
                                      required_arguments=['namespace'])
        response = requests.post("/".join([local_url, "model", func]),
                                 json=config_data)
    return response.json() if isinstance(
        response, requests.models.Response) else response
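A hypothetical invocation of the job_log branch above; the job id, output path, and conf path are placeholders rather than values taken from the source:

import json

# Placeholder values for illustration only.
config_data = {'job_id': '202101011200000000001', 'output_path': './job_logs'}
result = call_fun('job_log', config_data, dsl_path=None, config_path='job_log_conf.json')
print(json.dumps(result, indent=4) if isinstance(result, dict) else result)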
Example No. 29
def download_upload(access_module):
    job_id = generate_job_id()
    if access_module == "upload" and USE_LOCAL_DATA and not (
            request.json and request.json.get("use_local_data") == 0):
        file = request.files['file']
        filename = os.path.join(get_job_directory(job_id), 'fate_upload_tmp',
                                file.filename)
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        try:
            file.save(filename)
        except Exception as e:
            shutil.rmtree(os.path.join(get_job_directory(job_id), 'fate_upload_tmp'))
            raise e
        request_config = request.args.to_dict()
        request_config['file'] = filename
    else:
        request_config = request.json
    required_arguments = ['work_mode', 'namespace', 'table_name']
    if access_module == 'upload':
        required_arguments.extend(['file', 'head', 'partition'])
    elif access_module == 'download':
        required_arguments.extend(['output_path'])
    elif access_module == 'download_test':
        required_arguments.extend(['output_path'])
    else:
        raise Exception('cannot support this operation: {}'.format(access_module))
    detect_utils.check_config(request_config,
                              required_arguments=required_arguments)
    data = {}
    if access_module == "upload":
        data['table_name'] = request_config["table_name"]
        data['namespace'] = request_config["namespace"]
        if WORK_MODE != 0:
            data_table = session.get_data_table(
                name=request_config["table_name"],
                namespace=request_config["namespace"])
            count = data_table.count()
            if count and int(request_config.get('drop', 2)) == 2:
                return get_json_result(
                    retcode=100,
                    retmsg='The data table already exists, table data count: {}. '
                           'If you still want to continue uploading, please add the parameter -drop: '
                           '0 means keep the existing table and continue uploading, '
                           '1 means delete the existing table and upload again'.format(count))
            elif count and int(request_config.get('drop', 2)) == 1:
                data_table.destroy()
    job_dsl, job_runtime_conf = gen_data_access_job_config(
        request_config, access_module)
    job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = JobController.submit_job(
        {
            'job_dsl': job_dsl,
            'job_runtime_conf': job_runtime_conf
        },
        job_id=job_id)
    data.update({
        'job_dsl_path': job_dsl_path,
        'job_runtime_conf_path': job_runtime_conf_path,
        'board_url': board_url,
        'logs_directory': logs_directory
    })
    return get_json_result(job_id=job_id, data=data)
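For reference, a request body that would satisfy the upload validation above might look like the following; the concrete values are illustrative only, not taken from the source:

# Illustrative upload request body; field values are placeholders.
upload_config = {
    "work_mode": 0,                                    # runtime work mode of the FATE deployment
    "namespace": "experiment",
    "table_name": "breast_hetero_guest",
    "file": "examples/data/breast_hetero_guest.csv",   # resolved against the project base directory if relative
    "head": 1,                                         # first row is a header
    "partition": 4,
    "drop": 1                                          # 1: delete an existing table of the same name, then upload again
}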
Example No. 30
def download_upload(data_func):
    request_config = request.json
    _job_id = generate_job_id()
    stat_logger.info('generated job_id {}, body {}'.format(
        _job_id, request_config))
    _job_dir = get_job_directory(_job_id)
    os.makedirs(_job_dir, exist_ok=True)
    module = data_func
    required_arguments = ['work_mode', 'namespace', 'table_name']
    if module == 'upload':
        required_arguments.extend(['file', 'head', 'partition'])
    elif module == 'download':
        required_arguments.extend(['output_path'])
    else:
        raise Exception('cannot support this operation: {}'.format(module))
    detect_utils.check_config(request_config,
                              required_arguments=required_arguments)
    job_work_mode = request_config['work_mode']
    # TODO: this code is redundant with job_app/submit_job; the next version of this function will be implemented via job_app/submit_job
    if job_work_mode != RuntimeConfig.WORK_MODE:
        if RuntimeConfig.WORK_MODE == WorkMode.CLUSTER and job_work_mode == WorkMode.STANDALONE:
            # use cluster standalone job server to execute standalone job
            return request_execute_server(
                request=request,
                execute_host='{}:{}'.format(
                    request.remote_addr, CLUSTER_STANDALONE_JOB_SERVER_PORT))
        else:
            raise Exception(
                'a server running in standalone mode cannot execute cluster mode jobs')

    if module == "upload":
        if not os.path.isabs(request_config['file']):
            request_config["file"] = os.path.join(
                file_utils.get_project_base_directory(),
                request_config["file"])
    try:
        conf_file_path = new_runtime_conf(
            job_dir=_job_dir,
            method=data_func,
            module=module,
            role=request_config.get('local', {}).get("role"),
            party_id=request_config.get('local', {}).get("party_id", ''))
        file_utils.dump_json_conf(request_config, conf_file_path)
        progs = [
            "python3",
            os.path.join(file_utils.get_project_base_directory(),
                         JOB_MODULE_CONF[module]["module_path"]),
            "-j", _job_id,
            "-c", conf_file_path
        ]
        try:
            p = run_subprocess(config_dir=_job_dir, process_cmd=progs)
        except Exception as e:
            stat_logger.exception(e)
            p = None
        return get_json_result(retcode=(0 if p else 101),
                               job_id=_job_id,
                               data={
                                   'table_name': request_config['table_name'],
                                   'namespace': request_config['namespace'],
                                   'pid': p.pid if p else ''
                               })
    except Exception as e:
        stat_logger.exception(e)
        return get_json_result(retcode=-104, retmsg="failed", job_id=_job_id)
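run_subprocess is another helper that is not part of this listing. A minimal sketch of what it might do, assuming it simply launches the data-access module as a child process and redirects its output into the job directory; the log file name and other details are assumptions, not the project's actual implementation:

import os
import subprocess


def run_subprocess(config_dir, process_cmd):
    # Hypothetical helper: start the command as a child process and redirect
    # stdout/stderr into a log file under the job directory.
    os.makedirs(config_dir, exist_ok=True)
    std_log = open(os.path.join(config_dir, 'std.log'), 'w')
    return subprocess.Popen(process_cmd, stdout=std_log, stderr=std_log, cwd=config_dir)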