def get_flow_info():
    from fate_flow import set_env  # note: imported but not referenced directly below
    from fate_arch.common.conf_utils import get_base_config
    FATE_FLOW_SERVICE_NAME = "fateflow"
    HOST = get_base_config(FATE_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
    HTTP_PORT = get_base_config(FATE_FLOW_SERVICE_NAME, {}).get("http_port")
    return HOST, HTTP_PORT
def get_federated_proxy_address(src_party_id, dest_party_id):
    proxy_config = get_base_config("fateflow", {}).get("proxy", None)
    protocol_config = get_base_config("fateflow", {}).get("protocol", "default")
    if isinstance(proxy_config, str):
        if proxy_config == CoordinationProxyService.ROLLSITE:
            proxy_address = get_base_config("fate_on_eggroll", {}).get(proxy_config)
            return (proxy_address["host"],
                    proxy_address.get("grpc_port", proxy_address["port"]),
                    CoordinationCommunicationProtocol.GRPC)
        elif proxy_config == CoordinationProxyService.NGINX:
            proxy_address = get_base_config("fate_on_spark", {}).get(proxy_config)
            protocol = CoordinationCommunicationProtocol.HTTP if protocol_config == "default" else protocol_config
            return proxy_address["host"], proxy_address[f"{protocol}_port"], protocol
        else:
            raise RuntimeError(f"unsupported coordination proxy: {proxy_config}")
    elif isinstance(proxy_config, dict):
        proxy_address = proxy_config
        protocol = CoordinationCommunicationProtocol.HTTP if protocol_config == "default" else protocol_config
        proxy_name = proxy_config.get("name", CoordinationProxyService.FATEFLOW)
        if proxy_name == CoordinationProxyService.FATEFLOW and str(dest_party_id) == str(src_party_id):
            # same party: talk to the local fateflow job server directly
            host = RuntimeConfig.JOB_SERVER_HOST
            port = RuntimeConfig.HTTP_PORT
        else:
            host = proxy_address["host"]
            port = proxy_address[f"{protocol}_port"]
        return host, port, protocol
    else:
        raise RuntimeError(f"unsupported coordination proxy config: {proxy_config}")
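# Illustrative sketch (not from the original source): get_federated_proxy_address()
# accepts either form of the "proxy" entry in service_conf.yaml. The hosts and
# ports below are placeholder assumptions.
#
# String form, resolved against the matching backend section:
#   fateflow:
#     proxy: rollsite
#   fate_on_eggroll:
#     rollsite:
#       host: 192.168.0.1
#       port: 9370          # grpc_port is preferred if present
#
# Dict form, used directly as the proxy address:
#   fateflow:
#     proxy:
#       name: fateflow
#       host: 192.168.0.2
#       http_port: 9380
#
#   host, port, protocol = get_federated_proxy_address(9999, 10000)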
def get_command_federation_channel():
    engine = "PROXY" if get_base_config("independent_scheduling_proxy", False) else "EGGROLL"
    address = conf_utils.get_base_config(engine).get("address")
    channel = grpc.insecure_channel('{}:{}'.format(address.get("host"), address.get("port")))
    stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
    return engine, channel, stub
def forward_api(role, request_config):
    endpoint = request_config.get('header', {}).get('endpoint')
    ip = get_base_config(role, {}).get("host", "127.0.0.1")
    port = get_base_config(role, {}).get("port")
    url = "http://{}:{}{}".format(ip, port, endpoint)
    method = request_config.get('header', {}).get('method', 'post')
    audit_logger().info('api request: {}'.format(url))
    action = getattr(requests, method.lower(), None)
    if action is None:
        # guard against calling None when the header carries an unknown method
        raise ValueError('unsupported http method: {}'.format(method))
    http_response = action(url=url, json=request_config.get('body'), headers=HEADERS)
    response = http_response.json()
    audit_logger().info(response)
    return response
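# Illustrative call sketch (endpoint and body are hypothetical, not from the
# original source): forward_api() only reads the 'header' and 'body' sections
# of request_config, so a caller could assemble the payload like this:
#
#   example_request_config = {
#       "header": {"endpoint": "/v1/version/get", "method": "POST"},
#       "body": {"module": "FATE"},
#   }
#   result = forward_api("fateboard", example_request_config)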
def set_fate_server_info():
    # save the federated id under the fatemanager service config
    federated_id = request.json.get("federatedId")
    manager_conf = conf_utils.get_base_config("fatemanager", {})
    manager_conf["federatedId"] = federated_id
    conf_utils.update_config("fatemanager", manager_conf)
    return get_json_result(data={"federatedId": federated_id})
def get_command_federation_channel():
    proxy_config = get_base_config("fateflow", {}).get("proxy", None)
    if isinstance(proxy_config, str):
        if proxy_config == CoordinateProxyService.rollsite:
            address = get_base_config("fate_on_eggroll", {}).get(proxy_config)
        elif proxy_config == CoordinateProxyService.nginx:
            address = get_base_config("fate_on_spark", {}).get(proxy_config)
        else:
            raise RuntimeError(f"unsupported coordination proxy: {proxy_config}")
    elif isinstance(proxy_config, dict):
        address = proxy_config
    else:
        raise RuntimeError(f"unsupported coordination proxy config: {proxy_config}")
    channel = grpc.insecure_channel('{}:{}'.format(address.get("host"), address.get("port")))
    stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
    return channel, stub
def load_model(config_data):
    stat_logger.info(config_data)
    if not config_data.get('servings'):
        return 100, 'Please configure servings address'
    for serving in config_data.get('servings'):
        with grpc.insecure_channel(serving) as channel:
            stub = model_service_pb2_grpc.ModelServiceStub(channel)
            load_model_request = model_service_pb2.PublishRequest()
            for role_name, role_partys in config_data.get("role").items():
                for _party_id in role_partys:
                    load_model_request.role[role_name].partyId.append(_party_id)
            for role_name, role_model_config in config_data.get("model").items():
                for _party_id, role_party_model_config in role_model_config.items():
                    load_model_request.model[role_name].roleModelInfo[_party_id].tableName = \
                        role_party_model_config['model_version']
                    load_model_request.model[role_name].roleModelInfo[_party_id].namespace = \
                        role_party_model_config['model_id']
            stat_logger.info('request serving: {} load model'.format(serving))
            load_model_request.local.role = config_data.get('local').get('role')
            load_model_request.local.partyId = config_data.get('local').get('party_id')
            load_model_request.loadType = config_data['job_parameters'].get("load_type", "FATEFLOW")
            if not get_base_config('use_registry'):
                load_model_request.filePath = f"http://{IP}:{HTTP_PORT}{FATE_FLOW_MODEL_TRANSFER_ENDPOINT}"
            else:
                load_model_request.filePath = config_data['job_parameters'].get("file_path", "")
            stat_logger.info(load_model_request)
            response = stub.publishLoad(load_model_request)
            stat_logger.info('{} {} load model status: {}'.format(
                load_model_request.local.role, load_model_request.local.partyId, response.statusCode))
            if response.statusCode != 0:
                return response.statusCode, '{} {}'.format(response.message, response.error)
    return 0, 'success'
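# Sketch of the config_data shape that load_model() consumes. The keys mirror
# the reads in the function above; all values are illustrative assumptions:
#
#   config_data = {
#       "servings": ["127.0.0.1:8000"],
#       "local": {"role": "guest", "party_id": 9999},
#       "role": {"guest": [9999], "host": [10000]},
#       "model": {
#           "guest": {
#               9999: {"model_id": "guest-9999#host-10000#model",
#                      "model_version": "202101011200000000001"},
#           },
#       },
#       "job_parameters": {"load_type": "FATEFLOW"},
#   }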
def initialize(cls):
    for backend_name, backend_engines in SUPPORT_BACKENDS_ENTRANCE.items():
        for engine_type, engine_keys_list in backend_engines.items():
            for engine_keys in engine_keys_list:
                engine_config = get_base_config(backend_name, {}).get(engine_keys[1], {})
                if engine_config:
                    cls.register_engine(engine_type=engine_type,
                                        engine_name=engine_keys[0],
                                        engine_entrance=engine_keys[1],
                                        engine_config=engine_config)
    # initialize standalone engine
    for backend_engines in SUPPORT_BACKENDS_ENTRANCE.values():
        for engine_type in backend_engines.keys():
            engine_name = "STANDALONE"
            engine_entrance = "fateflow"
            engine_config = {
                "nodes": 1,
                "cores_per_node": STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE,
            }
            cls.register_engine(engine_type=engine_type,
                                engine_name=engine_name,
                                engine_entrance=engine_entrance,
                                engine_config=engine_config)
def do_load_model():
    request_data = request.json
    request_data['servings'] = RuntimeConfig.SERVICE_DB.get_urls('servings')

    role = request_data['local']['role']
    party_id = request_data['local']['party_id']
    model_id = request_data['job_parameters']['model_id']
    model_version = request_data['job_parameters']['model_version']
    party_model_id = model_utils.gen_party_model_id(model_id, role, party_id)

    if get_base_config('enable_model_store', False):
        pipeline_model = pipelined_model.PipelinedModel(party_model_id, model_version)
        component_parameters = {
            'model_id': party_model_id,
            'model_version': model_version,
            'store_address': ServiceRegistry.MODEL_STORE_ADDRESS,
        }
        model_storage = get_model_storage(component_parameters)
        # keep local pipeline model and remote model storage in sync
        if pipeline_model.exists() and not model_storage.exists(**component_parameters):
            stat_logger.info(f'Uploading {pipeline_model.model_path} to model storage.')
            model_storage.store(**component_parameters)
        elif not pipeline_model.exists() and model_storage.exists(**component_parameters):
            stat_logger.info(f'Downloading {pipeline_model.model_path} from model storage.')
            model_storage.restore(**component_parameters)

    if not model_utils.check_if_deployed(role, party_id, model_id, model_version):
        return get_json_result(
            retcode=100,
            retmsg="Only deployed models could be used to execute process of loading. "
                   "Please deploy model before loading.")

    retcode, retmsg = publish_model.load_model(request_data)
    try:
        if not retcode:
            with DB.connection_context():
                model = MLModel.get_or_none(
                    MLModel.f_role == request_data["local"]["role"],
                    MLModel.f_party_id == request_data["local"]["party_id"],
                    MLModel.f_model_id == request_data["job_parameters"]["model_id"],
                    MLModel.f_model_version == request_data["job_parameters"]["model_version"])
                if model:
                    model.f_loaded_times += 1
                    model.save()
    except Exception as modify_err:
        stat_logger.exception(modify_err)

    operation_record(request_data, "load", "success" if not retcode else "failed")
    return get_json_result(retcode=retcode, retmsg=retmsg)
def get_zk(cls):
    if not get_base_config('use_registry', False):
        return
    zk_config = get_base_config("zookeeper", {})
    if zk_config.get("use_acl", False):
        default_acl = make_digest_acl(zk_config.get("user", ""), zk_config.get("password", ""), all=True)
        zk = KazooClient(hosts=zk_config.get("hosts", []),
                         default_acl=[default_acl],
                         auth_data=[("digest", "{}:{}".format(zk_config.get("user", ""),
                                                              zk_config.get("password", "")))])
    else:
        zk = KazooClient(hosts=zk_config.get("hosts", []))
    return zk
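# The zookeeper section read above might look like this in service_conf.yaml
# (hosts and credentials are placeholder assumptions; the keys match the
# zk_config reads in get_zk):
#
#   use_registry: true
#   zookeeper:
#     hosts:
#       - "127.0.0.1:2181"
#     use_acl: false
#     user: "fate"
#     password: "fate"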
def register(cls):
    if get_base_config("use_registry", False):
        zk = ServiceUtils.get_zk()
        zk.start()
        model_transfer_url = 'http://{}:{}{}'.format(IP, HTTP_PORT, FATE_FLOW_MODEL_TRANSFER_ENDPOINT)
        fate_flow_model_transfer_service = '{}/{}'.format(
            FATE_SERVICES_REGISTERED_PATH.get(FATEFLOW_SERVICE_NAME, ""),
            parse.quote(model_transfer_url, safe=' '))
        try:
            zk.create(fate_flow_model_transfer_service, makepath=True, ephemeral=True)
            stat_logger.info("register path {} to {}".format(
                fate_flow_model_transfer_service,
                ";".join(get_base_config("zookeeper", {}).get("hosts"))))
        except Exception as e:
            stat_logger.exception(e)
def get_fateboard_info():
    fateboard = get_base_config('fateboard', {})
    host = fateboard.get('host')
    port = fateboard.get('port')
    if not host or not port:
        return error_response(404, 'fateboard is not configured')
    return get_json_result(data={
        'host': host,
        'port': port,
    })
def setUp(self):
    self.party_info = file_utils.load_json_conf(
        os.path.abspath(os.path.join('./jobs', 'party_info.json'))) if WORK_MODE else None
    self.guest_party_id = self.party_info['guest'] if WORK_MODE else 9999
    self.host_party_id = self.party_info['host'] if WORK_MODE else 10000
    self.dsl_path = 'fate_flow/examples/test_hetero_lr_job_dsl.json'
    self.config_path = 'fate_flow/examples/test_hetero_lr_job_conf.json'
    ip = conf_utils.get_base_config(FATEFLOW_SERVICE_NAME).get("host")
    self.server_url = "http://{}:{}/{}".format(ip, HTTP_PORT, API_VERSION)
def save(cls, service_config):
    update_server = {}
    for service_name, service_info in service_config.items():
        cls.parameter_verification(service_name, service_info)
        manager_conf = conf_utils.get_base_config(service_name, {})
        if not manager_conf:
            manager_conf = service_info
        else:
            manager_conf.update(service_info)
        conf_utils.update_config(service_name, manager_conf)
        update_server[service_name] = manager_conf
        setattr(cls, service_name.upper(), manager_conf)
    return update_server
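# Hypothetical usage sketch (the owning class name and service entry are
# assumptions, not from the original source): save() merges each service's new
# info into the existing service_conf.yaml entry and mirrors it as an
# upper-cased class attribute.
#
#   updated = ServiceRegistry.save({"servings": {"hosts": ["127.0.0.1:8000"]}})
#   # afterwards ServiceRegistry.SERVINGS == updated["servings"]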
def register_service(cls, service_config):
    update_server = {}
    for service_name, service_info in service_config.items():
        if service_name not in cls.registry_service_info.keys():
            continue
        cls.parameter_verification(service_name, service_info)
        manager_conf = conf_utils.get_base_config(service_name, {})
        if not manager_conf:
            manager_conf = service_info
        else:
            manager_conf.update(service_info)
        conf_utils.update_config(service_name, manager_conf)
        update_server[service_name] = manager_conf
    return update_server
def get_node_identity(json_body, src_party_id):
    params = {
        'partyId': int(src_party_id),
        'federatedId': conf_utils.get_base_config("fatemanager", {}).get("federatedId")
    }
    try:
        response = requests.post(
            url="http://{}:{}{}".format(
                ServiceUtils.get_item("fatemanager", "host"),
                ServiceUtils.get_item("fatemanager", "port"),
                FATE_MANAGER_GET_NODE_INFO_ENDPOINT),
            json=params)
        data = response.json().get('data')
        json_body['appKey'] = data.get('appKey')
        json_body['appSecret'] = data.get('appSecret')
        json_body['_src_role'] = data.get('role')
    except Exception as e:
        raise Exception('get appkey and secret failed: {}'.format(str(e)))
def get_from_registry(cls, service_name):
    if not get_base_config('use_registry', False):
        return
    try:
        zk = ServiceUtils.get_zk()
        zk.start()
        nodes = zk.get_children(FATE_SERVICES_REGISTERED_PATH.get(service_name, ""))
        services = nodes_unquote(nodes)
        zk.stop()
        return services
    except Exception as e:
        raise Exception('loading {} nodes from zookeeper failed: {}'.format(service_name, e))
def register(cls, zk, party_model_id=None, model_version=None):
    if not get_base_config('use_registry', False):
        return
    model_transfer_url = 'http://{}:{}{}'.format(IP, HTTP_PORT, FATE_FLOW_MODEL_TRANSFER_ENDPOINT)
    if party_model_id is not None and model_version is not None:
        model_transfer_url += '/{}/{}'.format(party_model_id.replace('#', '~'), model_version)
    fate_flow_model_transfer_service = '{}/{}'.format(
        FATE_SERVICES_REGISTERED_PATH.get(FATEFLOW_SERVICE_NAME, ""),
        parse.quote(model_transfer_url, safe=' '))
    try:
        zk.create(fate_flow_model_transfer_service, makepath=True, ephemeral=True)
        stat_logger.info("register path {} to {}".format(
            fate_flow_model_transfer_service,
            ";".join(get_base_config("zookeeper", {}).get("hosts"))))
    except NodeExistsError:
        pass
    except Exception as e:
        stat_logger.exception(e)
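# Note on the path built above: parse.quote(..., safe=' ') percent-encodes every
# reserved character (including '/' and ':'), so the whole transfer URL becomes
# a single child znode under the registered path. With assumed values
# IP=127.0.0.1, HTTP_PORT=9380 and FATE_FLOW_MODEL_TRANSFER_ENDPOINT of
# '/v1/model/transfer', the child name would be:
#
#   http%3A%2F%2F127.0.0.1%3A9380%2Fv1%2Fmodel%2Ftransfer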
def initialize(cls):
    for engine_type, engines_name in SUPPORT_ENGINES.items():
        for engine_name in engines_name:
            engine_info = get_base_config(engine_name, {})
            if engine_info:
                engine_info["engine"] = engine_name
                cls.register_engine(engine_type=engine_type, engine_info=engine_info)
    # initialize standalone engine
    for engine_type in SUPPORT_ENGINES.keys():
        engine_name = "STANDALONE"
        engine_info = {
            "engine": engine_name,
            "nodes": 1,
            "cores_per_node": STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE,
        }
        cls.register_engine(engine_type=engine_type, engine_info=engine_info)
def nodes_check(src_party_id, src_role, appKey, appSecret, dst_party_id):
    if CHECK_NODES_IDENTITY:
        body = {
            'srcPartyId': int(src_party_id),
            'role': src_role,
            'appKey': appKey,
            'appSecret': appSecret,
            'dstPartyId': int(dst_party_id),
            'federatedId': conf_utils.get_base_config("fatemanager", {}).get("federatedId")
        }
        try:
            response = requests.post(
                url="http://{}:{}{}".format(
                    ServiceUtils.get_item("fatemanager", "host"),
                    ServiceUtils.get_item("fatemanager", "port"),
                    FATE_MANAGER_NODE_CHECK_ENDPOINT),
                json=body).json()
            if response['code'] != 0:
                raise Exception(str(response['msg']))
        except Exception as e:
            raise Exception('role {} party id {} authentication failed: {}'.format(
                src_role, src_party_id, str(e)))
def setUp(self):
    self.data_dir = os.path.join(file_utils.get_project_base_directory(), "examples", "data")
    self.upload_guest_config = {
        "file": os.path.join(self.data_dir, "breast_hetero_guest.csv"),
        "head": 1,
        "partition": 10,
        "work_mode": WORK_MODE,
        "namespace": "experiment",
        "table_name": "breast_hetero_guest",
        "use_local_data": 0,
        "drop": 1,
        "backend": 0,
        "id_delimiter": ",",
    }
    self.upload_host_config = {
        "file": os.path.join(self.data_dir, "breast_hetero_host.csv"),
        "head": 1,
        "partition": 10,
        "work_mode": WORK_MODE,
        "namespace": "experiment",
        "table_name": "breast_hetero_host",
        "use_local_data": 0,
        "drop": 1,
        "backend": 0,
        "id_delimiter": ",",
    }
    self.download_config = {
        "output_path": os.path.join(file_utils.get_project_base_directory(),
                                    "fate_flow/fate_flow_unittest_breast_b.csv"),
        "work_mode": WORK_MODE,
        "namespace": "experiment",
        "table_name": "breast_hetero_guest",
    }
    ip = conf_utils.get_base_config(FATEFLOW_SERVICE_NAME).get("host")
    self.server_url = "http://{}:{}/{}".format(ip, HTTP_PORT, API_VERSION)
def get_engines_config_from_conf(group_map=False):
    engines_config = {}
    engine_group_map = {}
    for engine_type in {EngineType.COMPUTING, EngineType.FEDERATION, EngineType.STORAGE}:
        engines_config[engine_type] = {}
        engine_group_map[engine_type] = {}
    for group_name, engine_map in Relationship.EngineConfMap.items():
        for engine_type, name_maps in engine_map.items():
            for name_map in name_maps:
                single_engine_config = conf_utils.get_base_config(group_name, {}).get(name_map[1], {})
                if single_engine_config:
                    engine_name = name_map[0]
                    engines_config[engine_type][engine_name] = single_engine_config
                    engine_group_map[engine_type][engine_name] = group_name
    if not group_map:
        return engines_config
    return engines_config, engine_group_map
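# Sketch of the return value shape, assuming the EngineType constants are the
# strings "computing"/"federation"/"storage" (engine names, group names and
# addresses are illustrative assumptions):
#
#   engines_config = {
#       "computing": {"EGGROLL": {...}, "SPARK": {...}},
#       "federation": {"ROLLSITE": {...}},
#       "storage": {"EGGROLL": {...}},
#   }
#   engine_group_map = {
#       "computing": {"EGGROLL": "fate_on_eggroll"},
#       ...
#   }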
def get(cls, service_name, default=None):
    if get_base_config("use_registry", False) and service_name in SERVICES_SUPPORT_REGISTRY:
        return ServiceUtils.get_from_registry(service_name)
    return ServiceUtils.get_from_file(service_name, default)
def get_engines():
    engines = {
        EngineType.COMPUTING: None,
        EngineType.FEDERATION: None,
        EngineType.STORAGE: None,
    }

    # check service_conf.yaml
    if conf_utils.get_base_config("default_engines", {}).get(EngineType.COMPUTING) is None:
        raise RuntimeError("must set default_engines on conf/service_conf.yaml")
    default_engines = conf_utils.get_base_config("default_engines")

    # computing engine
    if default_engines.get(EngineType.COMPUTING) is None:
        raise RuntimeError(
            f"{EngineType.COMPUTING} is None, "
            f"please check default_engines on conf/service_conf.yaml")
    engines[EngineType.COMPUTING] = default_engines[EngineType.COMPUTING].upper()
    if engines[EngineType.COMPUTING] not in get_engine_class_members(ComputingEngine):
        raise RuntimeError(f"{engines[EngineType.COMPUTING]} is illegal")

    # federation engine
    if default_engines.get(EngineType.FEDERATION) is not None:
        engines[EngineType.FEDERATION] = default_engines[EngineType.FEDERATION].upper()

    # storage engine
    if default_engines.get(EngineType.STORAGE) is not None:
        engines[EngineType.STORAGE] = default_engines[EngineType.STORAGE].upper()

    # set default storage engine and federation engine by computing engine
    for t in (EngineType.STORAGE, EngineType.FEDERATION):
        if engines.get(t) is None:
            # use the default engine related to the computing engine
            engines[t] = Relationship.Computing[engines[EngineType.COMPUTING]][t]["default"]

    # set default federated mode by federation engine
    if engines[EngineType.FEDERATION] == FederationEngine.STANDALONE:
        engines["federated_mode"] = FederatedMode.SINGLE
    else:
        engines["federated_mode"] = FederatedMode.MULTIPLE

    if engines[EngineType.STORAGE] not in get_engine_class_members(StorageEngine):
        raise RuntimeError(f"{engines[EngineType.STORAGE]} is illegal")
    if engines[EngineType.FEDERATION] not in get_engine_class_members(FederationEngine):
        raise RuntimeError(f"{engines[EngineType.FEDERATION]} is illegal")

    for t in [EngineType.FEDERATION]:
        if engines[t] not in Relationship.Computing[engines[EngineType.COMPUTING]][t]["support"]:
            raise RuntimeError(f"{engines[t]} is not supported in {engines[EngineType.COMPUTING]}")

    return engines
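# get_engines() expects a default_engines section like the following in
# conf/service_conf.yaml (engine names are illustrative; storage and federation
# may be omitted, in which case the computing engine's defaults are used):
#
#   default_engines:
#     computing: eggroll
#     federation: eggroll
#     storage: eggroll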
import json
import os
import tarfile
import time
from contextlib import closing
from datetime import datetime

import requests

from fate_arch.common import conf_utils
from fate_flow.entity.types import StatusSet

config_path = 'config/settings.json'
ip = conf_utils.get_base_config("fateflow").get("host")
http_port = conf_utils.get_base_config("fateflow").get("http_port")
API_VERSION = "v1"
server_url = "http://{}:{}/{}".format(ip, http_port, API_VERSION)
component_name = "hetero_secure_boost_0"
metric_output_path = './output/metric/{}_hetero_secure_boost_0.json'
model_output_path = './output/model/{}_hetero_secure_boost_0.json'
data_output_path = './output/data/job_{}_{}_{}_{}_output_data'  # job_id, cpn, role, party_id


def get_dict_from_file(file_name):
    with open(file_name, 'r', encoding='utf-8') as f:
        json_info = json.load(f)
    return json_info


class Base(object):
# limitations under the License.
#
import base64
import datetime
import io
import json
import os
import pickle
import socket
import time
import uuid
from enum import Enum, IntEnum

from fate_arch.common.conf_utils import get_base_config

use_deserialize_safe_module = get_base_config('use_deserialize_safe_module', False)


class CustomJSONEncoder(json.JSONEncoder):
    def __init__(self, **kwargs):
        super(CustomJSONEncoder, self).__init__(**kwargs)

    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(obj, datetime.date):
            return obj.strftime('%Y-%m-%d')
        elif isinstance(obj, datetime.timedelta):
            return str(obj)
        elif issubclass(type(obj), Enum) or issubclass(type(obj), IntEnum):
            return obj.value
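# Typical use of the encoder above via the standard json API (the sample
# payload is illustrative):
#
#   json.dumps({"when": datetime.datetime.now()}, cls=CustomJSONEncoder)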
def call_fun(func, config_data, dsl_path, config_path):
    ip = conf_utils.get_base_config(FATEFLOW_SERVICE_NAME).get("host")
    http_port = conf_utils.get_base_config(FATEFLOW_SERVICE_NAME).get("http_port")
    server_url = "http://{}:{}/{}".format(ip, http_port, API_VERSION)

    if func in JOB_OPERATE_FUNC:
        if func == 'submit_job':
            if not config_path:
                raise Exception('the following arguments are required: {}'.format('runtime conf path'))
            dsl_data = {}
            if dsl_path or config_data.get('job_parameters', {}).get('job_type', '') == 'predict':
                if dsl_path:
                    dsl_path = os.path.abspath(dsl_path)
                    with open(dsl_path, 'r') as f:
                        dsl_data = json.load(f)
            else:
                raise Exception('the following arguments are required: {}'.format('dsl path'))
            post_data = {'job_dsl': dsl_data, 'job_runtime_conf': config_data}
            response = requests.post("/".join([server_url, "job", func.rstrip('_job')]), json=post_data)
            try:
                if response.json()['retcode'] == 999:
                    start_cluster_standalone_job_server()
                    response = requests.post("/".join([server_url, "job", func.rstrip('_job')]), json=post_data)
            except Exception:
                pass
        elif func == 'data_view_query' or func == 'clean_queue':
            response = requests.post("/".join([server_url, "job", func.replace('_', '/')]), json=config_data)
        else:
            if func != 'query_job':
                detect_utils.check_config(config=config_data, required_arguments=['job_id'])
            post_data = config_data
            response = requests.post("/".join([server_url, "job", func.rstrip('_job')]), json=post_data)
            if func == 'query_job':
                response = response.json()
                if response['retcode'] == 0:
                    for i in range(len(response['data'])):
                        del response['data'][i]['f_runtime_conf']
                        del response['data'][i]['f_dsl']
    elif func in JOB_FUNC:
        if func == 'job_config':
            detect_utils.check_config(config=config_data,
                                      required_arguments=['job_id', 'role', 'party_id', 'output_path'])
            response = requests.post("/".join([server_url, func.replace('_', '/')]), json=config_data)
            response_data = response.json()
            if response_data['retcode'] == 0:
                job_id = response_data['data']['job_id']
                download_directory = os.path.join(config_data['output_path'], 'job_{}_config'.format(job_id))
                os.makedirs(download_directory, exist_ok=True)
                for k, v in response_data['data'].items():
                    if k == 'job_id':
                        continue
                    with open('{}/{}.json'.format(download_directory, k), 'w') as fw:
                        json.dump(v, fw, indent=4)
                del response_data['data']['dsl']
                del response_data['data']['runtime_conf']
                response_data['directory'] = download_directory
                response_data['retmsg'] = 'download successfully, please check {} directory'.format(
                    download_directory)
                response = response_data
        elif func == 'job_log':
            detect_utils.check_config(config=config_data, required_arguments=['job_id', 'output_path'])
            job_id = config_data['job_id']
            tar_file_name = 'job_{}_log.tar.gz'.format(job_id)
            extract_dir = os.path.join(config_data['output_path'], 'job_{}_log'.format(job_id))
            with closing(requests.get("/".join([server_url, func.replace('_', '/')]),
                                      json=config_data, stream=True)) as response:
                if response.status_code == 200:
                    download_from_request(http_response=response,
                                          tar_file_name=tar_file_name,
                                          extract_dir=extract_dir)
                    response = {
                        'retcode': 0,
                        'directory': extract_dir,
                        'retmsg': 'download successfully, please check {} directory'.format(extract_dir)
                    }
                else:
                    response = response.json()
    elif func in TASK_OPERATE_FUNC:
        response = requests.post("/".join([server_url, "job", "task", func.rstrip('_task')]),
                                 json=config_data)
    elif func in TRACKING_FUNC:
        if func != 'component_metric_delete':
            detect_utils.check_config(config=config_data,
                                      required_arguments=['job_id', 'component_name', 'role', 'party_id'])
        if func == 'component_output_data':
            detect_utils.check_config(config=config_data, required_arguments=['output_path'])
            tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(
                config_data['job_id'], config_data['component_name'],
                config_data['role'], config_data['party_id'])
            extract_dir = os.path.join(config_data['output_path'], tar_file_name.replace('.tar.gz', ''))
            with closing(requests.get("/".join([server_url, "tracking", func.replace('_', '/'), 'download']),
                                      json=config_data, stream=True)) as response:
                if response.status_code == 200:
                    try:
                        download_from_request(http_response=response,
                                              tar_file_name=tar_file_name,
                                              extract_dir=extract_dir)
                        response = {
                            'retcode': 0,
                            'directory': extract_dir,
                            'retmsg': 'download successfully, please check {} directory'.format(extract_dir)
                        }
                    except Exception:
                        response = {
                            'retcode': 100,
                            'retmsg': 'download failed, please check if the parameters are correct'
                        }
                else:
                    response = response.json()
        else:
            response = requests.post("/".join([server_url, "tracking", func.replace('_', '/')]),
                                     json=config_data)
    elif func in DATA_FUNC:
        if func == 'upload' and config_data.get('use_local_data', 1) != 0:
            file_name = config_data.get('file')
            if not os.path.isabs(file_name):
                file_name = os.path.join(file_utils.get_project_base_directory(), file_name)
            if os.path.exists(file_name):
                with open(file_name, 'rb') as fp:
                    data = MultipartEncoder(
                        fields={'file': (os.path.basename(file_name), fp, 'application/octet-stream')})
                    tag = [0]

                    def read_callback(monitor):
                        if config_data.get('verbose') == 1:
                            sys.stdout.write("\r UPLOADING:{0}{1}".format(
                                "|" * (monitor.bytes_read * 100 // monitor.len),
                                '%.2f%%' % (monitor.bytes_read * 100 // monitor.len)))
                            sys.stdout.flush()
                            if monitor.bytes_read / monitor.len == 1:
                                tag[0] += 1
                                if tag[0] == 2:
                                    sys.stdout.write('\n')

                    data = MultipartEncoderMonitor(data, read_callback)
                    response = requests.post(
                        "/".join([server_url, "data", func.replace('_', '/')]),
                        data=data,
                        params=json.dumps(config_data),
                        headers={'Content-Type': data.content_type})
            else:
                raise Exception('The file is obtained from the fate flow client machine, '
                                'but it does not exist, please check the path: {}'.format(file_name))
        else:
            response = requests.post("/".join([server_url, "data", func.replace('_', '/')]),
                                     json=config_data)
        try:
            if response.json()['retcode'] == 999:
                start_cluster_standalone_job_server()
                response = requests.post("/".join([server_url, "data", func]), json=config_data)
        except Exception:
            pass
    elif func in TABLE_FUNC:
        if func == "table_info":
            detect_utils.check_config(config=config_data, required_arguments=['namespace', 'table_name'])
            response = requests.post("/".join([server_url, "table", func]), json=config_data)
        else:
            response = requests.post("/".join([server_url, func.replace('_', '/')]), json=config_data)
    elif func in MODEL_FUNC:
        if func == "import":
            file_path = config_data["file"]
            if not os.path.isabs(file_path):
                file_path = os.path.join(file_utils.get_project_base_directory(), file_path)
            if os.path.exists(file_path):
                files = {'file': open(file_path, 'rb')}
            else:
                raise Exception('The file is obtained from the fate flow client machine, '
                                'but it does not exist, please check the path: {}'.format(file_path))
            response = requests.post("/".join([server_url, "model", func]), data=config_data, files=files)
        elif func == "export":
            with closing(requests.get("/".join([server_url, "model", func]),
                                      json=config_data, stream=True)) as response:
                if response.status_code == 200:
                    archive_file_name = re.findall("filename=(.+)",
                                                   response.headers["Content-Disposition"])[0]
                    os.makedirs(config_data["output_path"], exist_ok=True)
                    archive_file_path = os.path.join(config_data["output_path"], archive_file_name)
                    with open(archive_file_path, 'wb') as fw:
                        for chunk in response.iter_content(1024):
                            if chunk:
                                fw.write(chunk)
                    response = {
                        'retcode': 0,
                        'file': archive_file_path,
                        'retmsg': 'download successfully, please check {}'.format(archive_file_path)
                    }
                else:
                    response = response.json()
        else:
            response = requests.post("/".join([server_url, "model", func]), json=config_data)
    elif func in PERMISSION_FUNC:
        detect_utils.check_config(config=config_data, required_arguments=['src_party_id', 'src_role'])
        response = requests.post("/".join([server_url, "permission", func.replace('_', '/')]),
                                 json=config_data)
    return response.json() if isinstance(response, requests.models.Response) else response
def register_models(cls, zk, models):
    if not get_base_config('use_registry', False):
        return
    for model in models:
        cls.register(zk, model.f_party_model_id, model.f_model_version)
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
import typing
from enum import IntEnum

from peewee import Field, IntegerField, FloatField, BigIntegerField, TextField, Model, CompositeKey, Metadata

from fate_arch.common import conf_utils, EngineType
from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, timestamp_to_date, date_string_to_timestamp, json_dumps, json_loads
from fate_arch.federation import FederationEngine

is_standalone = conf_utils.get_base_config("default_engines", {}).get(
    EngineType.FEDERATION).upper() == FederationEngine.STANDALONE
if is_standalone:
    from playhouse.apsw_ext import DateTimeField
else:
    from peewee import DateTimeField

CONTINUOUS_FIELD_TYPE = {IntegerField, FloatField, DateTimeField}
AUTO_DATE_TIMESTAMP_FIELD_PREFIX = {"create", "start", "end", "update", "read_access", "write_access"}


class SerializedType(IntEnum):
    PICKLE = 1
    JSON = 2


class LongTextField(TextField):
def get_from_file(cls, service_name, default=None):
    return conf_utils.get_base_config(service_name, default)