# Pipeline example: hetero k-means with two hosts (standalone example script).
import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataIO
from pipeline.component import Evaluation
from pipeline.component import HeteroKmeans
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.runtime.entity import JobParameters
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    hosts = parties.host
    arbiter = parties.arbiter[0]
    backend = config.backend
    work_mode = config.work_mode

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = [{"name": "breast_hetero_host", "namespace": f"experiment{namespace}"},
                       {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}]

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=hosts, arbiter=arbiter)

    # define Reader component to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(
        role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for each host
    reader_0.get_party_instance(
        role='host', party_id=hosts[0]).component_param(table=host_train_data[0])
    reader_0.get_party_instance(
        role='host', party_id=hosts[1]).component_param(table=host_train_data[1])

    # define DataIO component
    dataio_0 = DataIO(name="dataio_0")  # start component numbering at 0

    # get DataIO party instance of guest
    dataio_0_guest_party_instance = dataio_0.get_party_instance(role='guest', party_id=guest)
    # configure DataIO for guest
    dataio_0_guest_party_instance.component_param(with_label=True, output_format="dense")
    # get and configure DataIO party instance of each host
    dataio_0.get_party_instance(
        role='host', party_id=hosts[0]).component_param(with_label=False)
    dataio_0.get_party_instance(
        role='host', party_id=hosts[1]).component_param(with_label=False)

    # define Intersection component
    intersection_0 = Intersection(name="intersection_0")

    param = {
        "k": 3,
        "max_iter": 10
    }
    hetero_kmeans_0 = HeteroKmeans(name='hetero_kmeans_0', **param)
    evaluation_0 = Evaluation(name='evaluation_0', eval_type='clustering')

    # add components to pipeline, in order of task execution
    pipeline.add_component(reader_0)
    pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
    # set data input source of intersection component
    pipeline.add_component(intersection_0, data=Data(data=dataio_0.output.data))
    pipeline.add_component(hetero_kmeans_0, data=Data(train_data=intersection_0.output.data))
    # print(f"data: {hetero_kmeans_0.output.data.data[0]}")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_kmeans_0.output.data.data[0]))

    # compile pipeline once finished adding modules, this step will form conf and dsl files for running job
    pipeline.compile()

    # fit model
    job_parameters = JobParameters(backend=backend, work_mode=work_mode)
    pipeline.fit(job_parameters)
    # query component summary
    print(pipeline.get_component("hetero_kmeans_0").get_summary())
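# The config.yaml consumed by load_job_config above supplies the parties,
# backend, and work_mode fields read at the top of main(). An illustrative
# shape is sketched below; the party ids are placeholders, not values from
# this example, and real values depend on your FATE deployment:
#
#   parties:
#     guest: [9999]
#     host: [10000, 10001]   # this example expects two hosts
#     arbiter: [10000]
#   backend: 0               # 0: EGGROLL, 1: SPARK
#   work_mode: 0             # 0: standalone, 1: cluster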
# Pipeline example: hetero k-means with feature binning and feature selection
# on a single host (standalone example script).
import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataIO
from pipeline.component import Evaluation
from pipeline.component import HeteroFeatureBinning
from pipeline.component import HeteroFeatureSelection
from pipeline.component import HeteroKmeans
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.runtime.entity import JobParameters
from pipeline.utils.tools import load_job_config


def main(config="../../config.yaml", namespace=""):
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]
    backend = config.backend
    work_mode = config.work_mode

    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader component to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(
        role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(
        role='host', party_id=host).component_param(table=host_train_data)

    # define DataIO component
    dataio_0 = DataIO(name="dataio_0")  # start component numbering at 0

    # get DataIO party instance of guest
    dataio_0_guest_party_instance = dataio_0.get_party_instance(role='guest', party_id=guest)
    # configure DataIO for guest
    dataio_0_guest_party_instance.component_param(with_label=True, output_format="dense")
    # get and configure DataIO party instance of host
    dataio_0.get_party_instance(
        role='host', party_id=host).component_param(with_label=False)

    # define Intersection component
    intersection_0 = Intersection(name="intersection_0")

    param = {
        "name": 'hetero_feature_binning_0',
        "method": 'optimal',
        "optimal_binning_param": {
            "metric_method": "iv"
        },
        "bin_indexes": -1
    }
    hetero_feature_binning_0 = HeteroFeatureBinning(**param)

    param = {
        "name": 'hetero_feature_selection_0',
        "filter_methods": ["manually", "iv_filter"],
        "manually_param": {
            "filter_out_indexes": [1]
        },
        "iv_param": {
            "metrics": ["iv", "iv"],
            "filter_type": ["top_k", "threshold"],
            "take_high": [True, True],
            "threshold": [10, 0.001]
        },
        "select_col_indexes": -1
    }
    hetero_feature_selection_0 = HeteroFeatureSelection(**param)

    param = {
        "k": 3,
        "max_iter": 10
    }
    hetero_kmeans_0 = HeteroKmeans(name='hetero_kmeans_0', **param)
    evaluation_0 = Evaluation(name='evaluation_0', eval_type='clustering')

    # add components to pipeline, in order of task execution
    pipeline.add_component(reader_0)
    pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
    # set data input source of intersection component
    pipeline.add_component(intersection_0, data=Data(data=dataio_0.output.data))
    # run feature binning on the intersected data, then feature selection,
    # which takes the binning output as its isometric model
    pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
    pipeline.add_component(
        hetero_feature_selection_0,
        data=Data(data=intersection_0.output.data),
        model=Model(isometric_model=hetero_feature_binning_0.output.model))
    # set train data of hetero_kmeans_0 component
    pipeline.add_component(
        hetero_kmeans_0,
        data=Data(train_data=hetero_feature_selection_0.output.data))
    print(f"data: {hetero_kmeans_0.output.data.data[0]}")
    pipeline.add_component(evaluation_0, data=Data(data=hetero_kmeans_0.output.data.data[0]))

    # compile pipeline once finished adding modules, this step will form conf and dsl files for running job
    pipeline.compile()

    # fit model
    job_parameters = JobParameters(backend=backend, work_mode=work_mode)
    pipeline.fit(job_parameters)
    # query component summary
    print(pipeline.get_component("hetero_kmeans_0").get_summary())
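# A minimal command-line entry point for running an example like the ones
# above. This is a sketch following the argparse convention used by FATE's
# pipeline demo scripts; the "-config" flag name is an assumption, not part
# of the examples themselves.
if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()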