Example 1
 def _load_pipeline(self):
     buffer_type = "Pipeline"
     pipeline_obj = pipeline_pb2.Pipeline()
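     # Fill the empty Pipeline message from the model stored under the
     # workflow's model table and namespace.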
     pipeline_obj = model_manager.read_model(buffer_type=buffer_type,
                                             proto_buffer=pipeline_obj,
                                             name=self.workflow_param.model_table,
                                             namespace=self.workflow_param.model_namespace)
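     # Convert the node_meta and node_param fields to plain Python lists.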
     pipeline_obj.node_meta = list(pipeline_obj.node_meta)
     pipeline_obj.node_param = list(pipeline_obj.node_param)
     self.pipeline = pipeline_obj
Example 2
 def save_pipeline(job_id, role, party_id, model_id, model_version):
     job_dsl, job_runtime_conf, train_runtime_conf = job_utils.get_job_configuration(job_id=job_id, role=role,
                                                                                     party_id=party_id)
     job_parameters = job_runtime_conf.get('job_parameters', {})
     job_type = job_parameters.get('job_type', '')
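     # Nothing to save for a predict job.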
     if job_type == 'predict':
         return
     dag = job_utils.get_job_dsl_parser(dsl=job_dsl,
                                        runtime_conf=job_runtime_conf,
                                        train_runtime_conf=train_runtime_conf)
     predict_dsl = dag.get_predict_dsl(role=role)
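     # Pack the training DSL, runtime conf and derived inference DSL
     # into a Pipeline message as serialized JSON.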
     pipeline = pipeline_pb2.Pipeline()
     pipeline.inference_dsl = json_dumps(predict_dsl, byte=True)
     pipeline.train_dsl = json_dumps(job_dsl, byte=True)
     pipeline.train_runtime_conf = json_dumps(job_runtime_conf, byte=True)
     job_tracker = Tracking(job_id=job_id, role=role, party_id=party_id, model_id=model_id,
                            model_version=model_version)
     job_tracker.save_output_model({'Pipeline': pipeline}, 'pipeline')
Example 3
 def _init_pipeline(self):
     pipeline_obj = pipeline_pb2.Pipeline()
     # pipeline_obj.node_meta = []
     # pipeline_obj.node_param = []
     self.pipeline = pipeline_obj
     LOGGER.debug("finish init pipeline")
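The examples above create or fill Pipeline messages only through project helpers (model_manager, Tracking). As a minimal, self-contained sketch of the same data structure, the following round-trips a Pipeline message with the standard protobuf API. The import path of pipeline_pb2 is an assumption (it depends on the project layout), and train_dsl / train_runtime_conf are treated as bytes fields, as implied by json_dumps(..., byte=True) in Example 2.

 import json

 import pipeline_pb2  # hypothetical import path; depends on the project layout

 pipeline = pipeline_pb2.Pipeline()
 # bytes fields, mirroring json_dumps(..., byte=True) in Example 2
 pipeline.train_dsl = json.dumps({"components": {}}).encode("utf-8")
 pipeline.train_runtime_conf = json.dumps({"job_parameters": {}}).encode("utf-8")

 # Standard protobuf serialization round-trip.
 data = pipeline.SerializeToString()
 restored = pipeline_pb2.Pipeline()
 restored.ParseFromString(data)
 print(json.loads(restored.train_dsl))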