Example #1
 def insert_data_to_db(self,
                       metric_namespace: str,
                       metric_name: str,
                       data_type: int,
                       kv,
                       job_level=False):
     with DB.connection_context():
         try:
             # resolve the per-job metric table and fill in the columns shared by every row
             tracking_metric = TrackingMetric.model(table_index=self.job_id)
             tracking_metric.f_job_id = self.job_id
             tracking_metric.f_component_name = self.component_name if not job_level else 'dag'
             tracking_metric.f_task_id = self.task_id
             tracking_metric.f_role = self.role
             tracking_metric.f_party_id = self.party_id
             tracking_metric.f_metric_namespace = metric_namespace
             tracking_metric.f_metric_name = metric_name
             tracking_metric.f_type = data_type
             # use the populated record as a template dict shared by every row
             default_db_source = tracking_metric.to_json()
             tracking_metric_data_source = []
             # each (key, value) pair becomes its own row, serialized to base64
             for k, v in kv:
                 db_source = default_db_source.copy()
                 db_source['f_key'] = serialize_b64(k)
                 db_source['f_value'] = serialize_b64(v)
                 db_source['f_create_time'] = current_timestamp()
                 tracking_metric_data_source.append(db_source)
             # insert all rows for this metric in one bulk operation
             self.bulk_insert_model_data(
                 TrackingMetric.model(table_index=self.get_table_index()),
                 tracking_metric_data_source)
         except Exception as e:
             schedule_logger(self.job_id).exception(e)
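A minimal calling sketch for the method above, assuming a tracker-style instance (here called `tracker`) that already carries `job_id`, `component_name`, `task_id`, `role`, and `party_id`; the metric names, values, and the numeric `data_type` code are illustrative assumptions, not values taken from the source:

 # hypothetical caller; `tracker` is assumed to be an instance of the class above
 kv_pairs = [("auc", 0.87), ("ks", 0.54)]   # any iterable of (key, value) pairs works
 tracker.insert_data_to_db(metric_namespace="train",
                           metric_name="evaluation",
                           data_type=1,     # assumed type code, not defined in this example
                           kv=kv_pairs,
                           job_level=False)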
Example #2
 def save_metric_meta_remote(self,
                             metric_namespace: str,
                             metric_name: str,
                             metric_meta: MetricMeta,
                             job_level: bool = False):
     # TODO: in the next version this will be moved to the tracking api module in the arch/api package
     schedule_logger(self.job_id).info(
         'request save job {} component {} on {} {} {} {} metric meta'.
         format(self.job_id, self.component_name, self.role, self.party_id,
                metric_namespace, metric_name))
     request_body = dict()
     request_body['metric_namespace'] = metric_namespace
     request_body['metric_name'] = metric_name
     # serialize the MetricMeta object to a base64 string so it can travel in the JSON body
     request_body['metric_meta'] = serialize_b64(metric_meta, to_str=True)
     request_body['job_level'] = job_level
     response = api_utils.local_api(
         method='POST',
         endpoint='/{}/tracking/{}/{}/{}/{}/{}/metric_meta/save'.format(
             API_VERSION, self.job_id, self.component_name, self.task_id,
             self.role, self.party_id),
         json_body=request_body)
     # a retcode of 0 indicates the remote save succeeded
     return response['retcode'] == 0
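A hedged usage sketch; `tracker` is again a hypothetical instance of the class above, and the `MetricMeta` constructor arguments are assumptions about the tracking types rather than something shown in this example:

 # hypothetical caller; the MetricMeta fields below are assumed
 meta = MetricMeta(name="auc", metric_type="EVALUATION_SUMMARY")
 ok = tracker.save_metric_meta_remote(metric_namespace="train",
                                      metric_name="auc",
                                      metric_meta=meta,
                                      job_level=False)
 if not ok:
     raise RuntimeError("remote metric meta save returned a non-zero retcode")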
Example #3
 def store(self,
           model_id: str,
           model_version: str,
           store_address: dict,
           force_update: bool = False):
     """
     Store the model from local cache to mysql
     :param model_id:
     :param model_version:
     :param store_address:
     :param force_update:
     :return:
     """
     try:
         self.get_connection(config=store_address)
         DB.create_tables([MachineLearningModel])
         model = PipelinedModel(model_id=model_id,
                                model_version=model_version)
         LOGGER.info("start store model {} {}".format(
             model_id, model_version))
         with DB.connection_context():
             # read the packaged model file in fixed-size slices and store each slice as one row
             with open(model.packaging_model(), "rb") as fr:
                 slice_index = 0
                 while True:
                     content = fr.read(SLICE_MAX_SIZE)
                     if content:
                         model_in_table = MachineLearningModel()
                         model_in_table.f_create_time = current_timestamp()
                         model_in_table.f_model_id = model_id
                         model_in_table.f_model_version = model_version
                         model_in_table.f_content = serialize_b64(content)
                         model_in_table.f_size = sys.getsizeof(
                             model_in_table.f_content)
                         model_in_table.f_slice_index = slice_index
                         if force_update:
                             model_in_table.save(only=[
                                 MachineLearningModel.f_content,
                                 MachineLearningModel.f_size,
                                 MachineLearningModel.f_update_time,
                                 MachineLearningModel.f_slice_index
                             ])
                             LOGGER.info(
                                 "update model {} {} slice index {} content"
                                 .format(model_id, model_version,
                                         slice_index))
                         else:
                             model_in_table.save(force_insert=True)
                             LOGGER.info(
                                 "insert model {} {} slice index {} content".
                                 format(model_id, model_version, slice_index))
                         slice_index += 1
                     else:
                         break
                 LOGGER.info(
                     "Store model {} {} to mysql successfully".format(
                         model_id, model_version))
         self.close_connection()
     except Exception as e:
         LOGGER.exception(e)
         raise Exception("Store model {} {} to mysql failed".format(
             model_id, model_version))
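A usage sketch for `store`; the class name `MysqlModelStorage`, the keys inside `store_address` (they depend entirely on what `get_connection` expects), and both ids are assumptions used only for illustration:

 # hypothetical call; the store_address keys below are assumed MySQL settings
 store_address = {"user": "fate", "passwd": "fate_dev",
                  "host": "127.0.0.1", "port": 3306, "name": "fate_flow"}
 storage = MysqlModelStorage()   # assumed concrete class exposing store()
 storage.store(model_id="arbiter-10000#guest-9999#host-10000#model",
               model_version="202401010000000000",
               store_address=store_address,
               force_update=True)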