Example 1
 def save_data_view(self, role, party_id, data_info, mark=False):
     with DB.connection_context():
         data_views = DataView.select().where(
             DataView.f_job_id == self.job_id,
             DataView.f_component_name == self.component_name,
             DataView.f_task_id == self.task_id, DataView.f_role == role,
             DataView.f_party_id == party_id)
         is_insert = True
         # With mark=True, skip saving a data view for the upload component
         if mark and self.component_name == "upload_0":
             return
         if data_views:
             data_view = data_views[0]
             is_insert = False
         else:
             data_view = DataView()
             data_view.f_create_time = current_timestamp()
         data_view.f_job_id = self.job_id
         data_view.f_component_name = self.component_name
         data_view.f_task_id = self.task_id
         data_view.f_role = role
         data_view.f_party_id = party_id
         data_view.f_update_time = current_timestamp()
         # Copy the remaining fields, skipping key columns and untouched defaults
         for k, v in data_info.items():
             if k in [
                     'f_job_id', 'f_component_name', 'f_task_id', 'f_role',
                     'f_party_id'
             ] or v == getattr(DataView, k).default:
                 continue
             setattr(data_view, k, v)
         if is_insert:
             data_view.save(force_insert=True)
         else:
             data_view.save()
         return data_view
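
Examples 1, 2, and 5 all follow the same select-then-save upsert pattern with peewee: query for an existing row, update it if found, otherwise create a new instance and force an INSERT. A minimal self-contained sketch of that pattern, assuming peewee 3; the Counter model and upsert_counter helper are hypothetical, not part of FATE:

import peewee

db = peewee.SqliteDatabase(":memory:")

class Counter(peewee.Model):
    name = peewee.CharField(unique=True)
    value = peewee.IntegerField(default=0)

    class Meta:
        database = db

def upsert_counter(name, value):
    rows = Counter.select().where(Counter.name == name)
    if rows:
        counter = rows[0]   # existing row: a plain save() issues an UPDATE
        counter.value = value
        counter.save()
    else:
        counter = Counter(name=name, value=value)
        counter.save(force_insert=True)  # new row: force an INSERT
    return counter

db.create_tables([Counter])
upsert_counter("jobs_seen", 1)
upsert_counter("jobs_seen", 2)  # the second call updates the same row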
Example 2
    def save_job_info(self, role, party_id, job_info, create=False):
        with DB.connection_context():
            schedule_logger(self.job_id).info('save {} {} job: {}'.format(
                role, party_id, job_info))
            jobs = Job.select().where(Job.f_job_id == self.job_id,
                                      Job.f_role == role,
                                      Job.f_party_id == party_id)
            is_insert = True
            if jobs:
                job = jobs[0]
                is_insert = False
                if job.f_status == JobStatus.TIMEOUT:
                    return None
            elif create:
                job = Job()
                job.f_create_time = current_timestamp()
            else:
                return None
            job.f_job_id = self.job_id
            job.f_role = role
            job.f_party_id = party_id
            if 'f_status' in job_info:
                if job.f_status in [JobStatus.COMPLETE, JobStatus.FAILED]:
                    # A job already in a terminal state cannot be updated
                    return
                if (job_info['f_status'] in [
                        JobStatus.FAILED, JobStatus.TIMEOUT
                ]) and (not job.f_end_time):
                    if not job.f_start_time:
                        return
                    job_info['f_end_time'] = current_timestamp()
                    job_info['f_elapsed'] = job_info[
                        'f_end_time'] - job.f_start_time
                    job_info['f_update_time'] = current_timestamp()

                if (job_info['f_status'] in [
                        JobStatus.FAILED, JobStatus.TIMEOUT,
                        JobStatus.CANCELED, JobStatus.COMPLETE
                ]):
                    job_info['f_tag'] = 'job_end'
            update_fields = []
            for k, v in job_info.items():
                try:
                    # Skip key columns and values still equal to the model default
                    if k in ['f_job_id', 'f_role', 'f_party_id'] \
                            or v == getattr(Job, k).default:
                        continue
                    setattr(job, k, v)
                    update_fields.append(getattr(Job, k))
                except Exception:
                    # Keys without a matching Job column are ignored
                    pass

            if is_insert:
                job.save(force_insert=True)
            else:
                job.save(only=update_fields)
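
The interesting detail in Example 2 is save(only=update_fields): collecting the touched Field objects and passing them to save() limits the UPDATE statement to exactly those columns. A minimal sketch, assuming peewee 3; the User model is hypothetical:

import peewee

db = peewee.SqliteDatabase(":memory:")

class User(peewee.Model):
    name = peewee.CharField()
    login_count = peewee.IntegerField(default=0)

    class Meta:
        database = db

db.create_tables([User])
user = User.create(name="alice")
user.login_count += 1
user.name = "bob"                   # modified in memory only
user.save(only=[User.login_count])  # the UPDATE writes login_count, not name

This matters when several parties update the same job row concurrently: a partial UPDATE cannot clobber columns it never read.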
Example 3
 def insert_data_to_db(self,
                       metric_namespace: str,
                       metric_name: str,
                       data_type: int,
                       kv,
                       job_level=False):
     with DB.connection_context():
         try:
             tracking_metric = TrackingMetric.model(table_index=self.job_id)
             tracking_metric.f_job_id = self.job_id
             tracking_metric.f_component_name = self.component_name if not job_level else 'dag'
             tracking_metric.f_task_id = self.task_id
             tracking_metric.f_role = self.role
             tracking_metric.f_party_id = self.party_id
             tracking_metric.f_metric_namespace = metric_namespace
             tracking_metric.f_metric_name = metric_name
             tracking_metric.f_type = data_type
             default_db_source = tracking_metric.to_json()
             tracking_metric_data_source = []
             # One row per (key, value) pair, with both sides base64-serialized
             for k, v in kv:
                 db_source = default_db_source.copy()
                 db_source['f_key'] = serialize_b64(k)
                 db_source['f_value'] = serialize_b64(v)
                 db_source['f_create_time'] = current_timestamp()
                 tracking_metric_data_source.append(db_source)
             self.bulk_insert_model_data(
                 TrackingMetric.model(table_index=self.get_table_index()),
                 tracking_metric_data_source)
         except Exception as e:
             schedule_logger(self.job_id).exception(e)
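
Example 3 depends on two helpers that are not shown: serialize_b64, which lets arbitrary metric keys and values be stored in text columns, and bulk_insert_model_data, which writes the accumulated rows in one pass. A plausible sketch of the bulk helper, assuming peewee 3; the batch size is an arbitrary choice, not FATE's:

def bulk_insert_model_data(model, data_source, batch_size=500):
    # Insert row dictionaries in bounded batches so a long metric series
    # does not become one enormous INSERT statement
    with DB.connection_context():
        for start in range(0, len(data_source), batch_size):
            model.insert_many(data_source[start:start + batch_size]).execute()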
Example 4
def update_job_progress(job_id, dag, current_task_id):
    role, party_id = query_job_info(job_id)
    component_count = len(
        dag.get_dependency(role=role,
                           party_id=int(party_id))['component_list'])
    success_count = success_task_count(job_id=job_id)
    job = Job()
    # Percentage of the DAG's components that have finished successfully
    job.f_progress = float(success_count) / component_count * 100
    job.f_update_time = current_timestamp()
    job.f_current_tasks = json_dumps([current_task_id])
    return job
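
Example 4 only computes the progress figure; it does not persist it. The formula isolated, with a worked value:

def job_progress(success_count, component_count):
    # e.g. 2 finished components out of 5 -> 40.0 percent
    return float(success_count) / component_count * 100

Note that component_count comes from the DAG's component_list, so a DAG that resolves to zero components would divide by zero; the caller presumably guarantees a non-empty pipeline.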
Example 5
 def save_task(self, role, party_id, task_info):
     with DB.connection_context():
         tasks = Task.select().where(
             Task.f_job_id == self.job_id,
             Task.f_component_name == self.component_name,
             Task.f_task_id == self.task_id, Task.f_role == role,
             Task.f_party_id == party_id)
         is_insert = True
         if tasks:
             task = tasks[0]
             is_insert = False
         else:
             task = Task()
             task.f_create_time = current_timestamp()
         task.f_job_id = self.job_id
         task.f_component_name = self.component_name
         task.f_task_id = self.task_id
         task.f_role = role
         task.f_party_id = party_id
         if 'f_status' in task_info:
             if task.f_status in [TaskStatus.COMPLETE, TaskStatus.FAILED]:
                 # TODO: termination status should not be updated, but this
                 # check is currently a no-op
                 pass
         for k, v in task_info.items():
             try:
                 # Skip key columns and values still equal to the model default
                 if k in ['f_job_id', 'f_component_name', 'f_task_id',
                          'f_role', 'f_party_id'] or v == getattr(Task, k).default:
                     continue
             except AttributeError:
                 # Keys without a matching Task column fall through and are
                 # still set as plain attributes below
                 pass
             setattr(task, k, v)
         if is_insert:
             task.save(force_insert=True)
         else:
             task.save()
         return task
Example 6
 def save(self, *args, **kwargs):
     # Refresh the update timestamp fields on every save
     if hasattr(self, "update_date"):
         self.update_date = datetime.datetime.now()
     if hasattr(self, "update_time"):
         self.update_time = current_timestamp()
     super(DataBaseModel, self).save(*args, **kwargs)
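
Example 6 is a base-model hook that stamps update_date/update_time on every save, so callers never have to remember it. A minimal sketch of the same pattern, assuming peewee 3; BaseModel, Document, and the millisecond convention are assumptions based on the surrounding code:

import time
import peewee

db = peewee.SqliteDatabase(":memory:")

def current_timestamp():
    # Milliseconds since the epoch, matching the convention in these examples
    return int(time.time() * 1000)

class BaseModel(peewee.Model):
    class Meta:
        database = db

    def save(self, *args, **kwargs):
        # Stamp the row on every save
        if hasattr(self, "update_time"):
            self.update_time = current_timestamp()
        return super().save(*args, **kwargs)

class Document(BaseModel):
    title = peewee.CharField()
    update_time = peewee.BigIntegerField(null=True)

db.create_tables([Document])
doc = Document(title="demo")
doc.save()  # doc.update_time is filled in automatically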
Example 7
    def submit_job(job_data, job_id=None):
        if not job_id:
            job_id = generate_job_id()
        schedule_logger(job_id).info('submit job, job_id {}, body {}'.format(job_id, job_data))
        job_dsl = job_data.get('job_dsl', {})
        job_runtime_conf = job_data.get('job_runtime_conf', {})
        job_utils.check_pipeline_job_runtime_conf(job_runtime_conf)
        job_parameters = job_runtime_conf['job_parameters']
        job_initiator = job_runtime_conf['initiator']
        job_type = job_parameters.get('job_type', '')
        if job_type != 'predict':
            # generate job model info
            job_parameters['model_id'] = '#'.join([dtable_utils.all_party_key(job_runtime_conf['role']), 'model'])
            job_parameters['model_version'] = job_id
            train_runtime_conf = {}
        else:
            detect_utils.check_config(job_parameters, ['model_id', 'model_version'])
            # get inference dsl from pipeline model as job dsl
            job_tracker = Tracking(job_id=job_id, role=job_initiator['role'], party_id=job_initiator['party_id'],
                                   model_id=job_parameters['model_id'], model_version=job_parameters['model_version'])
            pipeline_model = job_tracker.get_output_model('pipeline')
            job_dsl = json_loads(pipeline_model['Pipeline'].inference_dsl)
            train_runtime_conf = json_loads(pipeline_model['Pipeline'].train_runtime_conf)
        path_dict = save_job_conf(job_id=job_id,
                                  job_dsl=job_dsl,
                                  job_runtime_conf=job_runtime_conf,
                                  train_runtime_conf=train_runtime_conf,
                                  pipeline_dsl=None)

        job = Job()
        job.f_job_id = job_id
        job.f_roles = json_dumps(job_runtime_conf['role'])
        job.f_work_mode = job_parameters['work_mode']
        job.f_initiator_party_id = job_initiator['party_id']
        job.f_dsl = json_dumps(job_dsl)
        job.f_runtime_conf = json_dumps(job_runtime_conf)
        job.f_train_runtime_conf = json_dumps(train_runtime_conf)
        job.f_run_ip = ''
        job.f_status = JobStatus.WAITING
        job.f_progress = 0
        job.f_create_time = current_timestamp()

        initiator_role = job_initiator['role']
        initiator_party_id = job_initiator['party_id']
        if initiator_party_id not in job_runtime_conf['role'][initiator_role]:
            schedule_logger(job_id).info("initiator party id error: {}".format(initiator_party_id))
            raise Exception("initiator party id error: {}".format(initiator_party_id))

        # Build the DSL parser once to validate the job configuration
        get_job_dsl_parser(dsl=job_dsl,
                           runtime_conf=job_runtime_conf,
                           train_runtime_conf=train_runtime_conf)

        TaskScheduler.distribute_job(job=job, roles=job_runtime_conf['role'], job_initiator=job_initiator)

        # push into queue
        job_event = job_utils.job_event(job_id, initiator_role, initiator_party_id)
        try:
            RuntimeConfig.JOB_QUEUE.put_event(job_event)
        except Exception as e:
            raise Exception('push job into queue failed') from e

        schedule_logger(job_id).info(
            'submit job successfully, job id is {}, model id is {}'.format(job.f_job_id, job_parameters['model_id']))
        board_url = BOARD_DASHBOARD_URL.format(job_id, job_initiator['role'], job_initiator['party_id'])
        logs_directory = get_job_log_directory(job_id)
        return job_id, path_dict['job_dsl_path'], path_dict['job_runtime_conf_path'], logs_directory, \
               {'model_id': job_parameters['model_id'], 'model_version': job_parameters['model_version']}, board_url
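
A hypothetical caller of Example 7's submit_job, to make the shape of the inputs and the six-element return value concrete; the class name JobController and every value below are assumptions for illustration only:

job_data = {
    'job_dsl': {},  # component wiring; empty here for brevity
    'job_runtime_conf': {
        'initiator': {'role': 'guest', 'party_id': 9999},
        'role': {'guest': [9999], 'host': [10000]},
        'job_parameters': {'work_mode': 1},
    },
}
job_id, dsl_path, conf_path, logs_dir, model_info, board_url = \
    JobController.submit_job(job_data)
print(model_info['model_id'], model_info['model_version'])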