コード例 #1
0
    def run(self):
        """Run the Winograd autotune console tool and track the job status in the DB."""
        session = CeleryDBAdapter.session()
        winograd_autotune_job = session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
        new_model = session.query(TopologiesModel).get(winograd_autotune_job.result_model_id)
        # Resolve the output path while the session is still open (lazy ORM
        # attributes are unavailable once the session is closed).
        tuned_path = new_model.path
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.running, session)
        session.close()
        self.emit_message.emit_progress()
        parameters = WinogradParameters(self.emit_message.config)
        parameters.set_parameter('o', tuned_path)
        parser = WinogradConsoleOutputParser(self.emit_message, WinogradToolStages.get_stages())

        return_code, message = run_console_tool(parameters, parser)
        if return_code:
            # Fall back to the parser-captured error when the tool printed nothing.
            message = parser.get_error() if not message else message
            error_message = WinogradErrorMessageProcessor.recognize_error(message, 'winograd autotuner')
            self.emit_message.add_error(error_message)
            raise WinogradAutotuneError(error_message, self.emit_message.job_id)
        session = CeleryDBAdapter.session()
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.ready, session)
        winograd_autotune_job = session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
        winograd_autotune_job.progress = 100
        write_record(winograd_autotune_job, session)
        # NOTE(review): ready status is set twice; the second call with force=True
        # presumably overrides any concurrent status change — confirm intent.
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.ready, session, force=True)
        session.close()
        self.emit_message.emit_message()
コード例 #2
0
    def run(self):
        """Run Model Optimizer in analysis mode and persist the scan outcome."""
        emit_msg = self.emit_message
        config = emit_msg.config

        session = CeleryDBAdapter.session()

        topology = session.query(TopologiesModel).get(config.topology_id)
        log.debug('[ MODEL OPTIMIZER ] Analyzing model %s', topology.name)

        # Mark the scan job as running before launching the tool.
        mo_job_record = session.query(ModelOptimizerScanJobsModel).filter_by(topology_id=config.topology_id).first()
        mo_job_id = mo_job_record.job_id
        mo_job_record.status = StatusEnum.running
        write_record(mo_job_record, session)

        resolve_file_args(emit_msg.job_id, config, topology)
        session.close()

        # ANALYSIS_JSON_PRINT makes Model Optimizer emit analysis JSON instead of converting.
        parameters = ModelOptimizerParameters(config.mo_args, {'MO_ENABLED_TRANSFORMS': 'ANALYSIS_JSON_PRINT'})

        parser = ModelOptimizerScanParser(self.emit_message, ModelOptimizerStages.get_stages())

        return_code, message = run_console_tool(parameters, parser, self)

        if return_code:
            # Try to extract a short human-readable reason from the tool output.
            match = re.search(r': (.+)\.\s+For more information please refer to Model Optimizer FAQ', message)
            if match:
                short_error_message = match.group(1)
            elif 'FRAMEWORK ERROR' in message:
                short_error_message = 'Invalid topology'
            else:
                short_error_message = 'Model Optimizer Scan failed'

            log.error('[ MODEL OPTIMIZER ] [ ERROR ]: %s', short_error_message)

            # Persist both the short and the cleaned-up detailed error message.
            session = CeleryDBAdapter.session()
            set_status_in_db(ModelOptimizerScanJobsModel, mo_job_id, StatusEnum.error, session, short_error_message)
            mo_job_record = session.query(ModelOptimizerScanJobsModel).filter_by(topology_id=config.topology_id).first()
            mo_job_record.error_message = short_error_message
            # Collapse blank-line runs, then strip everything up to the "ERROR ]" tag.
            mo_job_record.detailed_error_message = re.sub(r'^.*ERROR \]\s*', '', re.sub(r'(\n\s*)+', '\n', message))
            write_record(mo_job_record, session)

            set_status_in_db(TopologiesModel, emit_msg.job_id, StatusEnum.error, session, short_error_message)
            session.close()

            self.emit_message.emit_message()

            raise ModelOptimizerError(short_error_message, self.emit_message.job_id)

        # Success path: mark the scan job completed.
        session = CeleryDBAdapter.session()
        mo_job_record = session.query(ModelOptimizerScanJobsModel).get(mo_job_id)
        mo_job_record.progress = 100
        mo_job_record.status = StatusEnum.ready
        write_record(mo_job_record, session)
        session.close()

        self.emit_message.emit_message()
コード例 #3
0
    def run(self):
        """Generate a synthetic image dataset and track progress in the DB.

        Fix: the annotation file was re-opened in append mode on every loop
        iteration; it is now opened once for the whole generation loop.
        """
        parameters = self.emit_message.config
        dataset_id = self.emit_message.job_id
        current_job = self.emit_message.add_stage(
            IEmitMessageStage('Setup dataset parameters', weight=0.1))
        session = CeleryDBAdapter.session()
        dataset = session.query(DatasetsModel).get(self.emit_message.job_id)
        dataset.status = StatusEnum.running
        dataset_path = dataset.path
        write_record(dataset, session)
        session.close()
        image_size = parameters.width * parameters.height * parameters.channels
        create_empty_dir(dataset_path)
        try:
            random_generator = DistributionLaw(
                parameters.dist_law, parameters.params_dist).random_generator
        except AssertionError as exception:
            # Surface invalid distribution parameters to the client before re-raising.
            self.emit_message.add_error(str(exception))
            raise
        self.emit_message.update_progress(current_job, 100)
        current_job = self.emit_message.add_stage(
            IEmitMessageStage('Generate dataset', weight=0.9))
        log.debug('Starting of generating dataset %s', dataset_id)
        # Emit a progress update roughly every 10% of the generated images.
        progress_step = np.ceil(parameters.size / 10)
        annotation_path = os.path.join(dataset_path, parameters.name + '.txt')
        # Open the annotation file once instead of re-opening it per image.
        with open(annotation_path, 'a') as desc_file:
            for index in range(self.emit_message.config.size):
                file_name = os.path.join(dataset_path, '{}.jpg'.format(index))
                cv2.imwrite(
                    file_name,
                    random_generator(image_size).reshape(
                        parameters.height, parameters.width,
                        parameters.channels).astype(np.uint8))
                percent = (index / (parameters.size + 2)) * 100

                if index % progress_step == 0:
                    self.emit_message.update_progress(current_job, percent)
                desc_file.write('{}.jpg 0\n'.format(index))

        session = CeleryDBAdapter.session()
        dataset = session.query(DatasetsModel).get(self.emit_message.job_id)
        dataset.progress = 100
        dataset.status = StatusEnum.ready
        dataset.size = get_size_of_files(dataset_path)
        write_record(dataset, session)
        session.close()

        self.emit_message.update_progress(current_job, 100)
        log.debug('Finish of generating dataset %s', dataset_id)
コード例 #4
0
 def full_json(self):
     """Build the full JSON payload describing this Winograd autotune job."""
     session = CeleryDBAdapter.session()
     job_record = session.query(WinogradAutotuneJobsModel).filter_by(job_id=self.job_id).first()
     progress = job_record.progress
     status = job_record.status.value
     error_message = job_record.error_message
     project_record = session.query(ProjectsModel).filter_by(
         model_id=job_record.result_model_id,
         dataset_id=self.config.dataset_id,
         target=DevicesEnum(self.config.device),
         optimization_type=OptimizationTypesEnum(self.job.job_type.value)
     ).first()
     session.close()
     # Build the nested status block first, then the whole payload.
     status_block = {
         'name': status,
         'progress': progress
     }
     if error_message:
         status_block['errorMessage'] = error_message
     return {
         'creationTimestamp': self.date,
         'jobId': self.job_id,
         'type': self.job.job_type.value,
         'config': self.config.json(),
         'projectId': project_record.id,
         'originalModelId': get_top_level_model_id(project_record.id),
         'status': status_block
     }
コード例 #5
0
 def set_task_id(self, task_id):
     """Persist the Celery task id on the resulting topology record."""
     model_id = self.emit_message.config.result_model_id
     session = CeleryDBAdapter.session()
     topology = session.query(TopologiesModel).get(model_id)
     topology.task_id = task_id
     write_record(topology, session)
     session.close()
コード例 #6
0
 def update_progress_in_database(self, progress):
     """Store the given progress value on the int8 autotune job record."""
     session = CeleryDBAdapter.session()
     job_record = (session.query(Int8AutotuneJobsModel)
                   .filter_by(job_id=self.job_id)
                   .first())
     job_record.progress = progress
     write_record(job_record, session)
     session.close()
コード例 #7
0
ファイル: ijob.py プロジェクト: nathanbangwa243/VLinder-AI
 def set_task_id(self, task_id: str):
     """Attach the Celery task id to this job's DB record, if the record exists."""
     session = CeleryDBAdapter.session()
     job_record = session.query(self.db_table).get(self.emit_message.job_id)
     if job_record is not None:
         job_record.task_id = task_id
         write_record(job_record, session)
     session.close()
コード例 #8
0
    def full_json(self):
        """Build the JSON payload for the latest conversion job of the result model.

        Fix: the original queried the stages status on an already-closed
        session and then closed the session a second time; the stages are now
        fetched before the single close.
        """
        session = CeleryDBAdapter.session()

        model = session.query(TopologiesModel).get(self.config.result_model_id)

        # Most recent conversion job for this model.
        record = (session.query(ModelDownloaderConversionJobsModel).filter_by(
            result_model_id=self.config.result_model_id).order_by(
                desc(ModelDownloaderConversionJobsModel.creation_timestamp)).
                  first())

        json_message = model.short_json()
        json_message['stages'] = get_stages_status(record.job_id, session)
        session.close()

        if record.conversion_args:
            # NOTE(review): this replaces the whole payload instead of merging
            # the 'mo' section into it — preserved as-is, but looks suspicious.
            json_message = {
                'mo': {
                    'params': {
                        'dataType':
                        json.loads(record.conversion_args)['precision']
                    }
                }
            }

        return json_message
コード例 #9
0
 def full_json(self):
     """Return the dataset JSON with progress overridden by the aggregated value."""
     session = CeleryDBAdapter.session()
     dataset_record = session.query(DatasetsModel).get(self.job_id)
     payload = dataset_record.json()
     payload['status']['progress'] = self.total_progress
     session.close()
     return payload
コード例 #10
0
    def __init__(self, path: str, exe: str, parameters: IConfig):
        """Collect console-tool parameters: dataset folder, model XML, device and CPU extension."""
        super().__init__()
        self.path = path
        self.exe = exe
        self.params = dict()

        session = CeleryDBAdapter.session()
        dataset = session.query(DatasetsModel).get(parameters.dataset_id)
        dataset_path = dataset.path
        voc_types = (DatasetTypesEnum.voc_object_detection,
                     DatasetTypesEnum.voc_segmentation)
        if dataset.dataset_type in voc_types:
            # VOC-style datasets keep the actual images in a nested folder.
            dataset_path = get_images_folder_for_voc(dataset_path)
        model = session.query(TopologiesModel).get(parameters.model_id)
        model_path = model.path
        session.close()

        self.params['i'] = dataset_path
        # First .xml found under the model directory is the IR to benchmark.
        self.params['m'] = find_all_paths(model_path, ('.xml', ))[0]
        self.params['d'] = parameters.device
        if self.params['d'] == 'CPU':
            # The CPU plugin needs the custom-layers extension library.
            self.params['l'] = os.path.join(IE_BIN_PATH, 'lib',
                                            'libcpu_extension' + LIB_EXTENSION)
コード例 #11
0
 def on_failure(self):
     """Mark the Winograd job and its result model as failed.

     Fix: the original called set_status_in_db for the model AFTER
     session.close() and never closed the session again; both status updates
     now happen on the open session, which is then closed exactly once.
     """
     session = CeleryDBAdapter.session()
     set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.error, session)
     winograd_job = session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
     result_model_id = winograd_job.result_model_id
     set_status_in_db(TopologiesModel, result_model_id, StatusEnum.error, session)
     session.close()
コード例 #12
0
 def update_progress_for_optimize(self):
     """Persist the current local progress on the conversion job record.

     Fix: the session was leaked; it is now closed like in the sibling
     progress updaters.
     """
     job_progress = self.local_progress
     session = CeleryDBAdapter.session()
     record = session.query(ModelDownloaderConversionJobsModel).filter_by(
         job_id=self.job_id).first()
     record.progress = job_progress
     write_record(record, session)
     session.close()
コード例 #13
0
 def on_failure(self):
     """Mark the dataset as failed and delete its upload directory."""
     session = CeleryDBAdapter.session()
     set_status_in_db(DatasetsModel, self.emit_message.job_id,
                      StatusEnum.error, session)
     session.close()
     dataset_dir = os.path.join(UPLOAD_FOLDER_DATASETS, self.emit_message.job_id)
     remove_dir(dataset_dir)
コード例 #14
0
 def full_json(self):
     """Return the model's short JSON extended with its analysis stages status."""
     session = CeleryDBAdapter.session()
     model = session.query(TopologiesModel).get(self.job_id)
     analysis_record = session.query(TopologyAnalysisJobsModel).filter_by(model_id=self.job_id).first()
     payload = model.short_json()
     payload['stages'] = get_stages_status(analysis_record.job_id, session)
     session.close()
     return payload
コード例 #15
0
 def total_progress(self):
     """Compute the overall progress, persist it on the topology and return it."""
     overall = self.local_progress * self.weight + self.previous_progress
     session = CeleryDBAdapter.session()
     topology = session.query(TopologiesModel).get(self.job_id)
     topology.progress = overall
     write_record(topology, session)
     session.close()
     return overall
コード例 #16
0
 def set_inference_result_to_record(self, results):
     """Write the final inference results and 100% progress to the DB record."""
     session = CeleryDBAdapter.session()
     record = session.query(InferenceResultsModel).get(
         self.inference_result_record_id)
     record.progress = 100
     record.update(results)
     write_record(record, session)
     session.close()
コード例 #17
0
 def update_progress_in_database(self, progress):
     """Persist the accuracy job progress.

     Consistency fix: uses the shared write_record helper like every other
     progress updater in this module, instead of an inline add/commit pair.
     """
     session = CeleryDBAdapter.session()
     accuracy_record = session.query(AccuracyJobsModel).filter_by(
         job_id=self.job_id).first()
     accuracy_record.progress = progress
     write_record(accuracy_record, session)
     session.close()
コード例 #18
0
 def update_progress_for_inference_result(self):
     """Mark the inference result as running with the current local progress."""
     session = CeleryDBAdapter.session()
     record = session.query(InferenceResultsModel).get(
         self.inference_result_record_id)
     record.progress = self.local_progress
     record.status = StatusEnum.running
     write_record(record, session)
     session.close()
コード例 #19
0
 def set_error_to_database(self, message):
     """Flag both the downloader job and its result model as failed with *message*."""
     session = CeleryDBAdapter.session()
     job_record = session.query(ModelDownloaderModel).get(self.job_id)
     result_model_id = job_record.result_model_id
     set_status_in_db(ModelDownloaderModel, self.job_id, StatusEnum.error,
                      session, message)
     set_status_in_db(TopologiesModel, result_model_id,
                      StatusEnum.error, session, message)
     session.close()
コード例 #20
0
 def update_progress_in_database(self):
     """Persist the current local progress on the downloader job record."""
     session = CeleryDBAdapter.session()
     job_record = (session.query(ModelDownloaderModel)
                   .filter_by(job_id=self.job_id)
                   .first())
     job_record.progress = self.local_progress
     write_record(job_record, session)
     session.close()
コード例 #21
0
 def on_failure(self):
     """Mark the Model Optimizer scan job and its topology as failed."""
     super().on_failure()
     topology_id = self.emit_message.config.topology_id
     session = CeleryDBAdapter.session()
     scan_record = session.query(ModelOptimizerScanJobsModel).filter_by(
         topology_id=topology_id).first()
     scan_record.status = StatusEnum.error
     write_record(scan_record, session)
     set_status_in_db(TopologiesModel, topology_id, StatusEnum.error, session)
     session.close()
コード例 #22
0
 def result_to_json(self) -> list:
     """Serialize all inference results of this job to a list of dicts."""
     session = CeleryDBAdapter.session()
     records = session.query(InferenceResultsModel).filter_by(
         job_id=self.job_id).all()
     payload = [record.json() for record in records]
     session.close()
     return payload
コード例 #23
0
 def set_exec_info(self, data: dict):
     """Store execution info on the inference result record and notify listeners.

     Consistency fix: uses the shared write_record helper instead of an inline
     add/commit pair, matching the other record writers in this module.
     """
     session = CeleryDBAdapter.session()
     infer_result = session.query(InferenceResultsModel).get(
         self.inference_result_record_id)
     infer_result.update(data)
     write_record(infer_result, session)
     session.close()
     self.emit_message()
コード例 #24
0
 def on_failure(self):
     """Flag both the result model and the conversion job as errored."""
     super().on_failure()
     emit_msg = self.emit_message
     session = CeleryDBAdapter.session()
     failed_records = (
         (TopologiesModel, emit_msg.config.result_model_id),
         (ModelDownloaderConversionJobsModel, emit_msg.job_id),
     )
     for table, record_id in failed_records:
         set_status_in_db(table, record_id, StatusEnum.error, session)
     session.close()
コード例 #25
0
 def model_optimizer_job_creator(job_id, data, weight):
     """Rebuild a ModelOptimizerJob from its persisted DB record.

     Note: *data* is accepted for interface compatibility but not used here.
     """
     session = CeleryDBAdapter.session()
     record = session.query(ModelOptimizerJobModel).get(job_id)
     mo_config = ModelOptimizerConfig(record.session_id, record.json())
     job = ModelOptimizerJob(record.result_model_id, mo_config, weight)
     job.emit_message.date = record.creation_timestamp.timestamp()
     job.emit_message.previous_progress = record.result_model.progress
     session.close()
     return job
コード例 #26
0
 def run(self):
     """Prepare the output directory and launch Python-based int8 calibration."""
     session = CeleryDBAdapter.session()
     int8_job = session.query(Int8AutotuneJobsModel).get(self.emit_message.job_id)
     tuned_model = session.query(TopologiesModel).get(int8_job.result_model_id)
     tuned_model_id = tuned_model.id
     tuned_path = tuned_model.path
     session.close()
     create_empty_dir(tuned_path)
     self._run_python_calibration(tuned_path, tuned_model_id)
コード例 #27
0
 def on_failure(self):
     """Mark the job as failed and remove the partially written artefact directory."""
     session = CeleryDBAdapter.session()
     self.emit_message.add_error('')
     set_status_in_db(self.db_table, self.emit_message.job_id,
                      StatusEnum.error, session)
     session.close()
     config = self.emit_message.config
     remove_dir(os.path.join(config.destination_path, config.path))
コード例 #28
0
 def update_model_optimizer_scan_result(self, results: str):
     """Store the cleaned scan output and mark the scan job as finished."""
     session = CeleryDBAdapter.session()
     scan_record = (session.query(ModelOptimizerScanJobsModel)
                    .filter_by(topology_id=self.config.topology_id)
                    .first())
     scan_record.information = self.cleanup_results(results)
     scan_record.progress = 100
     scan_record.status = StatusEnum.ready
     write_record(scan_record, session)
     session.close()
コード例 #29
0
 def on_failure(self):
     """Propagate failure: mark the result model as errored and delete its files."""
     super().on_failure()
     result_model_id = self.emit_message.config.result_model_id
     session = CeleryDBAdapter.session()
     set_status_in_db(TopologiesModel,
                      result_model_id,
                      StatusEnum.error, session)
     model_path = session.query(TopologiesModel).get(result_model_id).path
     remove_dir(model_path)
     session.close()
コード例 #30
0
def get_top_level_model_id(project_id: int) -> int:
    """Walk the optimized_from chain and return the id of the original (root) model."""
    session = CeleryDBAdapter.session()
    project = session.query(ProjectsModel).get(project_id)
    model = session.query(TopologiesModel).get(project.model_id)
    # Follow parent links until a model with no parent is reached.
    while model.optimized_from:
        model = session.query(TopologiesModel).get(model.optimized_from)
    session.close()
    return model.id