def unpack_zip(self, target_path):
    """Extract the job's ZIP archive into a freshly created *target_path*.

    Reports initial extraction progress, then delegates the actual unpacking
    to the external console tool and returns its result.
    """
    emit_message = self.job.emit_message
    emit_message.update_extract_progress(10)
    create_empty_dir(target_path)
    tool_parameters = ZIPParameters(
        OrderedDict((('o', self.file_path), ('d', target_path))))
    return run_console_tool(tool_parameters, ExtractParser(emit_message),
                            self.job)
Exemplo n.º 2
0
def fetch_downloadable_models():
    """Query the OMZ info dumper and persist previously unseen model records.

    Runs the info-dumper console tool, parses its JSON output, and for every
    model/precision pair that is not yet in the database creates and writes
    an ``OMZTopologyModel`` record enriched with OMZ metadata.
    """
    omz_meta = get_metadata_for_omz_models()
    parameters = InfoDumperParameters()
    parser = InfoDumperParser()
    return_code, _ = run_console_tool(parameters, parser)
    if return_code:
        # Tool failed - nothing to record.
        return
    # Metadata used when a model has no dedicated OMZ entry.
    fallback_meta = {
        'topology_type': TaskMethodEnum.generic,
        'advanced_configuration': None,
    }
    for model in json.loads(parser.stdout):
        model_meta = omz_meta.get(model['name'], fallback_meta)
        for precision in model['precisions']:
            existing_model = OMZTopologyModel.query.filter_by(
                name=model['name'],
                precision=ModelPrecisionEnum(precision),
            ).first()
            if model['framework'] == 'dldt':
                model['framework'] = SupportedFrameworksEnum.openvino.value

            if existing_model:
                continue
            if model_meta['topology_type'] == TaskMethodEnum.generic:
                task_type = TaskEnum.generic
            else:
                task_type = define_topology_task_type(model)
            record = OMZTopologyModel(
                data=model,
                task_type=task_type,
                topology_type=model_meta['topology_type'],
                advanced_configuration=model_meta['advanced_configuration'],
                precision=ModelPrecisionEnum(precision),
            )
            write_record(record, get_db().session)
Exemplo n.º 3
0
    def run(self):
        """Run the Winograd autotuner tool for this job and track status in the DB."""
        db_session = CeleryDBAdapter.session()
        job_record = db_session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
        result_model = db_session.query(TopologiesModel).get(job_record.result_model_id)
        tuned_path = result_model.path
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.running, db_session)
        db_session.close()

        self.emit_message.emit_progress()
        tool_parameters = WinogradParameters(self.emit_message.config)
        tool_parameters.set_parameter('o', tuned_path)
        output_parser = WinogradConsoleOutputParser(self.emit_message, WinogradToolStages.get_stages())

        return_code, message = run_console_tool(tool_parameters, output_parser)
        if return_code:
            # Prefer the tool's own message; fall back to what the parser collected.
            message = message if message else output_parser.get_error()
            error_message = WinogradErrorMessageProcessor.recognize_error(message, 'winograd autotuner')
            self.emit_message.add_error(error_message)
            raise WinogradAutotuneError(error_message, self.emit_message.job_id)

        db_session = CeleryDBAdapter.session()
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.ready, db_session)
        job_record = db_session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
        job_record.progress = 100
        write_record(job_record, db_session)
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.ready, db_session, force=True)
        db_session.close()
        self.emit_message.emit_message()
 def unpack_tar(self, target_path):
     # Extract the job's tar archive into a freshly created target_path via
     # the external tar tool; returns run_console_tool's (code, message) result.
     self.job.emit_message.update_extract_progress(10)
     create_empty_dir(target_path)
     parameters = TarGzParameters(
         OrderedDict([('xfp', self.file_path), ('-C', target_path)]))
     # NOTE(review): ExtractParser is given emit_message.job_id here but the
     # whole emit_message object in unpack_zip -- confirm which is intended.
     return run_console_tool(parameters,
                             ExtractParser(self.job.emit_message.job_id),
                             self.job)
Exemplo n.º 5
0
    def run(self):
        """Run a Model Optimizer analysis ("scan") pass over the topology.

        Marks the matching ModelOptimizerScanJobsModel record as running,
        launches the Model Optimizer with the ANALYSIS_JSON_PRINT transform
        enabled, and records either success or a parsed error in the DB.

        Raises:
            ModelOptimizerError: if the console tool exits with a non-zero code.
        """
        emit_msg = self.emit_message
        config = emit_msg.config

        session = CeleryDBAdapter.session()

        topology = session.query(TopologiesModel).get(config.topology_id)
        log.debug('[ MODEL OPTIMIZER ] Analyzing model %s', topology.name)

        mo_job_record = session.query(ModelOptimizerScanJobsModel).filter_by(topology_id=config.topology_id).first()
        mo_job_id = mo_job_record.job_id
        mo_job_record.status = StatusEnum.running
        write_record(mo_job_record, session)

        resolve_file_args(emit_msg.job_id, config, topology)
        # Close before the long-running tool invocation to avoid holding the session open.
        session.close()

        parameters = ModelOptimizerParameters(config.mo_args, {'MO_ENABLED_TRANSFORMS': 'ANALYSIS_JSON_PRINT'})

        parser = ModelOptimizerScanParser(self.emit_message, ModelOptimizerStages.get_stages())

        return_code, message = run_console_tool(parameters, parser, self)

        if return_code:
            # Extract the concise error the tool prints right before its FAQ pointer.
            match = re.search(r': (.+)\.\s+For more information please refer to Model Optimizer FAQ', message)
            if match:
                short_error_message = match.group(1)
            elif 'FRAMEWORK ERROR' in message:
                short_error_message = 'Invalid topology'
            else:
                short_error_message = 'Model Optimizer Scan failed'

            log.error('[ MODEL OPTIMIZER ] [ ERROR ]: %s', short_error_message)

            session = CeleryDBAdapter.session()
            set_status_in_db(ModelOptimizerScanJobsModel, mo_job_id, StatusEnum.error, session, short_error_message)
            mo_job_record = session.query(ModelOptimizerScanJobsModel).filter_by(topology_id=config.topology_id).first()
            mo_job_record.error_message = short_error_message
            # Collapse runs of blank lines, then drop everything up to the last 'ERROR ]' tag.
            mo_job_record.detailed_error_message = re.sub(r'^.*ERROR \]\s*', '', re.sub(r'(\n\s*)+', '\n', message))
            write_record(mo_job_record, session)

            # NOTE(review): emit_msg.job_id is used where a TopologiesModel id is
            # expected -- presumably the ids coincide for scan jobs; confirm.
            set_status_in_db(TopologiesModel, emit_msg.job_id, StatusEnum.error, session, short_error_message)
            session.close()

            self.emit_message.emit_message()

            raise ModelOptimizerError(short_error_message, self.emit_message.job_id)

        session = CeleryDBAdapter.session()
        mo_job_record = session.query(ModelOptimizerScanJobsModel).get(mo_job_id)
        mo_job_record.progress = 100
        mo_job_record.status = StatusEnum.ready
        write_record(mo_job_record, session)
        session.close()

        self.emit_message.emit_message()
    def _run_python_calibration(self,
                                tuned_path: str,
                                int8_model_id: int) -> None:
        """Run the Python INT8 calibration tool for this job.

        Builds an accuracy-tool config from the original model and dataset,
        dumps it to a YAML file next to the tuned model, invokes
        ``calibration/calibrate.py``, and updates the DB on success.

        Args:
            tuned_path: directory that will hold the tuned model artifacts;
                removed entirely if calibration fails.
            int8_model_id: id of the TopologiesModel row for the new INT8 model.

        Raises:
            Int8AutotuneError: if the tool exits non-zero or anything else fails.
        """
        try:
            self.emit_message.emit_progress()
            session = CeleryDBAdapter.session()

            new_int8_model = session.query(TopologiesModel).get(int8_model_id)
            int8_job = session.query(Int8AutotuneJobsModel).get(self.emit_message.job_id)
            original_model = session.query(TopologiesModel).get(new_int8_model.optimized_from)

            project_model = session.query(ProjectsModel).get(int8_job.project_id)
            dataset_model = session.query(DatasetsModel).get(project_model.dataset_id)

            config = construct_accuracy_tool_config(original_model, dataset_model, DevicesEnum.cpu)
            # Calibrate on a percentage subset of the dataset.
            config.dataset.subsample_size = '{}%'.format(int8_job.subset_size)

            int8_job.status = StatusEnum.running
            int8_job.calibration_config = json.dumps(config.to_dict())

            write_record(int8_job, session)
            session.close()

            tuned_model_path = os.path.join(tuned_path, str(self.emit_message.job_id))
            yml_file = '{}.yml'.format(tuned_model_path)

            config.dump_to_yml(yml_file)

            cli_params = AccuracyCheckerCliParameters()
            cli_params.exe = os.path.join(PYTHON_CLI_FOLDER, os.path.join('calibration', 'calibrate.py'))
            cli_params.set_parameter('y', yml_file)
            cli_params.set_parameter('th', self.emit_message.config.threshold)
            cli_params.set_parameter('tp', tuned_model_path)

            self.emit_message.add_stage(IEmitMessageStage(job_type='int8_tuning'))
            cli_parser = ProgressParser(self.emit_message, None)
            code, error = run_console_tool(cli_params, cli_parser, self)
            if code:
                self.emit_message.add_error('Calibration tool failed')
                raise Int8AutotuneError(error, self.emit_message.job_id)
            self._update_db_on_success()
            self.emit_message.emit_message()

        # NOTE(review): this broad handler also catches the Int8AutotuneError
        # raised above, re-wrapping it and calling add_error a second time --
        # apparently intentional so that cleanup (remove_dir) always runs.
        except Exception as exc:
            log.debug('[ INT8 python ] ERROR: calibration job failed')
            log.debug(exc)
            remove_dir(tuned_path)
            self.emit_message.add_error('Calibration tool failed')
            raise Int8AutotuneError(str(exc), self.emit_message.job_id)
 def run(self):
     """Download the configured model via the Model Downloader console tool.

     Marks the target topology and the downloader job as running, invokes the
     tool, and on success records 100% progress / ready status; on failure
     records the recognized error and raises ModelDownloaderError.
     """
     emit_msg = self.emit_message
     config = emit_msg.config
     log.debug('[ MODEL DOWNLOADER ] Downloading model %s', config.name)
     self.emit_message.emit_message()
     parser = ModelDownloaderParser(self.emit_message,
                                    self.emit_message.stages.get_stages())
     parameters = self.setup_parameters(config)
     session = CeleryDBAdapter.session()
     artifact = session.query(TopologiesModel).get(config.result_model_id)
     artifact.status = StatusEnum.running
     download_model = session.query(ModelDownloaderModel).get(
         self.emit_message.job_id)
     download_model.status = StatusEnum.running
     emit_msg.set_previous_accumulated_progress(artifact.progress)
     session.add(artifact)
     write_record(download_model, session)
     session.close()
     return_code, message = run_console_tool(parameters, parser, self)
     if return_code or 'Error' in message:
         job_name = self.emit_message.get_current_job(
         ).name if self.emit_message.get_current_job() else None
         error = ModelDownloaderErrorMessageProcessor.recognize_error(
             message, job_name)
         session = CeleryDBAdapter.session()
         set_status_in_db(ModelDownloaderModel, self.emit_message.job_id,
                          StatusEnum.error, session, error)
         set_status_in_db(TopologiesModel, config.result_model_id,
                          StatusEnum.error, session, error)
         session.close()
         log.error('[ MODEL_DOWNLOADER ] [ ERROR ]: %s', error)
         self.emit_message.add_error(
             'Model downloader failed: {}'.format(error))
         raise ModelDownloaderError(error, self.emit_message.job_id)
     for job in self.emit_message.jobs:
         job.progress = 100
     # Bug fix: the session opened above was already closed before the tool
     # ran; open a fresh one for the final status update instead of reusing
     # the closed session (matches the error branch and the other jobs).
     session = CeleryDBAdapter.session()
     download_model = session.query(ModelDownloaderModel).get(
         self.emit_message.job_id)
     download_model.progress = 100
     download_model.status = StatusEnum.ready
     write_record(download_model, session)
     session.close()
     self.emit_message.emit_message()
Exemplo n.º 8
0
    def run(self):
        """Run the accuracy-checker CLI for this job and store the measured accuracy."""
        db_session = CeleryDBAdapter.session()
        job_record = db_session.query(AccuracyJobsModel).get(self.emit_message.job_id)
        project = db_session.query(ProjectsModel).get(job_record.project_id)
        model_record = db_session.query(TopologiesModel).get(project.model_id)
        dataset_record = db_session.query(DatasetsModel).get(project.dataset_id)
        job_record.status = StatusEnum.running

        tool_config = construct_accuracy_tool_config(model_record, dataset_record, project.target)

        accuracy_config = json.dumps(tool_config.to_dict())
        job_record.accuracy_config = accuracy_config

        write_record(job_record, db_session)

        db_session.close()
        self.emit_message.add_stage(IEmitMessageStage(job_type='accuracy'))

        cli_params = AccuracyCheckerCliParameters()
        cli_params.exe = os.path.join(PYTHON_CLI_FOLDER, os.path.join('accuracy', 'check_accuracy.py'))
        log.debug(accuracy_config)
        # The config JSON is passed quoted as a single CLI argument.
        cli_params.set_parameter('y', "\'{}\'".format(accuracy_config))

        cli_parser = AccuracyParser(self.emit_message, None)
        return_code, error = run_console_tool(cli_params, cli_parser, self)
        if return_code:
            self.emit_message.add_error('Accuracy tool failed')
            raise AccuracyError(error, self.emit_message.job_id)

        db_session = CeleryDBAdapter.session()
        job_record = db_session.query(AccuracyJobsModel).get(self.emit_message.job_id)
        job_record.accuracy = round(cli_parser.accuracy, 3)
        job_record.status = StatusEnum.ready
        write_record(job_record, db_session)
        db_session.close()
        self.emit_message.update_percent(100)
    def run(self):
        """Convert an uploaded topology to IR with the Model Optimizer.

        Prepares the resulting topology record and output directory, runs the
        Model Optimizer console tool with the resolved arguments, and records
        either ready status plus the resulting size, or the parsed error.

        Raises:
            ModelOptimizerError: if the console tool exits with a non-zero code.
        """
        emit_msg = self.emit_message
        config = emit_msg.config

        session = CeleryDBAdapter.session()

        original_topology = session.query(TopologiesModel).get(
            config.original_topology_id)
        log.debug('[ MODEL OPTIMIZER ] Optimizing model %s',
                  original_topology.name)

        # Most recent MO job for this result model is the one being executed.
        mo_job_record = (session.query(ModelOptimizerJobModel).filter_by(
            result_model_id=config.result_model_id).order_by(
                desc(ModelOptimizerJobModel.creation_timestamp)).first())
        mo_job_id = mo_job_record.job_id
        mo_job_record.status = StatusEnum.running

        resulting_topology = session.query(TopologiesModel).get(
            config.result_model_id)
        resulting_topology.converted_from = config.original_topology_id
        resulting_topology.status = StatusEnum.running
        resulting_topology.path = os.path.join(UPLOAD_FOLDER_MODELS,
                                               str(config.result_model_id),
                                               ORIGINAL_FOLDER)
        write_record(resulting_topology, session)
        create_empty_dir(resulting_topology.path)

        resolve_file_args(emit_msg.job_id, config, original_topology)
        # Persist the exact MO arguments used, for later inspection/reruns.
        mo_job_record.mo_args = json.dumps(config.mo_args)
        write_record(mo_job_record, session)

        config.mo_args.update({
            'model_name': original_topology.name,
            'framework': original_topology.framework.value,
            'output_dir': resulting_topology.path,
            'steps': True,
        })

        # Close before the long-running tool invocation.
        session.close()

        parameters = ModelOptimizerParameters(config.mo_args)
        parser = ModelOptimizerParser(self.emit_message,
                                      ModelOptimizerStages.get_stages())
        return_code, message = run_console_tool(parameters, parser, self)

        if return_code:
            # Extract the concise error the tool prints before its FAQ pointer.
            match = re.search(
                r': (.+)\.\s+For more information please refer to Model Optimizer FAQ',
                message)
            short_error_message = match.group(
                1) if match else 'Model Optimizer failed'

            log.error('[ MODEL OPTIMIZER ] [ ERROR ]: %s', short_error_message)

            session = CeleryDBAdapter.session()

            mo_job_record = session.query(ModelOptimizerJobModel).get(
                mo_job_id)
            mo_job_record.status = StatusEnum.error
            mo_job_record.error_message = short_error_message
            # Collapse blank-line runs, then strip the '[ ERROR ]' tags.
            mo_job_record.detailed_error_message = re.sub(
                r'\[ ERROR \]\s*', '', re.sub(r'(\n\s*)+', '\n', message))
            write_record(mo_job_record, session)

            resulting_topology = session.query(TopologiesModel).get(
                config.result_model_id)
            resulting_topology.status = StatusEnum.error
            resulting_topology.error_message = short_error_message
            write_record(resulting_topology, session)

            session.close()

            self.emit_message.emit_message()

            raise ModelOptimizerError(short_error_message,
                                      self.emit_message.job_id)

        session = CeleryDBAdapter.session()

        mo_job_record = session.query(ModelOptimizerJobModel).get(mo_job_id)
        mo_job_record.progress = 100
        mo_job_record.status = StatusEnum.ready
        write_record(mo_job_record, session)

        resulting_topology = session.query(TopologiesModel).get(
            config.result_model_id)
        resulting_topology.size = get_size_of_files(resulting_topology.path)
        write_record(resulting_topology, session)

        session.close()

        self.emit_message.emit_message()
    def run(self):
        """Convert a downloaded OMZ model to IR via the downloader's converter.

        Skips (and unschedules the rest of the celery chain) when conversion
        args are not resolved yet or the download itself is still in progress;
        otherwise runs the converter tool and records the outcome in the DB.

        Raises:
            ModelOptimizerError: if the converter tool exits with a non-zero code.
        """
        emit_msg = self.emit_message
        config = emit_msg.config
        log.debug('[ MODEL DOWNLOADER CONVERT ] Converting model %s',
                  config.name)

        session = CeleryDBAdapter.session()
        convert_model = session.query(ModelDownloaderConversionJobsModel).get(
            self.emit_message.job_id)
        download_model = session.query(ModelDownloaderModel).filter_by(
            result_model_id=convert_model.result_model_id).first()
        if convert_model.conversion_args is None or (
                download_model and download_model.status != StatusEnum.ready):
            log.debug(
                '[ MODEL DOWNLOADER CONVERT ] Model Converter args or %s files are not in place yet, skipping.',
                convert_model.result_model.name)
            # Once the downloader started, its id is in the Topology instance.
            # Each next convert request is skipped if downloader is still running.
            # However, in the Topology instance, we need to switch task id back to the original Downloader job.
            self.set_task_id(self.previous_task_id)
            session.close()
            # Cancel the remaining tasks in this celery chain.
            self.celery_task.request.chain = None
            return
        convert_model.status = StatusEnum.running
        artifact = session.query(TopologiesModel).get(config.result_model_id)
        artifact.status = StatusEnum.running
        emit_msg.set_previous_accumulated_progress(artifact.progress)
        write_record(convert_model, session)
        write_record(artifact, session)
        session.close()

        parameters = TopologyConvertParameters(config)
        parser = TopologyConvertParser(self.emit_message,
                                       TopologyConvertStages.get_stages())
        return_code, message = run_console_tool(parameters, parser, self)

        if return_code:
            job_name = self.emit_message.get_current_job(
            ).name if self.emit_message.get_current_job() else None
            error = TopologyConvertErrorMessageProcessor.recognize_error(
                message, job_name)
            log.error('[ MODEL DOWNLOADER CONVERT ] [ ERROR ]: %s', error)
            session = CeleryDBAdapter.session()
            set_status_in_db(ModelDownloaderConversionJobsModel,
                             self.emit_message.job_id, StatusEnum.error,
                             session, error)
            set_status_in_db(TopologiesModel, config.result_model_id,
                             StatusEnum.error, session, error)
            session.close()
            self.emit_message.add_error(
                'Model optimizer failed: {}'.format(error), return_code)
            # NOTE(review): conversion failures are surfaced as
            # ModelOptimizerError -- presumably because conversion wraps the
            # Model Optimizer under the hood; confirm.
            raise ModelOptimizerError(error, self.emit_message.job_id)
        session = CeleryDBAdapter.session()
        convert_model = session.query(ModelDownloaderConversionJobsModel).get(
            self.emit_message.job_id)
        convert_model.progress = 100
        convert_model.status = StatusEnum.ready
        write_record(convert_model, session)
        model = session.query(TopologiesModel).get(config.result_model_id)
        model.path = config.output
        write_record(model, session)
        session.close()
        self.emit_message.emit_message()