Code Example #1
 def on_failure(self):
     session = CeleryDBAdapter.session()
     set_status_in_db(DatasetsModel, self.emit_message.job_id,
                      StatusEnum.error, session)
     session.close()
     remove_dir(
         os.path.join(UPLOAD_FOLDER_DATASETS, self.emit_message.job_id))
Code Example #2
 def on_failure(self):
     session = CeleryDBAdapter.session()
     set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.error, session)
     winograd_job = session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
     result_model_id = winograd_job.result_model_id
     # Mark the resulting model as errored while the session is still open.
     set_status_in_db(TopologiesModel, result_model_id, StatusEnum.error, session)
     session.close()
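These failure handlers share one shape: open a session via CeleryDBAdapter.session(), mark the failed records, close the session. Getting the close ordering right by hand is error-prone, so a context-manager wrapper is a natural refactoring. A minimal sketch follows, assuming CeleryDBAdapter.session() returns a SQLAlchemy-style session; the name session_scope is hypothetical, not part of this codebase.

from contextlib import contextmanager

@contextmanager
def session_scope():
    # Hypothetical helper: yields a session and guarantees close() runs
    # after the block, even if a status update raises.
    session = CeleryDBAdapter.session()
    try:
        yield session
    finally:
        session.close()

 def on_failure(self):
     # Code Example #2 expressed with the wrapper: every update happens
     # inside the scope, so none can run against a closed session.
     with session_scope() as session:
         set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id,
                          StatusEnum.error, session)
         winograd_job = session.query(WinogradAutotuneJobsModel).get(
             self.emit_message.job_id)
         set_status_in_db(TopologiesModel, winograd_job.result_model_id,
                          StatusEnum.error, session)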
Code Example #3
 def set_error_to_database(self, message):
     session = CeleryDBAdapter.session()
     job_record = session.query(ModelDownloaderModel).get(self.job_id)
     set_status_in_db(ModelDownloaderModel, self.job_id, StatusEnum.error,
                      session, message)
     set_status_in_db(TopologiesModel, job_record.result_model_id,
                      StatusEnum.error, session, message)
     session.close()
Code Example #4
 def on_failure(self):
     super().on_failure()
     session = CeleryDBAdapter.session()
     set_status_in_db(TopologiesModel,
                      self.emit_message.config.result_model_id,
                      StatusEnum.error, session)
     set_status_in_db(ModelDownloaderConversionJobsModel,
                      self.emit_message.job_id, StatusEnum.error, session)
     session.close()
Code Example #5
 def on_failure(self):
     super().on_failure()
     session = CeleryDBAdapter.session()
     mo_job_record = session.query(ModelOptimizerScanJobsModel).filter_by(
         topology_id=self.emit_message.config.topology_id).first()
     mo_job_record.status = StatusEnum.error
     write_record(mo_job_record, session)
     set_status_in_db(TopologiesModel, self.emit_message.config.topology_id, StatusEnum.error, session)
     session.close()
Code Example #6
 def on_failure(self):
     session = CeleryDBAdapter.session()
     self.emit_message.add_error('')
     set_status_in_db(self.db_table, self.emit_message.job_id,
                      StatusEnum.error, session)
     session.close()
     remove_dir(
         os.path.join(self.emit_message.config.destination_path,
                      self.emit_message.config.path))
Code Example #7
    def run(self):
        emit_msg = self.emit_message
        config = emit_msg.config

        session = CeleryDBAdapter.session()

        topology = session.query(TopologiesModel).get(config.topology_id)
        log.debug('[ MODEL OPTIMIZER ] Analyzing model %s', topology.name)

        mo_job_record = session.query(ModelOptimizerScanJobsModel).filter_by(topology_id=config.topology_id).first()
        mo_job_id = mo_job_record.job_id
        mo_job_record.status = StatusEnum.running
        write_record(mo_job_record, session)

        resolve_file_args(emit_msg.job_id, config, topology)
        session.close()

        parameters = ModelOptimizerParameters(config.mo_args, {'MO_ENABLED_TRANSFORMS': 'ANALYSIS_JSON_PRINT'})

        parser = ModelOptimizerScanParser(self.emit_message, ModelOptimizerStages.get_stages())

        return_code, message = run_console_tool(parameters, parser, self)

        if return_code:
            match = re.search(r': (.+)\.\s+For more information please refer to Model Optimizer FAQ', message)
            if match:
                short_error_message = match.group(1)
            elif 'FRAMEWORK ERROR' in message:
                short_error_message = 'Invalid topology'
            else:
                short_error_message = 'Model Optimizer Scan failed'

            log.error('[ MODEL OPTIMIZER ] [ ERROR ]: %s', short_error_message)

            session = CeleryDBAdapter.session()
            set_status_in_db(ModelOptimizerScanJobsModel, mo_job_id, StatusEnum.error, session, short_error_message)
            mo_job_record = session.query(ModelOptimizerScanJobsModel).filter_by(topology_id=config.topology_id).first()
            mo_job_record.error_message = short_error_message
            mo_job_record.detailed_error_message = re.sub(r'^.*ERROR \]\s*', '', re.sub(r'(\n\s*)+', '\n', message))
            write_record(mo_job_record, session)

            set_status_in_db(TopologiesModel, emit_msg.job_id, StatusEnum.error, session, short_error_message)
            session.close()

            self.emit_message.emit_message()

            raise ModelOptimizerError(short_error_message, self.emit_message.job_id)

        session = CeleryDBAdapter.session()
        mo_job_record = session.query(ModelOptimizerScanJobsModel).get(mo_job_id)
        mo_job_record.progress = 100
        mo_job_record.status = StatusEnum.ready
        write_record(mo_job_record, session)
        session.close()

        self.emit_message.emit_message()
Code Example #8
 def on_failure(self):
     super().on_failure()
     session = CeleryDBAdapter.session()
     set_status_in_db(TopologiesModel,
                      self.emit_message.config.result_model_id,
                      StatusEnum.error, session)
     remove_dir(
         session.query(TopologiesModel).get(
             self.emit_message.config.result_model_id).path)
     session.close()
Code Example #9
def on_new_chunk_received(request, file_id: int):
    file_record = FilesModel.query.get(file_id)
    artifact = file_record.artifact

    if not artifact or artifact.status == StatusEnum.cancelled or file_record.status == StatusEnum.cancelled:
        return {}
    try:
        write_chunk(file_id, request)
    except OSError:
        return 'Internal server error', 500

    if TopologiesModel.query.get(file_record.artifact_id):
        emit_message = create_upload_emit_message_for_topology(file_record)
    elif DatasetsModel.query.get(file_record.artifact_id):
        emit_message = create_upload_emit_message_for_dataset(file_record)
    else:
        return 'Cannot find artifact for this file {}'.format(file_id), 404

    uploaded_progress = update_artifact_upload_progress(file_id, emit_message)

    # The upload is complete when the aggregated progress reaches 100%
    # or every file has received all of its bytes.
    if uploaded_progress >= 100 or all(f.uploaded_blob_size == f.size
                                       for f in artifact.files):
        celery_tasks_chain = []
        if TopologiesModel.query.get(artifact.id):
            upload_job = UploadJobsModel.query.filter_by(
                artifact_id=artifact.id).first()
            upload_job.status = StatusEnum.ready
            upload_job.progress = 100
            write_record(upload_job, get_db().session)
            celery_tasks_chain = create_tasks_chain_for_upload_model(
                artifact.id)
        elif DatasetsModel.query.get(artifact.id):
            celery_tasks_chain = create_tasks_chain_for_upload_dataset(
                artifact.id)
        artifact.size = get_size_of_files(artifact.path)
        write_record(artifact, get_db().session)
        set_status_in_db(ArtifactsModel, artifact.id, StatusEnum.running,
                         get_db().session)
        try:
            # Another chunk handler may have updated the artifact row
            # concurrently; ignore the resulting stale-data error.
            write_record(artifact, get_db().session)
        except orm.exc.StaleDataError:
            pass

        # pylint: disable=fixme
        # TODO: Remove as soon as Model Optimizer fixes filenames handling.
        rename_mxnet_files(artifact.id)
        if celery_tasks_chain:
            chain(celery_tasks_chain).apply_async()
    return {}
Code Example #10
def convert(mo_job_record: ModelOptimizerJobModel, data: dict,
            chain_progress_weight: dict):
    """Validate MO params, prepare them, update MO job record and launch MO chain."""

    pipeline_config = data.get('pipelineConfigFile', None)
    if pipeline_config:
        del data['pipelineConfigFile']
        save_pipeline_config(pipeline_config,
                             mo_job_record.original_topology_id)
    mo_form = MOForm(data, mo_job_record.original_topology.framework.value)
    if mo_form.is_invalid:
        set_status_in_db(ModelOptimizerJobModel, mo_job_record.job_id,
                         StatusEnum.error,
                         get_db().session)
        set_status_in_db(TopologiesModel, mo_job_record.result_model_id,
                         StatusEnum.error,
                         get_db().session)
        return jsonify({'errors': mo_form.errors}), 400

    mo_job_record.mo_args = json.dumps(mo_form.get_args())
    write_record(mo_job_record, get_db().session)

    chain([
        TASK.subtask(
            args=(None, JobTypesEnum.model_optimizer_type.value,
                  mo_job_record.job_id),
            kwargs={
                'progress_weight':
                chain_progress_weight[JobTypesEnum.model_optimizer_type]
            }),
        TASK.subtask(
            args=(JobTypesEnum.model_analyzer_type.value,
                  mo_job_record.result_model_id),
            kwargs={
                'progress_weight':
                chain_progress_weight[JobTypesEnum.model_analyzer_type],
            })
    ]).apply_async()

    return jsonify({
        'irId': mo_job_record.result_model_id,
        'modelOptimizerJobId': mo_job_record.job_id,
    })
Code Example #11
 def on_failure(self):
     session = CeleryDBAdapter.session()
     set_status_in_db(Int8AutotuneJobsModel, self.emit_message.job_id, StatusEnum.error, session)
     int8_job = session.query(Int8AutotuneJobsModel).get(self.emit_message.job_id)
     result_model_id = int8_job.result_model_id
     set_status_in_db(TopologiesModel, result_model_id, StatusEnum.error, session)
     project = session.query(ProjectsModel).filter_by(model_id=result_model_id).first()
     compound_job = session.query(CompoundInferenceJobsModel).filter_by(project_id=project.id).first()
     compound_job_id = compound_job.job_id
     set_status_in_db(CompoundInferenceJobsModel, compound_job_id, StatusEnum.error, session)
     session.close()
Code Example #12
 def run(self):
     emit_msg = self.emit_message
     config = emit_msg.config
     log.debug('[ MODEL DOWNLOADER ] Downloading model %s', config.name)
     self.emit_message.emit_message()
     parser = ModelDownloaderParser(self.emit_message,
                                    self.emit_message.stages.get_stages())
     parameters = self.setup_parameters(config)
     session = CeleryDBAdapter.session()
     artifact = session.query(TopologiesModel).get(config.result_model_id)
     artifact.status = StatusEnum.running
     download_model = session.query(ModelDownloaderModel).get(
         self.emit_message.job_id)
     download_model.status = StatusEnum.running
     emit_msg.set_previous_accumulated_progress(artifact.progress)
     session.add(artifact)
     write_record(download_model, session)
     session.close()
     return_code, message = run_console_tool(parameters, parser, self)
     if return_code or 'Error' in message:
         current_job = self.emit_message.get_current_job()
         job_name = current_job.name if current_job else None
         error = ModelDownloaderErrorMessageProcessor.recognize_error(
             message, job_name)
         session = CeleryDBAdapter.session()
         set_status_in_db(ModelDownloaderModel, self.emit_message.job_id,
                          StatusEnum.error, session, error)
         set_status_in_db(TopologiesModel, config.result_model_id,
                          StatusEnum.error, session, error)
         session.close()
         log.error('[ MODEL_DOWNLOADER ] [ ERROR ]: %s', error)
         self.emit_message.add_error(
             'Model downloader failed: {}'.format(error))
         raise ModelDownloaderError(error, self.emit_message.job_id)
     for job in self.emit_message.jobs:
         job.progress = 100
     # Reopen a session for the final status update: the one opened above
     # was closed before the console tool ran.
     session = CeleryDBAdapter.session()
     download_model = session.query(ModelDownloaderModel).get(
         self.emit_message.job_id)
     download_model.progress = 100
     download_model.status = StatusEnum.ready
     write_record(download_model, session)
     session.close()
     self.emit_message.emit_message()
Code Example #13
    def run(self):
        session = CeleryDBAdapter.session()
        winograd_autotune_job = session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
        new_model = session.query(TopologiesModel).get(winograd_autotune_job.result_model_id)
        tuned_path = new_model.path
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.running, session)
        session.close()
        self.emit_message.emit_progress()
        parameters = WinogradParameters(self.emit_message.config)
        parameters.set_parameter('o', tuned_path)
        parser = WinogradConsoleOutputParser(self.emit_message, WinogradToolStages.get_stages())

        return_code, message = run_console_tool(parameters, parser)
        if return_code:
            message = parser.get_error() if not message else message
            error_message = WinogradErrorMessageProcessor.recognize_error(message, 'winograd autotuner')
            self.emit_message.add_error(error_message)
            raise WinogradAutotuneError(error_message, self.emit_message.job_id)
        session = CeleryDBAdapter.session()
        winograd_autotune_job = session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
        winograd_autotune_job.progress = 100
        write_record(winograd_autotune_job, session)
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.ready, session, force=True)
        session.close()
        self.emit_message.emit_message()
Code Example #14
def update_artifact_upload_progress(file_id: int,
                                    emit_message: UploadEmitMessage) -> float:
    file_record = FilesModel.query.get(file_id)
    artifact = file_record.artifact

    if file_record.uploaded_blob_size == file_record.size:
        file_status = StatusEnum.ready
    else:
        file_status = StatusEnum.running

    # Clamp the raw progress to 100% and scale it by this stage's weight
    # in the overall job chain.
    uploaded_progress = min(artifact.uploaded_progress, 100)

    artifact.progress = uploaded_progress * emit_message.weight
    topology = TopologiesModel.query.get(artifact.id)

    total_progress = uploaded_progress

    if topology and topology.framework != SupportedFrameworksEnum.openvino:
        mo_job = ModelOptimizerJobModel.query.filter_by(
            original_topology_id=artifact.id).first()
        result_topology = mo_job.result_model
        weights = JobsWeight.upload_and_convert_openvino_model()
        result_topology.status = StatusEnum.running
        result_topology.progress = uploaded_progress * weights[
            JobTypesEnum.iuploader_type]
        write_record(result_topology, get_db().session)
        total_progress = result_topology.progress

    write_record(artifact, get_db().session)

    set_status_in_db(FilesModel, file_id, file_status, get_db().session)

    if artifact.progress == 100:
        set_status_in_db(ArtifactsModel, artifact.id, StatusEnum.ready,
                         get_db().session)
    else:
        set_status_in_db(ArtifactsModel, artifact.id, StatusEnum.running,
                         get_db().session)

    emit_message.add_stage(
        IEmitMessageStage('uploading', progress=total_progress))
    return uploaded_progress
Code Example #15
def cancel_upload_in_db(artifact_id: int) -> bool:
    artifact = DatasetsModel.query.get(artifact_id)
    if not artifact:
        artifact = TopologiesModel.query.get(artifact_id)
        if artifact:
            model_optimize = ModelOptimizerJobModel.query.filter_by(
                result_model_id=artifact_id).first()
            if model_optimize:
                set_status_in_db(ModelOptimizerJobModel, model_optimize.job_id,
                                 StatusEnum.cancelled,
                                 get_db().session)
            model_downloader = ModelDownloaderModel.query.filter_by(
                result_model_id=artifact_id).first()
            if model_downloader:
                set_status_in_db(ModelDownloaderModel, model_downloader.job_id,
                                 StatusEnum.cancelled,
                                 get_db().session)
    if artifact:
        set_status_in_db(ArtifactsModel, artifact_id, StatusEnum.cancelled,
                         get_db().session)
        return True
    return False
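cancel_upload_in_db reports through its boolean result whether the artifact was found, leaving the HTTP mapping to the caller. A short usage sketch, assuming the same Flask setup these handlers run under; the route path and handler name are hypothetical.

@app.route('/artifacts/<int:artifact_id>/cancel', methods=['POST'])
def cancel_upload(artifact_id: int):
    # Hypothetical endpoint: delegate to the helper above and map its
    # boolean result onto an HTTP status code.
    if cancel_upload_in_db(artifact_id):
        return jsonify({'id': artifact_id})
    return 'Cannot find artifact {}'.format(artifact_id), 404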
Code Example #16
 def set_error_to_database(self, message):
     session = CeleryDBAdapter.session()
     set_status_in_db(self.job.db_table, self.job_id, StatusEnum.error,
                      session, message)
     session.close()
Code Example #17
    def run(self):
        emit_msg = self.emit_message
        config = emit_msg.config
        log.debug('[ MODEL DOWNLOADER CONVERT ] Converting model %s',
                  config.name)

        session = CeleryDBAdapter.session()
        convert_model = session.query(ModelDownloaderConversionJobsModel).get(
            self.emit_message.job_id)
        download_model = session.query(ModelDownloaderModel).filter_by(
            result_model_id=convert_model.result_model_id).first()
        if convert_model.conversion_args is None or (
                download_model and download_model.status != StatusEnum.ready):
            log.debug(
                '[ MODEL DOWNLOADER CONVERT ] Model Converter args or %s files are not in place yet, skipping.',
                convert_model.result_model.name)
            # Once the downloader has started, its task id is stored in the
            # Topology instance. Subsequent convert requests are skipped while
            # the downloader is still running, so the Topology's task id must
            # be switched back to the original downloader job.
            self.set_task_id(self.previous_task_id)
            session.close()
            self.celery_task.request.chain = None
            return
        convert_model.status = StatusEnum.running
        artifact = session.query(TopologiesModel).get(config.result_model_id)
        artifact.status = StatusEnum.running
        emit_msg.set_previous_accumulated_progress(artifact.progress)
        write_record(convert_model, session)
        write_record(artifact, session)
        session.close()

        parameters = TopologyConvertParameters(config)
        parser = TopologyConvertParser(self.emit_message,
                                       TopologyConvertStages.get_stages())
        return_code, message = run_console_tool(parameters, parser, self)

        if return_code:
            current_job = self.emit_message.get_current_job()
            job_name = current_job.name if current_job else None
            error = TopologyConvertErrorMessageProcessor.recognize_error(
                message, job_name)
            log.error('[ MODEL DOWNLOADER CONVERT ] [ ERROR ]: %s', error)
            session = CeleryDBAdapter.session()
            set_status_in_db(ModelDownloaderConversionJobsModel,
                             self.emit_message.job_id, StatusEnum.error,
                             session, error)
            set_status_in_db(TopologiesModel, config.result_model_id,
                             StatusEnum.error, session, error)
            session.close()
            self.emit_message.add_error(
                'Model optimizer failed: {}'.format(error), return_code)
            raise ModelOptimizerError(error, self.emit_message.job_id)
        session = CeleryDBAdapter.session()
        convert_model = session.query(ModelDownloaderConversionJobsModel).get(
            self.emit_message.job_id)
        convert_model.progress = 100
        convert_model.status = StatusEnum.ready
        write_record(convert_model, session)
        model = session.query(TopologiesModel).get(config.result_model_id)
        model.path = config.output
        write_record(model, session)
        session.close()
        self.emit_message.emit_message()
Code Example #18
 def _update_db_on_success(self):
     session = CeleryDBAdapter.session()
     set_status_in_db(Int8AutotuneJobsModel, self.emit_message.job_id, StatusEnum.ready, session, force=True)
     session.close()
Code Example #19
 def on_failure(self):
     session = CeleryDBAdapter.session()
     set_status_in_db(AccuracyJobsModel, self.emit_message.job_id, StatusEnum.error, session)
     session.close()
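Code Examples #1, #4, #11, and #19 repeat the same failure bookkeeping: open a session, set the job record to error, set dependent records to error, close the session. A hedged consolidation sketch follows; the helper name fail_job_in_db and its related parameter are assumptions, not part of the original code.

def fail_job_in_db(db_table, job_id, related=()):
    # Hypothetical shared failure handler. 'related' is an iterable of
    # (model_class, record_id) pairs to mark as errored alongside the job.
    session = CeleryDBAdapter.session()
    try:
        set_status_in_db(db_table, job_id, StatusEnum.error, session)
        for model_class, record_id in related:
            set_status_in_db(model_class, record_id, StatusEnum.error, session)
    finally:
        session.close()

Under this sketch, Code Example #19 reduces to a single call: fail_job_in_db(AccuracyJobsModel, self.emit_message.job_id).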