Example No. 1
 def update_progress_for_optimize(self):
     job_progress = self.local_progress
     session = CeleryDBAdapter.session()
     record = session.query(ModelDownloaderConversionJobsModel).filter_by(
         job_id=self.job_id).first()
     record.progress = job_progress
     write_record(record, session)
     session.close()
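
All of the examples on this page funnel through write_record(record, session). Its body is not shown in the excerpt; a minimal sketch of what such a helper presumably does (add and commit, rolling back on failure), assuming plain SQLAlchemy sessions:

    def write_record(record, session):
        # Hypothetical implementation sketch, not the confirmed one.
        session.add(record)
        try:
            session.commit()
        except Exception:
            session.rollback()
            raise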
Example No. 2
 def update_progress_in_database(self, progress):
     session = CeleryDBAdapter.session()
     int8_record = session.query(Int8AutotuneJobsModel).filter_by(
         job_id=self.job_id).first()
     int8_record.progress = progress
     write_record(int8_record, session)
     session.close()
Example No. 3
 def set_task_id(self, task_id: str):
     session = CeleryDBAdapter.session()
     record = session.query(self.db_table).get(self.emit_message.job_id)
     if record:
         record.task_id = task_id
         write_record(record, session)
     session.close()
Example No. 4
def convert_downloaded_model(data: dict):
    topology_id = data['topologyId']
    topology = TopologiesModel.query.get(topology_id)
    topology.precision = ModelPrecisionEnum(data['dataType'])
    omz_topology = OMZTopologyModel.query.filter_by(name=topology.name).first()
    convert_job_record = ModelDownloaderConversionJobsModel.query.filter_by(
        result_model_id=topology_id).first()
    convert_job_record.conversion_args = json.dumps({
        'precision': data['dataType'],
    })
    write_record(convert_job_record, get_db().session)
    weights = JobsWeight.download_source_model()
    tasks = [
        TASK.subtask(
            args=[None, JobTypesEnum.model_convert_type.value,
                  convert_job_record.job_id],
            kwargs={'progress_weight': weights[JobTypesEnum.model_convert_type]}),
    ]
    source_path = os.path.join(MODEL_DOWNLOADS_FOLDER, str(topology_id),
                               omz_topology.path)
    destination_path = topology.path
    ir_postprocessing(tasks, source_path, destination_path, topology.id,
                      weights)

    chain(tasks).apply_async()
    return jsonify({})
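
Note that chain(tasks).apply_async() runs the subtasks sequentially; Celery feeds each task's return value to the next one as its first argument, which is why only the first subtask spells out the leading None placeholder.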
Example No. 5
def fetch_downloadable_models():
    omz_meta = get_metadata_for_omz_models()
    parameters = InfoDumperParameters()
    parser = InfoDumperParser()
    return_code, _ = run_console_tool(parameters, parser)
    if return_code:
        return
    models = json.loads(parser.stdout)
    for model in models:
        model_meta = omz_meta.get(model['name'], {
            'topology_type': TaskMethodEnum.generic,
            'advanced_configuration': None,
        })
        for precision in model['precisions']:
            existing_model = OMZTopologyModel.query.filter_by(
                name=model['name'],
                precision=ModelPrecisionEnum(precision)).first()
            if model['framework'] == 'dldt':
                model['framework'] = SupportedFrameworksEnum.openvino.value

            if not existing_model:
                if model_meta['topology_type'] != TaskMethodEnum.generic:
                    task_type = define_topology_task_type(model)
                else:
                    task_type = TaskEnum.generic
                record = OMZTopologyModel(
                    data=model,
                    task_type=task_type,
                    topology_type=model_meta['topology_type'],
                    advanced_configuration=model_meta['advanced_configuration'],
                    precision=ModelPrecisionEnum(precision))
                write_record(record, get_db().session)
Example No. 6
 def set_task_id(self, task_id):
     session = CeleryDBAdapter.session()
     resulting_topology = session.query(TopologiesModel).get(
         self.emit_message.config.result_model_id)
     resulting_topology.task_id = task_id
     write_record(resulting_topology, session)
     session.close()
Example No. 7
    def run(self):
        session = CeleryDBAdapter.session()
        winograd_autotune_job = session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
        new_model = session.query(TopologiesModel).get(winograd_autotune_job.result_model_id)
        tuned_path = new_model.path
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.running, session)
        session.close()
        self.emit_message.emit_progress()
        parameters = WinogradParameters(self.emit_message.config)
        parameters.set_parameter('o', tuned_path)
        parser = WinogradConsoleOutputParser(self.emit_message, WinogradToolStages.get_stages())

        return_code, message = run_console_tool(parameters, parser)
        if return_code:
            message = message or parser.get_error()
            error_message = WinogradErrorMessageProcessor.recognize_error(message, 'winograd autotuner')
            self.emit_message.add_error(error_message)
            raise WinogradAutotuneError(error_message, self.emit_message.job_id)
        session = CeleryDBAdapter.session()
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.ready, session)
        winograd_autotune_job = session.query(WinogradAutotuneJobsModel).get(self.emit_message.job_id)
        winograd_autotune_job.progress = 100
        write_record(winograd_autotune_job, session)
        set_status_in_db(WinogradAutotuneJobsModel, self.emit_message.job_id, StatusEnum.ready, session, force=True)
        session.close()
        self.emit_message.emit_message()
Example No. 8
 def total_progress(self):
     session = CeleryDBAdapter.session()
     artifact = session.query(TopologiesModel).get(self.job_id)
     progress = self.local_progress * self.weight + self.previous_progress
     artifact.progress = progress
     write_record(artifact, session)
     session.close()
     return progress
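
For instance, with weight = 0.4, previous_progress = 50 and local_progress = 75, the artifact's progress is written as 75 * 0.4 + 50 = 80.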
Example No. 9
 def set_inference_result_to_record(self, results):
     session = CeleryDBAdapter.session()
     infer_result = session.query(InferenceResultsModel).get(
         self.inference_result_record_id)
     infer_result.progress = 100
     infer_result.update(results)
     write_record(infer_result, session)
     session.close()
Example No. 10
 def update_progress_for_inference_result(self):
     session = CeleryDBAdapter.session()
     infer_result = session.query(InferenceResultsModel).get(
         self.inference_result_record_id)
     infer_result.progress = self.local_progress
     infer_result.status = StatusEnum.running
     write_record(infer_result, session)
     session.close()
Example No. 11
 def update_progress_in_database(self):
     job_progress = self.local_progress
     session = CeleryDBAdapter.session()
     record = session.query(ModelDownloaderModel).filter_by(
         job_id=self.job_id).first()
     record.progress = job_progress
     write_record(record, session)
     session.close()
Example No. 12
 def update_model_optimizer_scan_result(self, results: str):
     session = CeleryDBAdapter.session()
     mo_job_record = (session.query(ModelOptimizerScanJobsModel).filter_by(
         topology_id=self.config.topology_id).first())
     mo_job_record.information = self.cleanup_results(results)
     mo_job_record.progress = 100
     mo_job_record.status = StatusEnum.ready
     write_record(mo_job_record, session)
     session.close()
Example No. 13
 def on_failure(self):
     super().on_failure()
     session = CeleryDBAdapter.session()
     mo_job_record = session.query(ModelOptimizerScanJobsModel).filter_by(
         topology_id=self.emit_message.config.topology_id).first()
     mo_job_record.status = StatusEnum.error
     write_record(mo_job_record, session)
     set_status_in_db(TopologiesModel, self.emit_message.config.topology_id, StatusEnum.error, session)
     session.close()
Example No. 14
 def set_task_id(self, task_id):
     session = CeleryDBAdapter.session()
     downloader_job = session.query(ModelDownloaderModel).filter_by(
         job_id=self.emit_message.job_id).first()
     topology = session.query(TopologiesModel).get(
         downloader_job.result_model_id)
     topology.task_id = task_id
     write_record(topology, session)
     downloader_job.task_id = task_id
     write_record(downloader_job, session)
     session.close()
Example No. 15
 def total_progress(self):
     session = CeleryDBAdapter.session()
     artifact = session.query(TopologiesModel).get(
         self.config.result_model_id)
     current_job_record = session.query(
         ModelDownloaderConversionJobsModel).get(self.job_id)
     total_progress = current_job_record.progress * self.weight
     artifact.progress = self.previous_accumulated_progress + total_progress
     progress = artifact.progress
     write_record(artifact, session)
     session.close()
     return progress
Example No. 16
 def set_task_id(self, task_id):
     session = CeleryDBAdapter.session()
     topology_convert_job = session.query(
         ModelDownloaderConversionJobsModel).filter_by(
             job_id=self.emit_message.job_id).first()
     topology = topology_convert_job.result_model
     self.previous_task_id = topology.task_id
     topology.task_id = task_id
     write_record(topology, session)
     topology_convert_job.task_id = task_id
     write_record(topology_convert_job, session)
     session.close()
Example No. 17
 def create_files(cls, files: dict, artifact_id: int,
                  session_id: str) -> dict:
     result = {}
     for file_name, file_data in files.items():
         file_record = cls(file_data['name'], artifact_id,
                           file_data['size'], session_id)
         write_record(file_record, get_db().session)
         file_record.path = os.path.join(file_record.artifact.path,
                                         str(file_record.name))
         write_record(file_record, get_db().session)
         result[file_name] = file_record.id
     return result
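
A usage sketch for the classmethod above, assuming it lives on FilesModel (whose four-argument constructor appears in Example No. 18); the payload keys follow the fields read above, while the concrete values are hypothetical:

    files = {
        'weights': {'name': 'model.bin', 'size': 4096},
        'graph': {'name': 'model.xml', 'size': 512},
    }
    # The first write_record persists each row so its artifact relationship
    # resolves; the second stores the path computed from artifact.path.
    ids = FilesModel.create_files(files, artifact_id=42, session_id='session-1')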
Example No. 18
def save_pipeline_config(content: str, topology_id: int):
    topology = TopologiesModel.query.get(topology_id)
    pipeline_config_path = os.path.join(topology.path, 'pipeline.config')
    with open(pipeline_config_path, 'w') as pipeline_config_file:
        pipeline_config_file.write(content)
    size = get_size_of_files(pipeline_config_path)
    config_file_record = FilesModel('pipeline.config', topology_id, size,
                                    topology.session_id)
    config_file_record.progress = 100
    config_file_record.status = StatusEnum.ready
    config_file_record.uploaded_blob_size = size
    config_file_record.path = pipeline_config_path
    write_record(config_file_record, get_db().session)
Example No. 19
def run_accuracy_check(session_id: str):
    data = request.get_json()
    data['session_id'] = session_id
    data['accuracyConfig'] = ''

    accuracy_job = AccuracyJobsModel(data)

    write_record(accuracy_job, get_db().session)
    TASK.apply_async(args=(None, JobTypesEnum.accuracy_type.value,
                           accuracy_job.job_id),
                     task_id=str(accuracy_job.job_id))
    return jsonify({'jobId': accuracy_job.job_id})
Example No. 20
def set_status_in_db(table: Type[BaseModel],
                     item_id: int,
                     status: StatusEnum,
                     session,
                     message: str = None,
                     force: bool = False):
    record = session.query(table).get(item_id)
    if record and (force or
                   STATUS_PRIORITY[record.status] < STATUS_PRIORITY[status]):
        record.status = status
        if message:
            record.error_message = message
        write_record(record, session)
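
STATUS_PRIORITY is not defined in this excerpt. A hypothetical mapping that makes the guard read naturally (a status may only advance toward a more final state unless force=True):

    # Assumed values: a higher number means a more final state.
    STATUS_PRIORITY = {
        StatusEnum.queued: 0,
        StatusEnum.running: 1,
        StatusEnum.ready: 2,
        StatusEnum.cancelled: 3,
        StatusEnum.error: 4,
    }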
Example No. 21
 def total_progress(self):
     if self.from_celery:
         session = CeleryDBAdapter.session()
         artifact = session.query(ArtifactsModel).get(self.artifact_id)
     else:
         session = get_db().session
         artifact = ArtifactsModel.query.get(self.artifact_id)
     progress = self.local_progress * self.weight + self.previous_progress
     artifact.progress = progress
     write_record(artifact, session)
     if self.from_celery:
         session.close()
     return progress
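
The from_celery flag is the design point here: a Celery worker must open and close its own session, while in a Flask request context get_db().session is managed by the application, so only the Celery-owned session is explicitly closed.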
Example No. 22
    def run(self):
        parameters = self.emit_message.config
        dataset_id = self.emit_message.job_id
        current_job = self.emit_message.add_stage(
            IEmitMessageStage('Setup dataset parameters', weight=0.1))
        session = CeleryDBAdapter.session()
        dataset = session.query(DatasetsModel).get(self.emit_message.job_id)
        dataset.status = StatusEnum.running
        dataset_path = dataset.path
        write_record(dataset, session)
        session.close()
        image_size = parameters.width * parameters.height * parameters.channels
        create_empty_dir(dataset_path)
        try:
            random_generator = DistributionLaw(
                parameters.dist_law, parameters.params_dist).random_generator
        except AssertionError as exception:
            self.emit_message.add_error(str(exception))
            raise
        self.emit_message.update_progress(current_job, 100)
        current_job = self.emit_message.add_stage(
            IEmitMessageStage('Generate dataset', weight=0.9))
        log.debug('Started generating dataset %s', dataset_id)
        index = 0
        while index < self.emit_message.config.size:
            file_name = os.path.join(dataset_path, '{}.jpg'.format(index))
            cv2.imwrite(
                file_name,
                random_generator(image_size).reshape(
                    parameters.height, parameters.width,
                    parameters.channels).astype(np.uint8))
            percent = (index / (parameters.size + 2)) * 100

            if index % np.ceil(parameters.size / 10) == 0:
                self.emit_message.update_progress(current_job, percent)
            with open(os.path.join(dataset_path, parameters.name + '.txt'),
                      'a') as desc_file:
                desc_file.write('{}.jpg 0\n'.format(index))
            index += 1

        session = CeleryDBAdapter.session()
        dataset = session.query(DatasetsModel).get(self.emit_message.job_id)
        dataset.progress = 100
        dataset.status = StatusEnum.ready
        dataset.size = get_size_of_files(dataset_path)
        write_record(dataset, session)
        session.close()

        self.emit_message.update_progress(current_job, 100)
        log.debug('Finished generating dataset %s', dataset_id)
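
Two details of the progress math above: an update is emitted only every ceil(size / 10) images, i.e. at roughly each 10% step, and the size + 2 denominator keeps the in-loop percentage strictly below 100, leaving the final 100% to the explicit update_progress call after the dataset record is written.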
Example No. 23
def cancel_job_in_db(job_id: int):
    job = JobsModel.query.get(job_id)
    if job:
        job.status = StatusEnum.cancelled
        write_record(job, get_db().session)
        compound_inference_job = CompoundInferenceJobsModel.query.get(job_id)
        if not compound_inference_job:
            return
        inference_results = compound_inference_job.inference_results
        for inference_result in inference_results:
            if inference_result.status in (StatusEnum.running,
                                           StatusEnum.queued):
                inference_result.status = StatusEnum.cancelled
                write_record(inference_result, get_db().session)
Example No. 24
    def _run_python_calibration(self,
                                tuned_path: str,
                                int8_model_id: int):
        try:
            self.emit_message.emit_progress()
            session = CeleryDBAdapter.session()

            new_int8_model = session.query(TopologiesModel).get(int8_model_id)
            int8_job = session.query(Int8AutotuneJobsModel).get(self.emit_message.job_id)
            original_model = session.query(TopologiesModel).get(new_int8_model.optimized_from)

            project_model = session.query(ProjectsModel).get(int8_job.project_id)
            dataset_model = session.query(DatasetsModel).get(project_model.dataset_id)

            config = construct_accuracy_tool_config(original_model, dataset_model, DevicesEnum.cpu)
            config.dataset.subsample_size = '{}%'.format(int8_job.subset_size)

            int8_job.status = StatusEnum.running
            int8_job.calibration_config = json.dumps(config.to_dict())

            write_record(int8_job, session)
            session.close()

            tuned_model_path = os.path.join(tuned_path, str(self.emit_message.job_id))
            yml_file = '{}.yml'.format(tuned_model_path)

            config.dump_to_yml(yml_file)

            cli_params = AccuracyCheckerCliParameters()
            cli_params.exe = os.path.join(PYTHON_CLI_FOLDER, 'calibration', 'calibrate.py')
            cli_params.set_parameter('y', yml_file)
            cli_params.set_parameter('th', self.emit_message.config.threshold)
            cli_params.set_parameter('tp', tuned_model_path)

            self.emit_message.add_stage(IEmitMessageStage(job_type='int8_tuning'))
            cli_parser = ProgressParser(self.emit_message, None)
            code, error = run_console_tool(cli_params, cli_parser, self)
            if code:
                self.emit_message.add_error('Calibration tool failed')
                raise Int8AutotuneError(error, self.emit_message.job_id)
            self._update_db_on_success()
            self.emit_message.emit_message()

        except Exception as exc:
            log.debug('[ INT8 python ] ERROR: calibration job failed')
            log.debug(exc)
            remove_dir(tuned_path)
            self.emit_message.add_error('Calibration tool failed')
            raise Int8AutotuneError(str(exc), self.emit_message.job_id)
Example No. 25
def generate_queue_of_single_inference_tasks(data: dict,
                                             job_id: int,
                                             start_tasks: list = None,
                                             previous_weight=0):
    min_nireq = data['minNireq']
    max_nireq = data['maxNireq']
    step_nireq = data['stepNireq']

    min_batch = data['minBatch']
    max_batch = data['maxBatch']
    step_batch = data['stepBatch']

    queue = []
    if start_tasks:
        for task in start_tasks:
            queue.append(task)
    num_runs = math.ceil((max_batch - min_batch + 1) / step_batch) * math.ceil(
        (max_nireq - min_nireq + 1) / step_nireq)
    weight_single_run = (1 - previous_weight) / num_runs
    for batch in range(min_batch, max_batch + 1, step_batch):
        for nireq in range(min_nireq, max_nireq + 1, step_nireq):
            queue.append(
                TASK.subtask(
                    args=(JobTypesEnum.single_inference_type.value, job_id),
                    kwargs={
                        'data': ExecInfo(batch, nireq).json(),
                        'progress_weight': weight_single_run,
                    }))
            inference_result = InferenceResultsModel({
                'jobId': job_id,
                'execInfo': {
                    'batch': batch,
                    'nireq': nireq
                }
            })
            write_record(inference_result, get_db().session)

    if not start_tasks:
        queue.pop(0)
        queue.insert(
            0,
            TASK.subtask(
                args=(None, JobTypesEnum.single_inference_type.value, job_id),
                kwargs={
                    'data': ExecInfo(min_batch, min_nireq).json(),
                    'progress_weight': weight_single_run,
                }))
        get_db().session().commit()
    return queue
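
As a worked example: minBatch=1, maxBatch=4, stepBatch=1, minNireq=1, maxNireq=2, stepNireq=1 gives num_runs = ceil(4 / 1) * ceil(2 / 1) = 8, so with previous_weight = 0.2 each single inference carries weight_single_run = (1 - 0.2) / 8 = 0.1 of the job's total progress.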
Example No. 26
 def total_progress(self):
     session = CeleryDBAdapter.session()
     infer_results = session.query(InferenceResultsModel).filter_by(
         job_id=self.job_id).all()
     compound_infer_record = session.query(
         CompoundInferenceJobsModel).filter_by(job_id=self.job_id).first()
     num_single_inferences = compound_infer_record.num_single_inferences
     progress = 0.0
     for infer_result in infer_results:
         progress += infer_result.progress
     total_progress = progress / num_single_inferences
     compound_infer_record.progress = total_progress
     write_record(compound_infer_record, session)
     session.close()
     return total_progress
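
The compound job's progress is the mean over its single inferences: with num_single_inferences = 4 and per-result progress values 100, 100, 50 and 0, the record is written with (100 + 100 + 50 + 0) / 4 = 62.5.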
Example No. 27
def rename_mxnet_files(artifact_id: int):
    model = TopologiesModel.query.get(artifact_id)
    if model and model.framework == SupportedFrameworksEnum.mxnet:
        files = model.files
        for file in files:
            old_path = Path(file.path)
            new_name = model.name + {
                '.params': '-00001.params',
                '.json': '-symbol.json'
            }[old_path.suffix]
            new_path = old_path.parent / new_name
            os.rename(str(old_path), str(new_path))
            file.path = str(new_path)
            file.name = new_name
            write_record(file, get_db().session)
Example No. 28
def write_chunk(upload_id, request):
    file_record = FilesModel.query.get(upload_id)
    artifact = file_record.artifact
    chunk = request.files['file'].stream.read()
    file_name = os.path.join(artifact.path, file_record.name)

    with open(file_name, "ab") as file:
        file.write(chunk)

    if file_record.uploaded_blob_size:
        file_record.uploaded_blob_size += len(chunk)
    else:
        file_record.uploaded_blob_size = len(chunk)
    file_record.progress = file_record.uploaded_blob_size / file_record.size * 100
    write_record(file_record, get_db().session)
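
Progress here is plain byte accounting: for a file whose declared size is 10 MB arriving in 4 MB chunks, progress is written as 40, then 80, then 100 (the final chunk being 2 MB).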
Example No. 29
 def update_inference_result(self, results):
     for job in self.jobs:
         job.progress = 100
     self.set_inference_result_to_record(results)
     if self.total_progress >= 100:
         session = CeleryDBAdapter.session()
         job = session.query(CompoundInferenceJobsModel).get(self.job_id)
         infer_results = session.query(InferenceResultsModel).filter_by(
             job_id=self.job_id).all()
         for infer_result in infer_results:
             infer_result.status = StatusEnum.ready
             write_record(infer_result, session)
         job.status = StatusEnum.ready
         write_record(job, session)
         session.close()
     self.emit_message()
Example No. 30
def set_model_advanced_configuration(model_id: int):
    config = request.get_json()

    try_load_configuration(config)

    model = TopologiesModel.query.get(model_id)
    if not model:
        return 'Model with id {} was not found in the database'.format(
            model_id), 404

    model.meta.task_type = config['taskType']
    model.meta.topology_type = config['taskMethod']
    model.meta.advanced_configuration = json.dumps(config)
    write_record(model, get_db().session)

    return jsonify(model.short_json())