Example #1
def create_tasks_chain_for_upload_model(model_id: int) -> list:
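    """Build the Celery task chain for an uploaded model.

    Non-OpenVINO uploads are mapped to their converted topology and get a
    Model Optimizer scan task; OpenVINO IR uploads get a model analysis task.
    """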
    topology = TopologiesModel.query.get(model_id)
    if topology.framework != SupportedFrameworksEnum.openvino:
        topology = TopologiesModel.query.filter_by(
            converted_from=model_id).first()
        model_id = topology.id
        weights = JobsWeight.upload_and_convert_openvino_model()
        return [
            TASK.subtask(
                args=(None, JobTypesEnum.model_optimizer_scan_type.value,
                      model_id),
                kwargs={
                    'progress_weight':
                        weights[JobTypesEnum.model_optimizer_scan_type]
                })
        ]
    weights = JobsWeight.upload_openvino_model()
    return [
        TASK.subtask(
            args=(None, JobTypesEnum.model_analyzer_type.value, model_id),
            kwargs={
                'progress_weight': weights[JobTypesEnum.model_analyzer_type]
            })
    ]
Example #2
def run_accuracy_check(session_id: str):
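    """Create an accuracy job for the given project and start the accuracy task."""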
    data = request.get_json()
    data['session_id'] = session_id
    data['accuracyConfig'] = ''

    accuracy_job = AccuracyJobsModel(data)

    write_record(accuracy_job, get_db().session)
    TASK.apply_async(args=(None, JobTypesEnum.accuracy_type.value,
                           accuracy_job.job_id),
                     task_id=str(accuracy_job.job_id))
    return jsonify({'jobId': accuracy_job.job_id})
Example #3
def generate_queue_of_single_inference_tasks(data: dict,
                                             job_id: int,
                                             start_tasks: list = None,
                                             previous_weight=0):
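    """Build a queue of single-inference subtasks over the batch/nireq grid.

    The remaining progress weight is split evenly across all runs, and an
    InferenceResultsModel record is written for every (batch, nireq) pair.
    """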
    min_nireq = data['minNireq']
    max_nireq = data['maxNireq']
    step_nireq = data['stepNireq']

    min_batch = data['minBatch']
    max_batch = data['maxBatch']
    step_batch = data['stepBatch']

    queue = []
    if start_tasks:
        queue.extend(start_tasks)
    num_runs = math.ceil((max_batch - min_batch + 1) / step_batch) * math.ceil(
        (max_nireq - min_nireq + 1) / step_nireq)
    weight_single_run = (1 - previous_weight) / num_runs
    for batch in range(min_batch, max_batch + 1, step_batch):
        for nireq in range(min_nireq, max_nireq + 1, step_nireq):
            queue.append(
                TASK.subtask(args=(JobTypesEnum.single_inference_type.value,
                                   job_id),
                             kwargs={
                                 'data': ExecInfo(batch, nireq).json(),
                                 'progress_weight': weight_single_run
                             }))
            inference_result = InferenceResultsModel({
                'jobId': job_id,
                'execInfo': {
                    'batch': batch,
                    'nireq': nireq
                }
            })
            write_record(inference_result, get_db().session)

    if not start_tasks:
        queue.pop(0)
        queue.insert(
            0,
            TASK.subtask(
                args=(None, JobTypesEnum.single_inference_type.value, job_id),
                kwargs={
                    'data': ExecInfo(min_batch, min_nireq).json(),
                    'progress_weight': weight_single_run
                }))
        get_db().session.commit()
    return queue
Example #4
def convert_downloaded_model(data: dict):
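    """Schedule conversion of a downloaded OMZ model to IR at the requested precision."""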
    topology_id = data['topologyId']
    topology = TopologiesModel.query.get(topology_id)
    topology.precision = ModelPrecisionEnum(data['dataType'])
    omz_topology = OMZTopologyModel.query.filter_by(name=topology.name).first()
    convert_job_record = ModelDownloaderConversionJobsModel.query.filter_by(
        result_model_id=topology_id).first()
    convert_job_record.conversion_args = json.dumps({
        'precision': data['dataType'],
    })
    write_record(convert_job_record, get_db().session)
    weights = JobsWeight.download_source_model()
    tasks = [
        TASK.subtask(args=[
            None, JobTypesEnum.model_convert_type.value,
            convert_job_record.job_id
        ],
                     kwargs={
                         'progress_weight':
                         weights[JobTypesEnum.model_convert_type]
                     }),
    ]
    source_path = os.path.join(MODEL_DOWNLOADS_FOLDER, str(topology_id),
                               omz_topology.path)
    destination_path = topology.path
    ir_postprocessing(tasks, source_path, destination_path, topology.id,
                      weights)

    chain(tasks).apply_async()
    return jsonify({})
Example #5
def convert(mo_job_record: ModelOptimizerJobModel, data: dict,
            chain_progress_weight: dict):
    """Validate MO params, prepare them, update MO job record and launch MO chain."""

    pipeline_config = data.get('pipelineConfigFile', None)
    if pipeline_config:
        del data['pipelineConfigFile']
        save_pipeline_config(pipeline_config,
                             mo_job_record.original_topology_id)
    mo_form = MOForm(data, mo_job_record.original_topology.framework.value)
    if mo_form.is_invalid:
        set_status_in_db(ModelOptimizerJobModel, mo_job_record.job_id,
                         StatusEnum.error,
                         get_db().session)
        set_status_in_db(TopologiesModel, mo_job_record.result_model_id,
                         StatusEnum.error,
                         get_db().session)
        return jsonify({'errors': mo_form.errors}), 400

    mo_job_record.mo_args = json.dumps(mo_form.get_args())
    write_record(mo_job_record, get_db().session)

    chain([
        TASK.subtask(
            args=(None, JobTypesEnum.model_optimizer_type.value,
                  mo_job_record.job_id),
            kwargs={
                'progress_weight':
                chain_progress_weight[JobTypesEnum.model_optimizer_type]
            }),
        TASK.subtask(
            args=(JobTypesEnum.model_analyzer_type.value,
                  mo_job_record.result_model_id),
            kwargs={
                'progress_weight':
                chain_progress_weight[JobTypesEnum.model_analyzer_type],
            })
    ]).apply_async()

    return jsonify({
        'irId': mo_job_record.result_model_id,
        'modelOptimizerJobId': mo_job_record.job_id,
    })
Example #6
def generate_dataset(session_id: str):
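    """Create a dataset record and its generation config, then start the generation task."""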
    data = request.get_json()
    number_images = data['numberOfImages']
    name = data['datasetName']
    channels = data['channels']
    width = data['width']
    height = data['height']
    dist_law = data['distLaw']
    params_dist = data['distLawParams']
    dataset = DatasetsModel(name, session_id)
    dataset.dataset_type = DatasetTypesEnum.imagenet.value
    write_record(dataset, get_db().session)
    dataset.path = get_dataset_folder(str(dataset.id))
    write_record(dataset, get_db().session)
    config = DatasetGenerationConfigsModel(dataset.id, number_images, channels,
                                           width, height, dist_law,
                                           params_dist)
    write_record(config, get_db().session)
    TASK.apply_async(
        (None, JobTypesEnum.add_generated_dataset_type.value, dataset.id),
        task_id=str(dataset.id))
    return jsonify(dataset.json())
Example #7
def create_tasks_chain_for_upload_dataset(dataset_id: int) -> list:
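    """Build the extract -> recognize -> validate task chain for an uploaded dataset."""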
    weights = JobsWeight.upload_dataset()
    return [
        TASK.subtask(
            args=(None, JobTypesEnum.dataset_extractor_type.value, dataset_id),
            kwargs={
                'progress_weight': weights[JobTypesEnum.dataset_extractor_type]
            }),
        TASK.subtask(
            args=(JobTypesEnum.dataset_recognizer_type.value, dataset_id),
            kwargs={
                'progress_weight': weights[JobTypesEnum.dataset_recognizer_type]
            }),
        TASK.subtask(
            args=(JobTypesEnum.dataset_validator_type.value, dataset_id),
            kwargs={
                'progress_weight': weights[JobTypesEnum.dataset_validator_type]
            })
    ]
Example #8
def archive_model(session_id, project_id):
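    """Create a model archive download job, or reuse an already existing archive."""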
    project = ProjectsModel.query.get(project_id)
    artifact = ArtifactsModel.query.get(project.model_id)

    exists, path = DownloadModelJob.archive_exists(artifact.id)
    if exists:
        return jsonify({
            'jobId': None,
            'message': 'archive already exists',
            'path': path
        })

    name = request.args.get('name')
    download_job = DownloadConfigsModel(
        dict(session_id=session_id, projectId=project_id, path=path,
             name=name))
    write_record(download_job, get_db().session)

    TASK.apply_async(args=(None, JobTypesEnum.download_model_type.value,
                           download_job.job_id))

    return jsonify({'jobId': download_job.job_id})
Example #9
def ir_postprocessing(tasks: list, source_path: str, destination_path: str,
                      job_id: int, weight: dict):
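    """Append move-from-downloader and model-analyzer subtasks to the task list."""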
    move_file_args = [
        JobTypesEnum.move_model_from_downloader_type.value, job_id
    ]
    if not tasks:
        move_file_args.insert(0, None)
    tasks.append(
        TASK.subtask(args=move_file_args,
                     kwargs={
                         'data': {
                             'sourcePath': source_path,
                             'destinationPath': destination_path
                         },
                         'progress_weight':
                         weight[JobTypesEnum.move_model_from_downloader_type]
                     }))
    tasks.append(
        TASK.subtask(args=(JobTypesEnum.model_analyzer_type.value, job_id),
                     kwargs={
                         'progress_weight':
                         weight[JobTypesEnum.model_analyzer_type],
                     }))
Example #10
def update_model_advanced_configuration(session_id: str, model_id: int):
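    """Save the model's advanced configuration and re-run accuracy for affected projects."""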
    config = request.get_json()

    try_load_configuration(config)

    dataset_id = config['datasetId']
    target = DevicesEnum(config['device'])

    model = TopologiesModel.query.get(model_id)
    if not model:
        return 'Model with id {} was not found in the database'.format(
            model_id), 404

    model.meta.advanced_configuration = json.dumps(config)
    write_record(model, get_db().session)

    affected_topologies_ids = [t.id for t in model.meta.topologies]
    projects = (ProjectsModel.query.filter(
        ProjectsModel.model_id.in_(affected_topologies_ids)).filter_by(
            dataset_id=dataset_id, target=target))
    affected_projects_ids = [p.id for p in projects]

    for project_id in affected_projects_ids:
        data = {
            'session_id': session_id,
            'projectId': project_id,
            'accuracyConfig': ''
        }
        accuracy_job = AccuracyJobsModel(data)
        write_record(accuracy_job, get_db().session)
        TASK.apply_async(args=(None, JobTypesEnum.accuracy_type.value,
                               accuracy_job.job_id))

    return jsonify({
        'modelIds': affected_topologies_ids,
        'projectIds': affected_projects_ids
    })
Example #11
def download_model(session_id: str):
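    """Register an OMZ model and schedule its download, conversion (if needed) and analysis."""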
    data = request.get_json()
    precision = ModelPrecisionEnum(data['precision'])
    model_name = data['modelName']

    topology = OMZTopologyModel.query.filter_by(name=model_name,
                                                precision=precision).first()

    metadata = TopologiesMetaDataModel()
    write_record(metadata, get_db().session)
    new_model = TopologiesModel(model_name, SupportedFrameworksEnum.openvino,
                                metadata.id, session_id)
    new_model.source = ModelSourceEnum.omz
    new_model.precision = precision
    new_model.downloaded_from = topology.id
    write_record(new_model, get_db().session)

    new_model.path = os.path.join(UPLOAD_FOLDER_MODELS, str(new_model.id),
                                  ORIGINAL_FOLDER)

    new_model.meta.task_type = topology.task_type
    new_model.meta.topology_type = topology.topology_type
    new_model.meta.advanced_configuration = topology.advanced_configuration
    write_record(new_model, get_db().session)

    new_model_json = new_model.short_json()
    new_model_json['session_id'] = session_id

    tasks = []

    weights = JobsWeight.download_model()

    download_job_record = ModelDownloaderModel(new_model_json)
    download_job_record.result_model_id = new_model.id

    write_record(download_job_record, get_db().session)
    tasks.append(
        TASK.subtask(args=(None, JobTypesEnum.model_downloader_type.value,
                           download_job_record.job_id),
                     kwargs={
                         'progress_weight':
                         weights[JobTypesEnum.model_downloader_type]
                     }))
    analysis_data = TopologyAnalysisJobsModel({
        'session_id': session_id,
        'model_id': new_model.id,
    })
    write_record(analysis_data, get_db().session)

    if topology.framework != SupportedFrameworksEnum.openvino:
        weights = JobsWeight.download_source_model()

        convert_job_record = ModelDownloaderConversionJobsModel(new_model_json)
        convert_job_record.result_model_id = new_model.id
        convert_job_record.parent_job = download_job_record.job_id
        write_record(convert_job_record, get_db().session)

        converter_args = [
            JobTypesEnum.model_convert_type.value, convert_job_record.job_id
        ]
        tasks.append(
            TASK.subtask(args=converter_args,
                         kwargs={
                             'progress_weight':
                             weights[JobTypesEnum.model_convert_type]
                         }))
        analysis_data.parent_job = convert_job_record.job_id
    else:
        weights = JobsWeight.download_openvino_model()
        analysis_data.parent_job = download_job_record.job_id
    write_record(analysis_data, get_db().session)
    source_path = os.path.join(MODEL_DOWNLOADS_FOLDER, str(new_model.id),
                               topology.path)
    destination_path = new_model.path

    ir_postprocessing(tasks, source_path, destination_path, new_model.id,
                      weights)

    chain(tasks).apply_async()

    result = new_model.short_json()
    result['originalModelFramework'] = topology.framework.value
    return jsonify(result)
Example #12
def run_int8autotune(session_id: str):
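    """Create INT8 calibration, analysis and inference jobs for a model and launch the chain."""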
    data = request.get_json()
    int8_data = data['int8AutotuneConfig']
    compound_inference_data = data['compoundInferenceConfig']

    model_id = int8_data['modelId']
    dataset_id = int8_data['datasetId']
    device = DevicesEnum(int8_data['device'])

    project_id = create_project(OptimizationTypesEnum.int8autotune, model_id,
                                dataset_id, device, get_db().session)

    original_model = TopologiesModel.query.get(model_id)

    int8_data['session_id'] = session_id
    int8_data['projectId'] = project_id

    compound_inference_data['session_id'] = session_id

    int8_data['taskType'] = original_model.meta.task_type
    int8_data['taskMethod'] = original_model.meta.topology_type
    int8_data['calibrationConfig'] = ''
    int8_job = Int8AutotuneJobsModel(int8_data)
    write_record(int8_job, get_db().session)

    model_path = original_model.path
    if ORIGINAL_FOLDER in original_model.path:
        model_path = os.path.dirname(original_model.path)
    tuned_path = os.path.join(model_path, str(int8_job.job_id))

    new_int8_model = TopologiesModel(
        name='{}_{}'.format(original_model.name, int8_job.job_id),
        framework=SupportedFrameworksEnum.openvino,
        metadata_id=original_model.metadata_id,
        session_id=session_id
    )

    new_int8_model.path = tuned_path
    new_int8_model.optimized_from = original_model.id
    new_int8_model.precision = ModelPrecisionEnum.mixed
    new_int8_model.status = StatusEnum.running
    new_int8_model.source = ModelSourceEnum.ir
    write_record(new_int8_model, get_db().session)

    # check existing projects
    model_id = new_int8_model.id
    dataset_id = compound_inference_data['datasetId']
    device = DevicesEnum(compound_inference_data['device'])
    inference_project_id = create_project(OptimizationTypesEnum.int8autotune, model_id, dataset_id, device,
                                          get_db().session)

    int8_job = Int8AutotuneJobsModel.query.get(int8_job.job_id)
    int8_job.result_model_id = model_id
    write_record(int8_job, get_db().session)
    analysis_data = TopologyAnalysisJobsModel({
        'model_id': new_int8_model.id,
        'session_id': session_id,
        'previousJobId': int8_job.job_id,
    })
    write_record(analysis_data, get_db().session)
    infer_data = {
        **compound_inference_data,
        'previousJobId': int8_job.job_id,
        'projectId': inference_project_id
    }
    infer_job = CompoundInferenceJobsModel(infer_data)
    write_record(infer_job, get_db().session)

    weights = JobsWeight.int8_model()
    tasks = list()
    tasks.append(TASK.subtask(args=(None, JobTypesEnum.int8autotune_type.value, int8_job.job_id),
                              kwargs={
                                  'progress_weight': weights[JobTypesEnum.int8autotune_type]
                              }))
    tasks.append(TASK.subtask(args=(JobTypesEnum.model_analyzer_type.value, model_id),
                              kwargs={
                                  'progress_weight': weights[JobTypesEnum.model_analyzer_type],
                              }
                              ))
    tasks[0].on_failure = lambda: delete_model_from_db(int8_job.job_id)
    tasks_queue = generate_queue_of_single_inference_tasks(infer_data, infer_job.job_id,
                                                           start_tasks=tasks)

    chain(tasks_queue).apply_async()

    return jsonify({'jobId': int8_job.job_id})
Example #13
def run_winograd_autotune(session_id: str):
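    """Create a Winograd-tuned model record and launch the tuning, analysis and inference chain."""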
    data = request.get_json()

    winograd_data = data['winogradAutotuneConfig']
    compound_inference_data = data['compoundInferenceConfig']

    model_id = winograd_data['modelId']
    dataset_id = winograd_data['datasetId']
    device = DevicesEnum.cpu

    project_id = create_project(OptimizationTypesEnum.winograd_autotune,
                                model_id, dataset_id, device,
                                get_db().session)

    original_model = TopologiesModel.query.get(model_id)

    winograd_data['session_id'] = session_id
    winograd_data['projectId'] = project_id

    compound_inference_data['session_id'] = session_id

    winograd_job = WinogradAutotuneJobsModel(winograd_data)

    write_record(winograd_job, get_db().session)

    model_path = original_model.path
    if ORIGINAL_FOLDER in original_model.path:
        model_path = os.path.dirname(original_model.path)
    tuned_model_path = os.path.join(model_path, str(winograd_job.job_id))

    new_winograd_model = TopologiesModel(
        name='{}_{}'.format(original_model.name, winograd_job.job_id),
        framework=SupportedFrameworksEnum.openvino,
        metadata_id=original_model.metadata_id,
        session_id=session_id)
    new_winograd_model.path = tuned_model_path
    new_winograd_model.optimized_from = original_model.id
    new_winograd_model.precision = original_model.precision
    new_winograd_model.status = StatusEnum.running
    write_record(new_winograd_model, get_db().session)

    winograd_model_id = new_winograd_model.id
    dataset_id = compound_inference_data['datasetId']
    device = DevicesEnum(compound_inference_data['device'])
    if device != DevicesEnum.cpu:
        raise InconsistentConfigError(
            message='Device {} does not support Winograd optimization'.format(
                device.value))

    inference_project_id = create_project(
        OptimizationTypesEnum.winograd_autotune, winograd_model_id, dataset_id,
        device,
        get_db().session)

    winograd_job = WinogradAutotuneJobsModel.query.get(winograd_job.job_id)
    winograd_job.result_model_id = winograd_model_id
    write_record(winograd_job, get_db().session)

    infer_data = {
        **compound_inference_data,
        'previousJobId': winograd_job.job_id,
        'projectId': inference_project_id
    }

    inference_job = CompoundInferenceJobsModel(infer_data)
    write_record(inference_job, get_db().session)
    analysis_data = TopologyAnalysisJobsModel({
        'model_id': winograd_model_id,
        'session_id': session_id,
        'previousJobId': winograd_job.job_id,
    })
    write_record(analysis_data, get_db().session)
    tasks = list()
    tasks.append(
        TASK.subtask(args=(None, JobTypesEnum.winograd_autotune_type.value,
                           winograd_job.job_id)))
    tasks.append(
        TASK.subtask(args=(JobTypesEnum.model_analyzer_type.value,
                           winograd_model_id)))

    tasks_queue = generate_queue_of_single_inference_tasks(
        infer_data, inference_job.job_id, start_tasks=tasks)

    chain(tasks_queue).apply_async()

    return jsonify({'jobId': winograd_job.job_id})