Example #1
def delete_dataset_from_db(dataset_id: int):
    derived_scope = dataset_related_information(dataset_id)

    for rows in derived_scope:
        delete_rows(rows, get_db().session)

    dataset = DatasetsModel.query.get(dataset_id)

    if dataset:
        dataset_path = dataset.path
        delete_rows((dataset, ), get_db().session)
        remove_dir(dataset_path)
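
The delete_rows helper is used throughout these examples but never defined on this page. A minimal sketch of a plausible implementation, assuming from its call sites only that it removes each ORM object through the given SQLAlchemy session and commits:

def delete_rows(rows, session):
    # Hypothetical helper: delete every row in the iterable and
    # commit once at the end.
    for row in rows:
        session.delete(row)
    session.commit()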
Example #2
@classmethod
def create_files(cls, files: dict, artifact_id: int,
                 session_id: str) -> dict:
    result = {}
    for file_name, file_data in files.items():
        file_record = cls(file_data['name'], artifact_id,
                          file_data['size'], session_id)
        # The first write persists the record so its id and artifact
        # relationship are available; the second saves the derived path.
        write_record(file_record, get_db().session)
        file_record.path = os.path.join(file_record.artifact.path,
                                        str(file_record.name))
        write_record(file_record, get_db().session)
        result[file_name] = file_record.id
    return result
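
write_record appears in nearly every example below. A plausible minimal sketch, assuming it only adds the record to the session and commits; a commit would populate the autogenerated primary key, which explains the write / set path / write-again pattern in create_files above:

def write_record(record, session):
    # Hypothetical helper: persist a single ORM object. After the
    # commit the autoincrement id (record.id) is available, so callers
    # can derive a path from the id and write the record a second time.
    session.add(record)
    session.commit()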
Example #3
def cancel_job_in_db(job_id: int):
    job = JobsModel.query.get(job_id)
    if job:
        job.status = StatusEnum.cancelled
        write_record(job, get_db().session)
        compound_inference_job = CompoundInferenceJobsModel.query.get(job_id)
        if not compound_inference_job:
            return
        inference_results = compound_inference_job.inference_results
        for inference_result in inference_results:
            if inference_result.status in (StatusEnum.running,
                                           StatusEnum.queued):
                inference_result.status = StatusEnum.cancelled
                write_record(inference_result, get_db().session)
Example #4
def on_new_chunk_received(request, file_id: int):
    file_record = FilesModel.query.get(file_id)
    artifact = file_record.artifact

    if (not artifact or artifact.status == StatusEnum.cancelled
            or file_record.status == StatusEnum.cancelled):
        return {}
    try:
        write_chunk(file_id, request)
    except OSError:
        return 'Internal server error', 500

    if TopologiesModel.query.get(file_record.artifact_id):
        emit_message = create_upload_emit_message_for_topology(file_record)
    elif DatasetsModel.query.get(file_record.artifact_id):
        emit_message = create_upload_emit_message_for_dataset(file_record)
    else:
        return 'Cannot find artifact for this file {}'.format(file_id), 404

    uploaded_progress = update_artifact_upload_progress(file_id, emit_message)

    if uploaded_progress >= 100 or all(f.uploaded_blob_size == f.size
                                       for f in artifact.files):
        celery_tasks_chain = []
        if TopologiesModel.query.get(artifact.id):
            upload_job = UploadJobsModel.query.filter_by(
                artifact_id=artifact.id).first()
            upload_job.status = StatusEnum.ready
            upload_job.progress = 100
            write_record(upload_job, get_db().session)
            celery_tasks_chain = create_tasks_chain_for_upload_model(
                artifact.id)
        elif DatasetsModel.query.get(artifact.id):
            celery_tasks_chain = create_tasks_chain_for_upload_dataset(
                artifact.id)
        artifact.size = get_size_of_files(artifact.path)
        write_record(artifact, get_db().session)
        set_status_in_db(ArtifactsModel, artifact.id, StatusEnum.running,
                         get_db().session)
        try:
            write_record(artifact, get_db().session)
        except orm.exc.StaleDataError:
            pass

        # pylint: disable=fixme
        # TODO: Remove as soon as Model Optimizer fixes filenames handling.
        rename_mxnet_files(artifact.id)
        if celery_tasks_chain:
            chain(celery_tasks_chain).apply_async()
    return {}
Example #5
def generate_queue_of_single_inference_tasks(data: dict,
                                             job_id: int,
                                             start_tasks: list = None,
                                             previous_weight=0):
    min_nireq = data['minNireq']
    max_nireq = data['maxNireq']
    step_nireq = data['stepNireq']

    min_batch = data['minBatch']
    max_batch = data['maxBatch']
    step_batch = data['stepBatch']

    queue = []
    if start_tasks:
        for task in start_tasks:
            queue.append(task)
    num_runs = math.ceil((max_batch - min_batch + 1) / step_batch) * math.ceil(
        (max_nireq - min_nireq + 1) / step_nireq)
    weight_single_run = (1 - previous_weight) / num_runs
    for batch in range(min_batch, max_batch + 1, step_batch):
        for nireq in range(min_nireq, max_nireq + 1, step_nireq):
            queue.append(
                TASK.subtask(args=(JobTypesEnum.single_inference_type.value,
                                   job_id),
                             kwargs={
                                 'data': ExecInfo(batch, nireq).json(),
                                 'progress_weight': weight_single_run
                             }))
            inference_result = InferenceResultsModel({
                'jobId': job_id,
                'execInfo': {
                    'batch': batch,
                    'nireq': nireq
                }
            })
            write_record(inference_result, get_db().session)

    if not start_tasks:
        queue.pop(0)
        queue.insert(
            0,
            TASK.subtask(args=(None, JobTypesEnum.single_inference_type.value,
                               job_id),
                         kwargs={
                             'data': ExecInfo(min_batch, min_nireq).json(),
                             'progress_weight': weight_single_run
                         }))
        get_db().session.commit()
    return queue
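
To make the weighting arithmetic concrete, here is a worked example with invented parameters (previous_weight=0): batches 1..4 with step 2 and nireq 1..2 with step 1 yield four runs, each worth a quarter of the remaining progress.

import math

min_batch, max_batch, step_batch = 1, 4, 2
min_nireq, max_nireq, step_nireq = 1, 2, 1
previous_weight = 0

num_runs = math.ceil((max_batch - min_batch + 1) / step_batch) * math.ceil(
    (max_nireq - min_nireq + 1) / step_nireq)
# ceil(4 / 2) * ceil(2 / 1) = 2 * 2 = 4 single-inference runs
weight_single_run = (1 - previous_weight) / num_runs
# (1 - 0) / 4 = 0.25 of the total progress per run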
Example #6
def delete_job_from_db(job_id: int):
    job = get_job_by_id(job_id)
    if not job:
        return

    children = JobsModel.query.filter_by(parent_job=job_id).all()

    for child_job in children:
        delete_job_from_db(child_job.job_id)

    dependent_inference_results = InferenceResultsModel.query.filter_by(
        job_id=job_id).all()

    delete_rows(dependent_inference_results, get_db().session)
    delete_rows([job], get_db().session)
Example #7
def fetch_downloadable_models():
    omz_meta = get_metadata_for_omz_models()
    parameters = InfoDumperParameters()
    parser = InfoDumperParser()
    return_code, _ = run_console_tool(parameters, parser)
    if return_code:
        return
    models = json.loads(parser.stdout)
    for model in models:
        model_meta = omz_meta.get(
            model['name'], {
                'topology_type': TaskMethodEnum.generic,
                'advanced_configuration': None
            })
        for precision in model['precisions']:
            existing_model = (OMZTopologyModel.query.filter_by(
                name=model['name'],
                precision=ModelPrecisionEnum(precision)).first())
            if model['framework'] == 'dldt':
                model['framework'] = SupportedFrameworksEnum.openvino.value

            if not existing_model:
                if model_meta['topology_type'] != TaskMethodEnum.generic:
                    task_type = define_topology_task_type(model)
                else:
                    task_type = TaskEnum.generic
                record = OMZTopologyModel(
                    data=model,
                    task_type=task_type,
                    topology_type=model_meta['topology_type'],
                    advanced_configuration=model_meta[
                        'advanced_configuration'],
                    precision=ModelPrecisionEnum(precision))
                write_record(record, get_db().session)
Example #8
def create_dataset(session_id: str):
    data = request.get_json()
    name = data['datasetName']
    files = data['files']
    dataset = DatasetsModel(name=name, session_id=session_id)
    write_record(dataset, get_db().session)
    dataset.path = os.path.join(UPLOADS_FOLDER, str(dataset.id))
    write_record(dataset, get_db().session)
    files_ids = FilesModel.create_files(files, dataset.id, session_id)
    dataset.size = round(sum([f.size for f in dataset.files]) / (1024**2))
    write_record(dataset, get_db().session)
    create_empty_dir(dataset.path)
    return jsonify({
        'datasetItem': dataset.short_json(),
        'files': files_ids,
    })
Example #9
def convert_downloaded_model(data: dict):
    topology_id = data['topologyId']
    topology = TopologiesModel.query.get(topology_id)
    topology.precision = ModelPrecisionEnum(data['dataType'])
    omz_topology = OMZTopologyModel.query.filter_by(name=topology.name).first()
    convert_job_record = ModelDownloaderConversionJobsModel.query.filter_by(
        result_model_id=topology_id).first()
    convert_job_record.conversion_args = json.dumps({
        'precision': data['dataType'],
    })
    write_record(convert_job_record, get_db().session)
    weights = JobsWeight.download_source_model()
    tasks = [
        TASK.subtask(args=[
            None, JobTypesEnum.model_convert_type.value,
            convert_job_record.job_id
        ],
                     kwargs={
                         'progress_weight':
                         weights[JobTypesEnum.model_convert_type]
                     }),
    ]
    source_path = os.path.join(MODEL_DOWNLOADS_FOLDER, str(topology_id),
                               omz_topology.path)
    destination_path = topology.path
    ir_postprocessing(tasks, source_path, destination_path, topology.id,
                      weights)

    chain(tasks).apply_async()
    return jsonify({})
Example #10
def delete_model_from_db(model_id: int):
    all_models = TopologiesModel.query.all()
    derived_models = tuple(
        filter(lambda m: is_descendant_of(model_id, m.id), all_models))
    derived_models_ids = tuple(map(lambda m: m.id, derived_models))

    for derived_model_id in derived_models_ids:
        delete_model_from_db(derived_model_id)

    derived_scope = model_related_information(derived_models_ids)

    for rows in derived_scope:
        delete_rows(rows, get_db().session)

    parent_int8 = Int8AutotuneJobsModel.query.filter_by(
        result_model_id=model_id).all()
    parent_winograd = WinogradAutotuneJobsModel.query.filter_by(
        result_model_id=model_id).all()
    parent_mo = ModelOptimizerJobModel.query.filter(
        or_(ModelOptimizerJobModel.original_topology_id == model_id,
            ModelOptimizerJobModel.result_model_id == model_id)).all()

    delete_rows([*parent_int8, *parent_winograd, *parent_mo], get_db().session)
    project_ids = tuple(
        map(
            lambda p: p.id,
            ProjectsModel.query.filter(
                ProjectsModel.model_id == model_id).all()))

    all_accuracy_results = AccuracyJobsModel.query \
        .filter(AccuracyJobsModel.project_id.in_(project_ids)) \
        .all()
    delete_rows(all_accuracy_results, get_db().session)

    original_scope = model_related_information((model_id, ))
    for rows in original_scope:
        delete_rows(rows, get_db().session)

    model = TopologiesModel.query.get(model_id)

    if model:
        model_path = model.path
        delete_rows([model], get_db().session)
        remove_dir(model_path)
Example #11
def convert(mo_job_record: ModelOptimizerJobModel, data: dict,
            chain_progress_weight: dict):
    """Validate MO params, prepare them, update MO job record and launch MO chain."""

    pipeline_config = data.get('pipelineConfigFile', None)
    if pipeline_config:
        del data['pipelineConfigFile']
        save_pipeline_config(pipeline_config,
                             mo_job_record.original_topology_id)
    mo_form = MOForm(data, mo_job_record.original_topology.framework.value)
    if mo_form.is_invalid:
        set_status_in_db(ModelOptimizerJobModel, mo_job_record.job_id,
                         StatusEnum.error,
                         get_db().session)
        set_status_in_db(TopologiesModel, mo_job_record.result_model_id,
                         StatusEnum.error,
                         get_db().session)
        return jsonify({'errors': mo_form.errors}), 400

    mo_job_record.mo_args = json.dumps(mo_form.get_args())
    write_record(mo_job_record, get_db().session)

    chain([
        TASK.subtask(
            args=(None, JobTypesEnum.model_optimizer_type.value,
                  mo_job_record.job_id),
            kwargs={
                'progress_weight':
                chain_progress_weight[JobTypesEnum.model_optimizer_type]
            }),
        TASK.subtask(
            args=(JobTypesEnum.model_analyzer_type.value,
                  mo_job_record.result_model_id),
            kwargs={
                'progress_weight':
                chain_progress_weight[JobTypesEnum.model_analyzer_type],
            })
    ]).apply_async()

    return jsonify({
        'irId': mo_job_record.result_model_id,
        'modelOptimizerJobId': mo_job_record.job_id,
    })
Example #12
def update_artifact_upload_progress(file_id: int,
                                    emit_message: UploadEmitMessage) -> float:
    file_record = FilesModel.query.get(file_id)
    artifact = file_record.artifact

    if file_record.uploaded_blob_size == file_record.size:
        file_status = StatusEnum.ready
    else:
        file_status = StatusEnum.running

    uploaded_progress = min(artifact.uploaded_progress, 100)

    artifact.progress = uploaded_progress * emit_message.weight
    topology = TopologiesModel.query.get(artifact.id)

    total_progress = uploaded_progress

    if topology and topology.framework != SupportedFrameworksEnum.openvino:
        mo_job = ModelOptimizerJobModel.query.filter_by(
            original_topology_id=artifact.id).first()
        result_topology = mo_job.result_model
        weights = JobsWeight.upload_and_convert_openvino_model()
        result_topology.status = StatusEnum.running
        result_topology.progress = uploaded_progress * weights[
            JobTypesEnum.iuploader_type]
        write_record(result_topology, get_db().session)
        total_progress = result_topology.progress

    write_record(artifact, get_db().session)

    set_status_in_db(FilesModel, file_id, file_status, get_db().session)

    if artifact.progress == 100:
        set_status_in_db(ArtifactsModel, artifact.id, StatusEnum.ready,
                         get_db().session)
    else:
        set_status_in_db(ArtifactsModel, artifact.id, StatusEnum.running,
                         get_db().session)

    emit_message.add_stage(
        IEmitMessageStage('uploading', progress=total_progress))
    return uploaded_progress
Example #13
def delete_project(project_id: int):
    project = ProjectsModel.query.get(project_id)
    if not project:
        return 'Project with id {} was not found'.format(project_id), 404
    derived_projects = get_derived_projects(project)
    derived_projects_ids = [i.id for i in derived_projects]

    jobs = JobsModel.query.filter(
        JobsModel.project_id.in_([*derived_projects_ids, project.id])).all()
    jobs_ids = tuple(map(lambda job: job.job_id, jobs))

    all_jobs = []
    inference_results = []

    int8_job = Int8AutotuneJobsModel.query.filter_by(
        result_model_id=project.model_id).first()
    winograd_job = WinogradAutotuneJobsModel.query.filter_by(
        result_model_id=project.model_id).first()
    if int8_job:
        all_jobs.append(int8_job)

    if winograd_job:
        all_jobs.append(winograd_job)

    table_rows = JobsModel.query.filter(JobsModel.job_id.in_(jobs_ids)).all()

    for table_row in table_rows:
        all_jobs.append(get_job_by_id(table_row.job_id))
        if CompoundInferenceJobsModel.query.get(table_row.job_id):
            for res in InferenceResultsModel.query.filter_by(
                    job_id=table_row.job_id).all():
                inference_results.append(res)

    cancel_tasks(all_jobs)

    delete_rows(inference_results, get_db().session)
    delete_rows(all_jobs, get_db().session)
    delete_rows(derived_projects, get_db().session)
    delete_rows([project], get_db().session)

    return jsonify({'id': project.id})
Example #14
def run_compound_inference(session_id: str):
    data = request.get_json()
    data['session_id'] = session_id
    model_id = data['modelId']
    dataset_id = data['datasetId']
    device = DevicesEnum(data['device'])
    project_id = create_project(OptimizationTypesEnum.inference, model_id,
                                dataset_id, device,
                                get_db().session)
    data['projectId'] = project_id
    job_record = CompoundInferenceJobsModel(data)
    write_record(job_record, get_db().session)
    tasks_queue = generate_queue_of_single_inference_tasks(
        data, job_record.job_id)
    chain(tasks_queue).apply_async()
    original_model_id = get_top_level_model_id(project_id)
    return jsonify({
        'jobId': job_record.job_id,
        'projectId': project_id,
        'originalModelId': original_model_id
    })
Example #15
def generate_dataset(session_id: str):
    data = request.get_json()
    number_images = data['numberOfImages']
    name = data['datasetName']
    channels = data['channels']
    width = data['width']
    height = data['height']
    dist_law = data['distLaw']
    params_dist = data['distLawParams']
    dataset = DatasetsModel(name, session_id)
    dataset.dataset_type = DatasetTypesEnum.imagenet.value
    write_record(dataset, get_db().session)
    dataset.path = get_dataset_folder(str(dataset.id))
    write_record(dataset, get_db().session)
    config = DatasetGenerationConfigsModel(dataset.id, number_images, channels,
                                           width, height, dist_law,
                                           params_dist)
    write_record(config, get_db().session)
    TASK.apply_async(
        (None, JobTypesEnum.add_generated_dataset_type.value, dataset.id),
        task_id=str(dataset.id))
    return jsonify(dataset.json())
Example #16
def total_progress(self):
    if self.from_celery:
        session = CeleryDBAdapter.session()
        artifact = session.query(ArtifactsModel).get(self.artifact_id)
    else:
        session = get_db().session
        artifact = ArtifactsModel.query.get(self.artifact_id)
    progress = self.local_progress * self.weight + self.previous_progress
    artifact.progress = progress
    write_record(artifact, session)
    if self.from_celery:
        session.close()
    return progress
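
The formula is total = local_progress * weight + previous_progress. With invented numbers: a stage that accounts for 40% of the chain and is half done, on top of 30 points accumulated earlier, reports 50.

# Illustrative numbers only.
local_progress, weight, previous_progress = 50, 0.4, 30
progress = local_progress * weight + previous_progress  # 50.0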
Example #17
def cancel_upload_in_db(artifact_id: int) -> bool:
    artifact = DatasetsModel.query.get(artifact_id)
    if not artifact:
        artifact = TopologiesModel.query.get(artifact_id)
        if artifact:
            model_optimize = ModelOptimizerJobModel.query.filter_by(
                result_model_id=artifact_id).first()
            if model_optimize:
                set_status_in_db(ModelOptimizerJobModel, model_optimize.job_id,
                                 StatusEnum.cancelled,
                                 get_db().session)
            model_downloader = ModelDownloaderModel.query.filter_by(
                result_model_id=artifact_id).first()
            if model_downloader:
                set_status_in_db(ModelDownloaderModel, model_downloader.job_id,
                                 StatusEnum.cancelled,
                                 get_db().session)
    if artifact:
        set_status_in_db(ArtifactsModel, artifact_id, StatusEnum.cancelled,
                         get_db().session)
        return True
    return False
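
set_status_in_db is another recurring helper that is not defined on this page. A hedged sketch, assuming it simply loads one row by primary key through the given model class and persists the new status:

def set_status_in_db(model_class, row_id, status, session):
    # Hypothetical helper: update the status column of a single row.
    record = session.query(model_class).get(row_id)
    if record:
        record.status = status
        session.add(record)
        session.commit()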
Example #18
def save_pipeline_config(content: str, topology_id: int):
    topology = TopologiesModel.query.get(topology_id)
    pipeline_config_path = os.path.join(topology.path, 'pipeline.config')
    with open(pipeline_config_path, 'w') as pipeline_config_file:
        pipeline_config_file.write(content)
    size = get_size_of_files(pipeline_config_path)
    config_file_record = FilesModel('pipeline.config', topology_id, size,
                                    topology.session_id)
    config_file_record.progress = 100
    config_file_record.status = StatusEnum.ready
    config_file_record.uploaded_blob_size = size
    config_file_record.path = pipeline_config_path
    write_record(config_file_record, get_db().session)
Example #19
def run_accuracy_check(session_id: str):
    data = request.get_json()
    data['session_id'] = session_id
    data['projectId'] = data['projectId']
    data['accuracyConfig'] = ''

    accuracy_job = AccuracyJobsModel(data)

    write_record(accuracy_job, get_db().session)
    TASK.apply_async(args=(None, JobTypesEnum.accuracy_type.value,
                           accuracy_job.job_id),
                     task_id=str(accuracy_job.job_id))
    return jsonify({'jobId': accuracy_job.job_id})
Example #20
def full_json(self):
    if self.from_celery:
        session = CeleryDBAdapter.session()
        artifact = session.query(self.job.db_table).get(self.artifact_id)
    else:
        session = get_db().session
        artifact = ArtifactsModel.query.get(self.artifact_id)
    json_message = artifact.json()
    if self.from_celery:
        session.close()
    json_message.update({
        'creationTimestamp': self.date,
    })
    return json_message
Example #21
def update_model_advanced_configuration(session_id: str, model_id: int):
    config = request.get_json()

    try_load_configuration(config)

    dataset_id = config['datasetId']
    target = DevicesEnum(config['device'])

    model = TopologiesModel.query.get(model_id)
    if not model:
        return 'Model with id {} was not found in the database'.format(
            model_id), 404

    model.meta.advanced_configuration = json.dumps(config)
    write_record(model, get_db().session)

    affected_topologies_ids = [t.id for t in model.meta.topologies]
    projects = (ProjectsModel.query.filter(
        ProjectsModel.model_id.in_(affected_topologies_ids)).filter_by(
            dataset_id=dataset_id, target=target))
    affected_projects_ids = [p.id for p in projects]

    for project_id in affected_projects_ids:
        data = {
            'session_id': session_id,
            'projectId': project_id,
            'accuracyConfig': ''
        }
        accuracy_job = AccuracyJobsModel(data)
        write_record(accuracy_job, get_db().session)
        TASK.apply_async(args=(None, JobTypesEnum.accuracy_type.value,
                               accuracy_job.job_id))

    return jsonify({
        'modelIds': affected_topologies_ids,
        'projectIds': affected_projects_ids
    })
Example #22
def prepare_data_for_mo_pipeline(topology: TopologiesModel, upload_job_id: int,
                                 session_id: str):
    converted_model = TopologiesModel(topology.name,
                                      SupportedFrameworksEnum.openvino,
                                      topology.metadata_id, session_id)
    converted_model.source = ModelSourceEnum.original
    converted_model.converted_from = topology.id
    write_record(converted_model, get_db().session)
    model_optimizer_scan_job = ModelOptimizerScanJobsModel({
        'topology_id': topology.id,
        'previousJobId': upload_job_id,
        'session_id': session_id,
    })
    write_record(model_optimizer_scan_job, get_db().session)
    model_optimizer_job = ModelOptimizerJobModel({
        'session_id': session_id,
        'name': topology.name,
        'original_topology_id': topology.id,
        'result_model_id': converted_model.id,
        'previousJobId': model_optimizer_scan_job.job_id,
    })
    write_record(model_optimizer_job, get_db().session)
    analysis_data = TopologyAnalysisJobsModel({
        'session_id': session_id,
        'model_id': converted_model.id
    })
    analysis_data.parent_job = model_optimizer_job.job_id
    write_record(analysis_data, get_db().session)
    return converted_model, model_optimizer_job
Example #23
def write_chunk(upload_id, request):
    file_record = FilesModel.query.get(upload_id)
    artifact = file_record.artifact
    chunk = request.files['file'].stream.read()
    file_name = os.path.join(artifact.path, file_record.name)

    with open(file_name, "ab") as file:
        file.write(chunk)

    if file_record.uploaded_blob_size:
        file_record.uploaded_blob_size += len(chunk)
    else:
        file_record.uploaded_blob_size = len(chunk)
    file_record.progress = file_record.uploaded_blob_size / file_record.size * 100
    write_record(file_record, get_db().session)
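
The per-file progress is simply the uploaded byte count as a percentage of the declared file size. For example, after receiving 512 KiB of a 2 MiB file:

uploaded_blob_size = 512 * 1024             # bytes received so far
size = 2 * 1024 * 1024                      # declared total size
progress = uploaded_blob_size / size * 100  # 25.0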
Example #24
def rename_mxnet_files(artifact_id: int):
    model = TopologiesModel.query.get(artifact_id)
    if model and model.framework == SupportedFrameworksEnum.mxnet:
        files = model.files
        for file in files:
            old_path = Path(file.path)
            new_name = model.name + {
                '.params': '-00001.params',
                '.json': '-symbol.json'
            }[old_path.suffix]
            new_path = old_path.parent / new_name
            os.rename(str(old_path), str(new_path))
            file.path = str(new_path)
            file.name = new_name
            write_record(file, get_db().session)
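
The suffix-to-name mapping rewrites MXNet file names into the layout the downstream tooling expects. With a hypothetical model named resnet (path invented for illustration):

from pathlib import Path

mapping = {'.params': '-00001.params', '.json': '-symbol.json'}
old_path = Path('/uploads/42/resnet.params')    # made-up path
new_name = 'resnet' + mapping[old_path.suffix]  # 'resnet-00001.params'
new_path = old_path.parent / new_name           # /uploads/42/resnet-00001.params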
Example #25
def set_model_advanced_configuration(model_id: int):
    config = request.get_json()

    try_load_configuration(config)

    model = TopologiesModel.query.get(model_id)
    if not model:
        return 'Model with id {} was not found in the database'.format(
            model_id), 404

    model.meta.task_type = config['taskType']
    model.meta.topology_type = config['taskMethod']
    model.meta.advanced_configuration = json.dumps(config)
    write_record(model, get_db().session)

    return jsonify(model.short_json())
Example #26
def upload_model(session_id: str):
    data = request.get_json()

    model_name = data['modelName']
    framework = SupportedFrameworksEnum(data['framework'])
    files = data['files']

    metadata = TopologiesMetaDataModel()
    write_record(metadata, get_db().session)

    topology = TopologiesModel(model_name, framework, metadata.id, session_id)
    topology.source = (ModelSourceEnum.ir
                       if framework == SupportedFrameworksEnum.openvino
                       else ModelSourceEnum.original)
    write_record(topology, get_db().session)
    topology.path = os.path.join(UPLOAD_FOLDER_MODELS, str(topology.id),
                                 ORIGINAL_FOLDER)
    write_record(topology, get_db().session)
    create_empty_dir(topology.path)

    upload_job = UploadJobsModel({
        'session_id': session_id,
        'artifactId': topology.id
    })
    write_record(upload_job, get_db().session)

    files_ids = FilesModel.create_files(files, topology.id, session_id)
    topology.size = round(sum(f.size for f in topology.files) /
                          2**(10 * 2))  # bytes / 2**20 = MiB
    write_record(topology, get_db().session)
    result = {
        'modelItem': topology.short_json(),
        'files': files_ids,
    }

    if framework != SupportedFrameworksEnum.openvino:
        converted_topology, model_optimizer_job = prepare_data_for_mo_pipeline(
            topology, upload_job.job_id, session_id)
        result['modelItem'] = converted_topology.short_json()
        result['modelItem']['modelOptimizerJobId'] = model_optimizer_job.job_id
    else:
        analysis_data = TopologyAnalysisJobsModel({
            'session_id': session_id,
            'model_id': topology.id,
            'previousJobId': upload_job.job_id,
        })
        write_record(analysis_data, get_db().session)
    result['modelItem']['originalModelFramework'] = framework.value
    return jsonify(result)
Example #27
def archive_model(session_id, project_id):
    project = ProjectsModel.query.get(project_id)
    artifact = ArtifactsModel.query.get(project.model_id)

    exists, path = DownloadModelJob.archive_exists(artifact.id)
    if exists:
        return jsonify({
            'jobId': None,
            'message': 'archive already exists',
            'path': path
        })

    name = request.args.get('name')
    download_job = DownloadConfigsModel(
        dict(session_id=session_id, projectId=project_id, path=path,
             name=name))
    write_record(download_job, get_db().session)

    TASK.apply_async(args=(None, JobTypesEnum.download_model_type.value,
                           download_job.job_id))

    return jsonify({'jobId': download_job.job_id})
Example #28
def convert_edit(session_id):
    """Rerun IR conversion with changed MO params."""

    data = request.get_json()
    topology_id = data.pop('irId')

    topology = TopologiesModel.query.get(topology_id)
    if not topology:
        return 'Model with id {} was not found in the database'.format(
            topology_id), 404

    mo_job_record = ModelOptimizerJobModel({
        'original_topology_id': topology.converted_from,
        'result_model_id': topology_id,
        'session_id': session_id,
    })
    write_record(mo_job_record, get_db().session)

    topology.progress = 0
    topology.status = StatusEnum.queued
    topology.error_message = None

    return convert(mo_job_record, data, JobsWeight.model_optimizer())
Example #29
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime

from sqlalchemy import Column, DateTime

from app.extensions_factories.database import get_db

DB = get_db()


class BaseModel(DB.Model):
    __abstract__ = True

    creation_timestamp = Column(DateTime,
                                nullable=False,
                                default=datetime.datetime.utcnow)
    last_modified = Column(DateTime,
                           onupdate=datetime.datetime.utcnow,
                           default=datetime.datetime.utcnow)
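
Any concrete model inheriting from BaseModel picks up both timestamp columns automatically. A minimal hypothetical subclass (table name and columns invented for illustration):

from sqlalchemy import Column, Integer, String


class ExampleModel(BaseModel):
    # creation_timestamp and last_modified are inherited from BaseModel.
    __tablename__ = 'examples'

    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)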
Example #30
def download_model(session_id: str):
    data = request.get_json()
    precision = ModelPrecisionEnum(data['precision'])
    model_name = data['modelName']

    topology = OMZTopologyModel.query.filter_by(name=model_name,
                                                precision=precision).first()

    metadata = TopologiesMetaDataModel()
    write_record(metadata, get_db().session)
    new_model = TopologiesModel(model_name, SupportedFrameworksEnum.openvino,
                                metadata.id, session_id)
    new_model.source = ModelSourceEnum.omz
    new_model.precision = precision
    new_model.downloaded_from = topology.id
    write_record(new_model, get_db().session)

    new_model.path = os.path.join(UPLOAD_FOLDER_MODELS, str(new_model.id),
                                  ORIGINAL_FOLDER)

    new_model.meta.task_type = topology.task_type
    new_model.meta.topology_type = topology.topology_type
    new_model.meta.advanced_configuration = topology.advanced_configuration
    write_record(new_model, get_db().session)

    new_model_json = new_model.short_json()
    new_model_json['session_id'] = session_id

    tasks = []

    weights = JobsWeight.download_model()

    download_job_record = ModelDownloaderModel(new_model_json)
    download_job_record.result_model_id = new_model.id

    write_record(download_job_record, get_db().session)
    tasks.append(
        TASK.subtask(args=(None, JobTypesEnum.model_downloader_type.value,
                           download_job_record.job_id),
                     kwargs={
                         'progress_weight':
                         weights[JobTypesEnum.model_downloader_type]
                     }))
    analysis_data = TopologyAnalysisJobsModel({
        'session_id': session_id,
        'model_id': new_model.id,
    })
    write_record(analysis_data, get_db().session)

    if topology.framework != SupportedFrameworksEnum.openvino:
        weights = JobsWeight.download_source_model()

        convert_job_record = ModelDownloaderConversionJobsModel(new_model_json)
        convert_job_record.result_model_id = new_model.id
        convert_job_record.parent_job = download_job_record.job_id
        write_record(convert_job_record, get_db().session)

        converter_args = [
            JobTypesEnum.model_convert_type.value, convert_job_record.job_id
        ]
        tasks.append(
            TASK.subtask(args=converter_args,
                         kwargs={
                             'progress_weight':
                             weights[JobTypesEnum.model_convert_type]
                         }))
        analysis_data.parent_job = convert_job_record.job_id
    else:
        weights = JobsWeight.download_openvino_model()
        analysis_data.parent_job = download_job_record.job_id
    write_record(analysis_data, get_db().session)
    source_path = os.path.join(MODEL_DOWNLOADS_FOLDER, str(new_model.id),
                               topology.path)
    destination_path = new_model.path

    ir_postprocessing(tasks, source_path, destination_path, new_model.id,
                      weights)

    chain(tasks).apply_async()

    result = new_model.short_json()
    result['originalModelFramework'] = topology.framework.value
    return jsonify(result)