def on_failure(self):
    session = CeleryDBAdapter.session()
    set_status_in_db(DatasetsModel, self.emit_message.job_id, StatusEnum.error, session)
    session.close()
    remove_dir(os.path.join(UPLOAD_FOLDER_DATASETS, str(self.emit_message.job_id)))
def on_failure(self):
    session = CeleryDBAdapter.session()
    self.emit_message.add_error('')
    set_status_in_db(self.db_table, self.emit_message.job_id, StatusEnum.error, session)
    session.close()
    remove_dir(os.path.join(self.emit_message.config.destination_path,
                            self.emit_message.config.path))
def on_failure(self):
    super().on_failure()
    session = CeleryDBAdapter.session()
    set_status_in_db(TopologiesModel, self.emit_message.config.result_model_id,
                     StatusEnum.error, session)
    result_model_path = session.query(TopologiesModel).get(
        self.emit_message.config.result_model_id).path
    remove_dir(result_model_path)
    session.close()
def delete_dataset_from_db(dataset_id: int):
    # Delete rows that reference this dataset before removing the dataset itself.
    for related_rows in dataset_related_information(dataset_id):
        delete_rows(related_rows, get_db().session)
    dataset = DatasetsModel.query.get(dataset_id)
    if dataset:
        dataset_path = dataset.path
        delete_rows((dataset,), get_db().session)
        remove_dir(dataset_path)
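The `delete_rows` helper used above and in `delete_model_from_db` is project-specific; a minimal sketch, assuming it receives an iterable of SQLAlchemy model instances and an active session, could look like this:

# Hypothetical sketch of the delete_rows helper; the project's actual
# implementation may differ (e.g. batching or deferred commits).
def delete_rows(rows, session):
    for row in rows:
        session.delete(row)
    session.commit()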
def run(self):
    session = CeleryDBAdapter.session()
    precision_str = session.query(TopologiesModel).get(
        self.emit_message.job_id).precision.value
    session.close()

    # Move regular files of the selected precision into the destination folder,
    # then remove the source directory.
    create_empty_dir(self.emit_message.config.destination_path)
    src_dir = os.path.join(self.emit_message.config.source_path, precision_str)
    for file_name in os.listdir(src_dir):
        full_file_name = os.path.join(src_dir, file_name)
        if os.path.isfile(full_file_name):
            shutil.move(full_file_name, self.emit_message.config.destination_path)
    remove_dir(src_dir)
def _run_python_calibration(self, tuned_path: str, int8_model_id: int):
    try:
        self.emit_message.emit_progress()
        session = CeleryDBAdapter.session()
        new_int8_model = session.query(TopologiesModel).get(int8_model_id)
        int8_job = session.query(Int8AutotuneJobsModel).get(self.emit_message.job_id)
        original_model = session.query(TopologiesModel).get(new_int8_model.optimized_from)
        project_model = session.query(ProjectsModel).get(int8_job.project_id)
        dataset_model = session.query(DatasetsModel).get(project_model.dataset_id)

        # Build the accuracy tool configuration for the original model and dataset.
        config = construct_accuracy_tool_config(original_model, dataset_model, DevicesEnum.cpu)
        config.dataset.subsample_size = '{}%'.format(int8_job.subset_size)

        int8_job.status = StatusEnum.running
        int8_job.calibration_config = json.dumps(config.to_dict())
        write_record(int8_job, session)
        session.close()

        # Dump the configuration to a YAML file next to the tuned model.
        tuned_model_path = os.path.join(tuned_path, str(self.emit_message.job_id))
        yml_file = '{}.yml'.format(tuned_model_path)
        config.dump_to_yml(yml_file)

        # Launch the calibration CLI tool and stream its progress.
        cli_params = AccuracyCheckerCliParameters()
        cli_params.exe = os.path.join(PYTHON_CLI_FOLDER, 'calibration', 'calibrate.py')
        cli_params.set_parameter('y', yml_file)
        cli_params.set_parameter('th', self.emit_message.config.threshold)
        cli_params.set_parameter('tp', tuned_model_path)

        self.emit_message.add_stage(IEmitMessageStage(job_type='int8_tuning'))
        cli_parser = ProgressParser(self.emit_message, None)
        return_code, error = run_console_tool(cli_params, cli_parser, self)
        if return_code:
            self.emit_message.add_error('Calibration tool failed')
            raise Int8AutotuneError(error, self.emit_message.job_id)

        self._update_db_on_success()
        self.emit_message.emit_message()
    except Exception as exc:
        log.debug('[ INT8 python ] ERROR: calibration job failed')
        log.debug(exc)
        remove_dir(tuned_path)
        self.emit_message.add_error('Calibration tool failed')
        raise Int8AutotuneError(str(exc), self.emit_message.job_id)
def delete_model_from_db(model_id: int):
    # Recursively delete every model derived from this one first.
    all_models = TopologiesModel.query.all()
    derived_models_ids = tuple(
        model.id for model in all_models if is_descendant_of(model_id, model.id))
    for derived_model_id in derived_models_ids:
        delete_model_from_db(derived_model_id)
    for rows in model_related_information(derived_models_ids):
        delete_rows(rows, get_db().session)

    # Remove jobs that reference this model as a source or a result.
    parent_int8 = Int8AutotuneJobsModel.query.filter_by(result_model_id=model_id).all()
    parent_winograd = WinogradAutotuneJobsModel.query.filter_by(result_model_id=model_id).all()
    parent_mo = ModelOptimizerJobModel.query.filter(
        or_(ModelOptimizerJobModel.original_topology_id == model_id,
            ModelOptimizerJobModel.result_model_id == model_id)).all()
    delete_rows([*parent_int8, *parent_winograd, *parent_mo], get_db().session)

    # Remove accuracy results of projects that use this model.
    project_ids = tuple(
        project.id
        for project in ProjectsModel.query.filter(ProjectsModel.model_id == model_id).all())
    all_accuracy_results = AccuracyJobsModel.query \
        .filter(AccuracyJobsModel.project_id.in_(project_ids)) \
        .all()
    delete_rows(all_accuracy_results, get_db().session)

    # Finally remove the model's own related records, its DB row, and its files.
    for rows in model_related_information((model_id,)):
        delete_rows(rows, get_db().session)
    model = TopologiesModel.query.get(model_id)
    if model:
        model_path = model.path
        delete_rows([model], get_db().session)
        remove_dir(model_path)
def run(self):
    self.emit_message.add_stage(IEmitMessageStage('extracting', progress=0), silent=True)

    session = CeleryDBAdapter.session()
    dataset = session.query(DatasetsModel).get(self.emit_message.artifact_id)
    file = dataset.files[0]
    if dataset.status == StatusEnum.cancelled:
        # Close the session before aborting a cancelled upload.
        session.close()
        return
    uploaded_archive_path = file.path
    session.close()

    self.unpack(file.name, dataset.id, uploaded_archive_path, UPLOAD_FOLDER_DATASETS)

    # Record the extracted dataset's path, size, and progress.
    session = CeleryDBAdapter.session()
    dataset = session.query(DatasetsModel).get(self.emit_message.job_id)
    dataset.path = os.path.join(UPLOAD_FOLDER_DATASETS, str(dataset.id))
    dataset.size = get_size_of_files(dataset.path)
    dataset.progress = self.emit_message.total_progress
    write_record(dataset, session)
    session.close()

    remove_dir(os.path.join(UPLOADS_FOLDER, str(self.emit_message.job_id)))
def on_failure(self):
    super().on_failure()
    remove_dir(self.emit_message.config.output)
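Every cleanup path above ends with `remove_dir`. A minimal sketch of such a helper, assuming it should tolerate a missing or already-removed directory, might be:

# Hypothetical sketch of a remove_dir helper compatible with the calls above;
# the project's actual implementation may differ.
import os
import shutil

def remove_dir(directory_path: str):
    if directory_path and os.path.exists(directory_path):
        shutil.rmtree(directory_path, ignore_errors=True)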