def save_pipeline_config(content: str, topology_id: int):
    topology = TopologiesModel.query.get(topology_id)
    pipeline_config_path = os.path.join(topology.path, 'pipeline.config')
    # `content` is a single string, so use write() ('w+' read access was unused).
    with open(pipeline_config_path, 'w') as pipeline_config_file:
        pipeline_config_file.write(content)
    size = get_size_of_files(pipeline_config_path)
    config_file_record = FilesModel('pipeline.config', topology_id, size,
                                    topology.session_id)
    config_file_record.progress = 100
    config_file_record.status = StatusEnum.ready
    config_file_record.uploaded_blob_size = size
    config_file_record.path = pipeline_config_path
    write_record(config_file_record, get_db().session)
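
# get_size_of_files is used throughout this module but defined elsewhere.
# A minimal sketch of what such a helper could look like, assuming it accepts
# either a single file or a directory and returns the total size in bytes
# (the name `get_size_of_files_sketch` and this behavior are assumptions,
# not the actual implementation):
import os


def get_size_of_files_sketch(path: str) -> int:
    # Single file: return its size directly.
    if os.path.isfile(path):
        return os.path.getsize(path)
    # Directory: sum the sizes of all regular files underneath it.
    total = 0
    for root, _, files in os.walk(path):
        total += sum(os.path.getsize(os.path.join(root, name)) for name in files)
    return total
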
def run(self):
    parameters = self.emit_message.config
    dataset_id = self.emit_message.job_id
    current_job = self.emit_message.add_stage(
        IEmitMessageStage('Setup dataset parameters', weight=0.1))
    session = CeleryDBAdapter.session()
    dataset = session.query(DatasetsModel).get(self.emit_message.job_id)
    dataset.status = StatusEnum.running
    dataset_path = dataset.path
    write_record(dataset, session)
    session.close()

    image_size = parameters.width * parameters.height * parameters.channels
    create_empty_dir(dataset_path)
    try:
        random_generator = DistributionLaw(parameters.dist_law,
                                           parameters.params_dist).random_generator
    except AssertionError as exception:
        self.emit_message.add_error(str(exception))
        raise
    self.emit_message.update_progress(current_job, 100)

    current_job = self.emit_message.add_stage(
        IEmitMessageStage('Generate dataset', weight=0.9))
    log.debug('Started generating dataset %s', dataset_id)
    index = 0
    while index < parameters.size:
        file_name = os.path.join(dataset_path, '{}.jpg'.format(index))
        # One flat random sample per image, reshaped to HxWxC.
        cv2.imwrite(
            file_name,
            random_generator(image_size).reshape(
                parameters.height, parameters.width,
                parameters.channels).astype(np.uint8))
        percent = (index / (parameters.size + 2)) * 100
        # Report progress roughly every 10% of generated images.
        if index % np.ceil(parameters.size / 10) == 0:
            self.emit_message.update_progress(current_job, percent)
        # Append one annotation line per generated image.
        with open(os.path.join(dataset_path, parameters.name + '.txt'),
                  'a') as desc_file:
            desc_file.write('{}.jpg 0\n'.format(index))
        index += 1

    session = CeleryDBAdapter.session()
    dataset = session.query(DatasetsModel).get(self.emit_message.job_id)
    dataset.progress = 100
    dataset.status = StatusEnum.ready
    dataset.size = get_size_of_files(dataset_path)
    write_record(dataset, session)
    session.close()
    self.emit_message.update_progress(current_job, 100)
    log.debug('Finished generating dataset %s', dataset_id)
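
# DistributionLaw and its random_generator are defined elsewhere. A minimal
# sketch of the pattern, assuming dist_law selects a NumPy distribution and
# random_generator returns a flat array of the requested length (the class
# name, parameter keys, and supported laws here are illustrative assumptions):
import numpy as np


class DistributionLawSketch:
    def __init__(self, dist_law: str, params: dict):
        if dist_law == 'uniform':
            low, high = params.get('min', 0), params.get('max', 255)
            self.random_generator = lambda size: np.random.uniform(low, high, size)
        elif dist_law == 'normal':
            mean, std = params.get('mean', 127), params.get('std', 40)
            self.random_generator = lambda size: np.random.normal(mean, std, size)
        else:
            # Mirrors the AssertionError the job above catches and reports.
            raise AssertionError('Unknown distribution law: {}'.format(dist_law))


# Usage mirroring the generation loop: one flat sample reshaped into an image.
generator = DistributionLawSketch('uniform', {'min': 0, 'max': 255})
image = generator.random_generator(224 * 224 * 3).reshape(224, 224, 3).astype(np.uint8)
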
def on_new_chunk_received(request, file_id: int):
    file_record = FilesModel.query.get(file_id)
    artifact = file_record.artifact
    if (not artifact or artifact.status == StatusEnum.cancelled
            or file_record.status == StatusEnum.cancelled):
        return {}
    try:
        write_chunk(file_id, request)
    except OSError:
        return 'Internal server error', 500
    if TopologiesModel.query.get(file_record.artifact_id):
        emit_message = create_upload_emit_message_for_topology(file_record)
    elif DatasetsModel.query.get(file_record.artifact_id):
        emit_message = create_upload_emit_message_for_dataset(file_record)
    else:
        return 'Cannot find artifact for this file {}'.format(file_id), 404
    uploaded_progress = update_artifact_upload_progress(file_id, emit_message)
    if uploaded_progress >= 100 or all(f.uploaded_blob_size == f.size
                                       for f in artifact.files):
        celery_tasks_chain = []
        if TopologiesModel.query.get(artifact.id):
            upload_job = UploadJobsModel.query.filter_by(
                artifact_id=artifact.id).first()
            upload_job.status = StatusEnum.ready
            upload_job.progress = 100
            write_record(upload_job, get_db().session)
            celery_tasks_chain = create_tasks_chain_for_upload_model(artifact.id)
        elif DatasetsModel.query.get(artifact.id):
            celery_tasks_chain = create_tasks_chain_for_upload_dataset(artifact.id)
        artifact.size = get_size_of_files(artifact.path)
        write_record(artifact, get_db().session)
        set_status_in_db(ArtifactsModel, artifact.id, StatusEnum.running,
                         get_db().session)
        try:
            write_record(artifact, get_db().session)
        except orm.exc.StaleDataError:
            pass
        # pylint: disable=fixme
        # TODO: Remove as soon as Model Optimizer fixes filenames handling.
        rename_mxnet_files(artifact.id)
        if celery_tasks_chain:
            chain(celery_tasks_chain).apply_async()
    return {}
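
# update_artifact_upload_progress is defined elsewhere. A hypothetical sketch
# of the percentage computation it likely performs, assuming each file record
# tracks size (expected bytes) and uploaded_blob_size (bytes received so far);
# the function name and exact rounding behavior are assumptions:
def compute_upload_progress_sketch(files: list) -> float:
    total = sum(f.size for f in files)
    if not total:
        return 0.0
    uploaded = sum(f.uploaded_blob_size for f in files)
    # Clamp to 100 so a final over-sized chunk cannot overshoot.
    return min(uploaded / total * 100, 100.0)
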
def run(self):
    self.emit_message.add_stage(IEmitMessageStage('extracting', progress=0),
                                silent=True)
    session = CeleryDBAdapter.session()
    dataset = session.query(DatasetsModel).get(self.emit_message.artifact_id)
    file = dataset.files[0]
    if dataset.status == StatusEnum.cancelled:
        session.close()
        return
    # Capture the values needed after the session is closed, so we do not
    # touch detached ORM instances later.
    dataset_id = dataset.id
    file_name = file.name
    uploaded_archive_path = file.path
    session.close()
    self.unpack(file_name, dataset_id, uploaded_archive_path,
                UPLOAD_FOLDER_DATASETS)
    session = CeleryDBAdapter.session()
    dataset = session.query(DatasetsModel).get(self.emit_message.job_id)
    dataset.path = os.path.join(UPLOAD_FOLDER_DATASETS, str(dataset.id))
    dataset.size = get_size_of_files(dataset.path)
    dataset.progress = self.emit_message.total_progress
    write_record(dataset, session)
    session.close()
    remove_dir(os.path.join(UPLOADS_FOLDER, str(self.emit_message.job_id)))
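
# self.unpack is implemented elsewhere. A minimal sketch of the extraction
# step, assuming the uploaded artifact is a tar archive extracted into a
# per-dataset folder; the real code may also handle zip archives and report
# extraction progress (function name and signature here are assumptions):
import os
import tarfile


def unpack_sketch(archive_path: str, dataset_id: int, target_root: str) -> str:
    target_dir = os.path.join(target_root, str(dataset_id))
    os.makedirs(target_dir, exist_ok=True)
    with tarfile.open(archive_path) as archive:
        archive.extractall(target_dir)
    return target_dir
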
def run(self):
    emit_msg = self.emit_message
    config = emit_msg.config
    session = CeleryDBAdapter.session()
    original_topology = session.query(TopologiesModel).get(
        config.original_topology_id)
    log.debug('[ MODEL OPTIMIZER ] Optimizing model %s', original_topology.name)
    mo_job_record = (session.query(ModelOptimizerJobModel)
                     .filter_by(result_model_id=config.result_model_id)
                     .order_by(desc(ModelOptimizerJobModel.creation_timestamp))
                     .first())
    mo_job_id = mo_job_record.job_id
    mo_job_record.status = StatusEnum.running
    resulting_topology = session.query(TopologiesModel).get(
        config.result_model_id)
    resulting_topology.converted_from = config.original_topology_id
    resulting_topology.status = StatusEnum.running
    resulting_topology.path = os.path.join(UPLOAD_FOLDER_MODELS,
                                           str(config.result_model_id),
                                           ORIGINAL_FOLDER)
    write_record(resulting_topology, session)
    create_empty_dir(resulting_topology.path)
    resolve_file_args(emit_msg.job_id, config, original_topology)
    mo_job_record.mo_args = json.dumps(config.mo_args)
    write_record(mo_job_record, session)
    config.mo_args.update({
        'model_name': original_topology.name,
        'framework': original_topology.framework.value,
        'output_dir': resulting_topology.path,
        'steps': True,
    })
    session.close()

    parameters = ModelOptimizerParameters(config.mo_args)
    parser = ModelOptimizerParser(self.emit_message,
                                  ModelOptimizerStages.get_stages())
    return_code, message = run_console_tool(parameters, parser, self)
    if return_code:
        # Shorten the raw console output to a one-line summary for the UI.
        match = re.search(
            r': (.+)\.\s+For more information please refer to Model Optimizer FAQ',
            message)
        short_error_message = match.group(1) if match else 'Model Optimizer failed'
        log.error('[ MODEL OPTIMIZER ] [ ERROR ]: %s', short_error_message)
        session = CeleryDBAdapter.session()
        mo_job_record = session.query(ModelOptimizerJobModel).get(mo_job_id)
        mo_job_record.status = StatusEnum.error
        mo_job_record.error_message = short_error_message
        mo_job_record.detailed_error_message = re.sub(
            r'\[ ERROR \]\s*', '', re.sub(r'(\n\s*)+', '\n', message))
        write_record(mo_job_record, session)
        resulting_topology = session.query(TopologiesModel).get(
            config.result_model_id)
        resulting_topology.status = StatusEnum.error
        resulting_topology.error_message = short_error_message
        write_record(resulting_topology, session)
        session.close()
        self.emit_message.emit_message()
        raise ModelOptimizerError(short_error_message, self.emit_message.job_id)

    session = CeleryDBAdapter.session()
    mo_job_record = session.query(ModelOptimizerJobModel).get(mo_job_id)
    mo_job_record.progress = 100
    mo_job_record.status = StatusEnum.ready
    write_record(mo_job_record, session)
    resulting_topology = session.query(TopologiesModel).get(
        config.result_model_id)
    resulting_topology.size = get_size_of_files(resulting_topology.path)
    write_record(resulting_topology, session)
    session.close()
    self.emit_message.emit_message()
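
# A small demonstration of the error-shortening regex above on a hypothetical
# failure message (the message text is invented for illustration; real Model
# Optimizer output will differ):
import re

MESSAGE = ('[ ERROR ] Exception occurred: Cannot infer shapes for node X. '
           'For more information please refer to Model Optimizer FAQ.')
match = re.search(
    r': (.+)\.\s+For more information please refer to Model Optimizer FAQ',
    MESSAGE)
short_error = match.group(1) if match else 'Model Optimizer failed'
print(short_error)  # -> Cannot infer shapes for node X
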
def run(self):
    self.emit_message.add_stage(IEmitMessageStage('analyzing', weight=1),
                                silent=True)
    session = CeleryDBAdapter.session()
    model = session.query(TopologiesModel).get(self.emit_message.job_id)
    model.size = get_size_of_files(model.path)
    write_record(model, session)
    if model.status in (StatusEnum.cancelled, StatusEnum.queued,
                        StatusEnum.error):
        session.close()
        return
    if not PERFORM_ANALYSIS:
        # Analysis is disabled: mark the model ready right away.
        self.emit_message.update_analyze_progress(100)
        model.progress = 100
        model.status = StatusEnum.ready
        model.size = get_size_of_files(model.path)
        write_record(model, session)
        session.close()
        return
    model_path = model.path
    session.close()
    try:
        analyze_data = self.analyze(model_path)
        session = CeleryDBAdapter.session()
        topology_analysis = (
            session.query(TopologyAnalysisJobsModel).filter_by(
                model_id=self.emit_message.job_id).first())
        self.emit_message.update_analyze_progress(50)
        topology_analysis.set_analysis_data(analyze_data)
        write_record(topology_analysis, session)
        session.close()
    except ModelAnalyzerError as error:
        # Analysis failures are reported to the client but do not abort the job.
        FeedEmitMessage.socket_io = self.emit_message.socket_io
        FeedEmitMessage.emit(ModelAnalyzerError.code, str(error))
    model_xml = find_all_paths(model_path, ('.xml',))[0]
    model_bin = os.path.splitext(model_xml)[0] + '.bin'
    try:
        net = IENetwork(model_xml, weights=model_bin)
    except Exception as exc:
        error_message = str(exc)
        self.emit_message.add_error(error_message)
        raise ModelAnalyzerError(error_message, self.emit_message.job_id)
    session = CeleryDBAdapter.session()
    model = session.query(TopologiesModel).get(self.emit_message.job_id)
    if not model.precision:
        model.precision = ModelPrecisionEnum(net.precision)
    model.progress = 100
    model.status = StatusEnum.ready
    model.size = get_size_of_files(model.path)
    write_record(model, session)
    topology_analysis = (
        session.query(TopologyAnalysisJobsModel).filter_by(
            model_id=self.emit_message.job_id).first())
    topology_analysis.status = StatusEnum.ready
    topology_analysis.progress = 100
    write_record(topology_analysis, session)
    session.close()
    self.emit_message.emit_message()
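
# find_all_paths is defined elsewhere. A minimal sketch, assuming it walks a
# directory tree and returns every file whose extension is in the given tuple
# (used above to locate the IR .xml next to its .bin weights file); the name
# and signature here are assumptions:
import os


def find_all_paths_sketch(root: str, extensions: tuple) -> list:
    found = []
    for dirpath, _, files in os.walk(root):
        found.extend(
            os.path.join(dirpath, name) for name in files
            if os.path.splitext(name)[1] in extensions)
    return found
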