def upload_dataset(self, dataset, dataset_id):
    """Upload all items of a local dataset (images + annotations) to the server.

    Walks the dataset once, collecting item names and file paths; while walking,
    optionally writes each image into the local content-addressed storage.
    Then uploads all images in one batch call, followed by all annotations.
    """
    progress_cache = None
    items_count = len(dataset)
    item_names = []
    img_paths = []
    ann_paths = []
    for item_name in dataset:
        item_names.append(item_name)
        item_paths = dataset.get_item_paths(item_name)
        img_paths.append(item_paths.img_path)
        ann_paths.append(item_paths.ann_path)
        if self.has_images_storage():
            # Lazily create the cache progress only when storage is enabled,
            # so projects without an image storage log nothing extra.
            if progress_cache is None:
                progress_cache = sly.Progress('Dataset {!r}: cache images'.format(dataset.name), items_count, self.logger)
            # Images are stored content-addressed: keyed by file hash.
            img_hash = sly.fs.get_file_hash(item_paths.img_path)
            self.storage.images.write_object(item_paths.img_path, img_hash)
            progress_cache.iter_done_report()
    progress = sly.Progress('Dataset {!r}: upload images'.format(dataset.name), items_count, self.logger)
    image_infos = self.public_api.image.upload_paths(dataset_id, item_names, img_paths, progress.iters_done_report)
    # Annotations are bound to the server-side image ids returned by the upload.
    progress = sly.Progress('Dataset {!r}: upload annotations'.format(dataset.name), items_count, self.logger)
    self.public_api.annotation.upload_paths([info.id for info in image_infos], ann_paths, progress.iters_done_report)
def export_project_images_metadata(api: sly.Api, task_id, context, state, app_logger):
    """Dump per-image metadata for PROJECT_ID (or only DATASET_ID when set),
    archive the result as a .tar, upload it to Team Files and register it as
    the task output archive.

    Fixes the progress reporting: previously the bar had a wrong total in the
    single-dataset branch, was re-created (counter reset) on every iteration in
    the whole-project branch, and ``iter_done_report()`` was called once only
    after archiving.
    """
    project = api.project.get_info_by_id(PROJECT_ID)
    result_dir_name = "{}_{}".format(project.id, project.name)
    # Nested twice so the archive contains a single top-level folder.
    RESULT_DIR = os.path.join(my_app.data_dir, result_dir_name, result_dir_name)
    sly.fs.mkdir(RESULT_DIR)
    ARCHIVE_NAME = result_dir_name + ".tar"
    RESULT_ARCHIVE = os.path.join(my_app.data_dir, ARCHIVE_NAME)
    if DATASET_ID:
        dataset_info = api.dataset.get_info_by_id(DATASET_ID)
        # Exactly one dataset is processed in this branch, so total_cnt is 1.
        progress = sly.Progress(
            'Get meta from images in {!r} dataset'.format(dataset_info.name),
            1, app_logger)
        res_dataset = os.path.join(RESULT_DIR, dataset_info.name)
        sly.fs.mkdir(res_dataset)
        get_meta_from_dataset(api, res_dataset, DATASET_ID, app_logger)
        progress.iter_done_report()
    else:
        datasets = api.dataset.get_list(PROJECT_ID)
        # Create the progress once; advancing it per dataset keeps the counter.
        progress = sly.Progress('Get meta from images in project datasets',
                                len(datasets), app_logger)
        for dataset in datasets:
            res_dataset = os.path.join(RESULT_DIR, dataset.name)
            sly.fs.mkdir(res_dataset)
            get_meta_from_dataset(api, res_dataset, dataset.id, app_logger)
            progress.iter_done_report()
    # Archive the parent folder so the tar unpacks into one directory.
    RESULT_DIR = os.path.join(my_app.data_dir, result_dir_name)
    sly.fs.archive_directory(RESULT_DIR, RESULT_ARCHIVE)
    app_logger.info("Result directory is archived")
    remote_archive_path = "/ApplicationsData/Export-Metadata/{}/{}".format(
        task_id, ARCHIVE_NAME)
    upload_progress = []

    def _print_progress(monitor, upload_progress):
        # Lazily create the upload progress once the total size is known.
        if len(upload_progress) == 0:
            upload_progress.append(
                sly.Progress(message="Upload {!r}".format(ARCHIVE_NAME),
                             total_cnt=monitor.len,
                             ext_logger=app_logger,
                             is_size=True))
        upload_progress[0].set_current_value(monitor.bytes_read)

    file_info = api.file.upload(TEAM_ID, RESULT_ARCHIVE, remote_archive_path,
                                lambda m: _print_progress(m, upload_progress))
    app_logger.info("Uploaded to Team-Files: {!r}".format(
        file_info.full_storage_url))
    api.task.set_output_archive(task_id, file_info.id, ARCHIVE_NAME,
                                file_url=file_info.full_storage_url)
    sly.fs.remove_dir(RESULT_DIR)
# Stop the Supervisely app event loop once the handler above has finished.
# NOTE(review): this looks like it may belong at the end of the export
# handler rather than at module level — confirm against the original file.
my_app.stop()
def download_dataset(self, dataset, dataset_id):
    """Populate a local dataset from the server, reusing the local image cache.

    Images already present in the content-addressed storage are hard-linked
    into the dataset; only the remaining images are downloaded. Annotations
    are always fetched from the server for both groups.
    """
    images = self.public_api.image.get_list(dataset_id)
    progress_anns = sly.Progress('Dataset {!r}: download annotations'.format(dataset.name), len(images), self.logger)
    images_to_download = images
    # copy images from cache to task folder and download corresponding annotations
    if self.has_images_storage():
        images_to_download, images_in_cache, images_cache_paths = self._split_images_by_cache(images)
        self.logger.info('Dataset {!r}'.format(dataset.name), extra={'total_images': len(images), 'images_in_cache': len(images_in_cache), 'images_to_download': len(images_to_download)})
        # Sanity check: the cache split must partition the full image list.
        if len(images_to_download) + len(images_in_cache) != len(images):
            raise RuntimeError("Error with images cache during download. Please contact support.")
        if len(images_in_cache) > 0:
            progress_imgs_cache = sly.Progress(
                'Dataset {!r}: restoring images from cache'.format(dataset.name), len(images_in_cache), self.logger)
            img_cache_ids = [img_info.id for img_info in images_in_cache]
            ann_info_list = self.public_api.annotation.download_batch(
                dataset_id, img_cache_ids, progress_anns.iters_done_report)
            # Keyed by image id (despite the name suggesting image name).
            img_name_to_ann = {ann.image_id: ann.annotation for ann in ann_info_list}
            for img_info, img_cache_path in zip(images_in_cache, images_cache_paths):
                item_name = _maybe_append_image_extension(img_info.name, img_info.ext)
                # Hardlink from cache; skip validation since cached files were
                # validated when first stored.
                dataset.add_item_file(item_name, img_cache_path, img_name_to_ann[img_info.id], _validate_img=False, _use_hardlink=True)
                progress_imgs_cache.iter_done_report()
    # download images from server
    if len(images_to_download) > 0:
        progress_imgs_download = sly.Progress(
            'Dataset {!r}: download images'.format(dataset.name), len(images_to_download), self.logger)
        # prepare lists for api methods
        img_ids = []
        img_paths = []
        for img_info in images_to_download:
            img_ids.append(img_info.id)
            # TODO download to a temp file and use dataset api to add the image to the dataset.
            img_paths.append(
                os.path.join(dataset.img_dir, _maybe_append_image_extension(img_info.name, img_info.ext)))
        # download annotations
        ann_info_list = self.public_api.annotation.download_batch(
            dataset_id, img_ids, progress_anns.iters_done_report)
        img_name_to_ann = {ann.image_id: ann.annotation for ann in ann_info_list}
        self.public_api.image.download_paths(
            dataset_id, img_ids, img_paths, progress_imgs_download.iters_done_report)
        for img_info, img_path in zip(images_to_download, img_paths):
            dataset.add_item_file(img_info.name, img_path, img_name_to_ann[img_info.id])
        # Newly downloaded images are added to the cache for future tasks.
        if self.has_images_storage():
            progress_cache = sly.Progress(
                'Dataset {!r}: cache images'.format(dataset.name), len(img_paths), self.logger)
            img_hashes = [img_info.hash for img_info in images_to_download]
            self.storage.images.write_objects(img_paths, img_hashes, progress_cache.iter_done_report)
def convert():
    """Convert the unpacked train/test source folders into a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    # Per-split source folders: (images subdir, ground-truth subdir).
    split_dirs = {
        'train': ('ch4_training_images', 'ch4_training_localization_transcription_gt'),
        'test': ('ch4_test_images', 'Challenge4_Test_Task1_GT'),
    }
    for directory in ('train', 'test'):
        imgs_sub, inst_sub = split_dirs[directory]
        imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, imgs_sub)
        inst_dir = os.path.join(sly.TaskPaths.DATA_DIR, inst_sub)
        for ds_name, sample_names in read_datasets(inst_dir, directory).items():
            ds = out_project.create_dataset(ds_name)
            progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
            for name in sample_names:
                src_img_path = os.path.join(imgs_dir, name + '.jpg')
                inst_path = os.path.join(inst_dir, 'gt_' + name + '.txt')
                # Add the sample only when both the image and its GT file exist.
                if os.path.isfile(src_img_path) and os.path.isfile(inst_path):
                    ds.add_item_file(name, src_img_path,
                                     ann=get_ann(src_img_path, inst_path))
                progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Convert the Graz50 facade source data into a Supervisely project."""
    sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    images_root = os.path.join(
        sly.TaskPaths.DATA_DIR, 'graz50_facade_dataset/graz50_facade_dataset/images')
    labels_root = os.path.join(
        sly.TaskPaths.DATA_DIR, 'graz50_facade_dataset/graz50_facade_dataset/labels_full')
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for ds_name, sample_names in read_datasets(labels_root).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        for name in sample_names:
            image_path = os.path.join(images_root, name + '.png')
            label_path = os.path.join(labels_root, name + '.png')
            # Skip samples whose image or label file is missing on disk.
            if os.path.isfile(image_path) and os.path.isfile(label_path):
                ds.add_item_file(
                    name, image_path,
                    ann=get_ann(image_path, label_path, default_classes_colors))
            progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert_video():
    """Split every input video into frames and store them as project datasets.

    One dataset is created per video file; every ``step``-th frame is saved
    as a PNG item named ``frame_XXXXX``.

    Raises:
        RuntimeError: when no video files are found in the data directory.
    """
    # Use a context manager so the settings file handle is not leaked.
    with open(sly.TaskPaths.SETTINGS_PATH, 'r') as settings_file:
        task_settings = json.load(settings_file)
    step = DEFAULT_STEP
    if 'step' in task_settings['options']:
        step = int(task_settings['options']['step'])
    else:
        sly.logger.warning(
            'step parameter not found. set to default: {}'.format(
                DEFAULT_STEP))
    video_paths = sly.fs.list_files(sly.TaskPaths.DATA_DIR,
                                    sly.video.ALLOWED_VIDEO_EXTENSIONS)
    # BUG FIX: was `len(video_paths) < 0`, which is never true, so the
    # "Videos not found" error could never be raised.
    if len(video_paths) == 0:
        raise RuntimeError("Videos not found")
    project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR,
                     task_settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for video_path in video_paths:
        ds_name = sly.fs.get_file_name(video_path)
        ds = project.create_dataset(ds_name=ds_name)
        vreader = skvideo.io.FFmpegReader(video_path)
        vlength = vreader.getShape()[0]
        progress = sly.Progress('Import video: {}'.format(ds_name), vlength)
        for frame_id, image in enumerate(vreader.nextFrame()):
            # Keep only every `step`-th frame.
            if frame_id % step == 0:
                img_name = "frame_{:05d}".format(frame_id)
                ds.add_item_np(img_name, image, img_ext='.png')
            progress.iter_done_report()
def convert(self):
    """Build a Supervisely project from the pre-scanned source datasets.

    Samples that fail annotation generation are skipped with a warning
    instead of aborting the whole conversion.
    """
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR,
                     self.settings['res_names']['project']),
        sly.OpenMode.CREATE)
    # NOTE: total is the number of datasets, but iter_done_report() is called
    # per sample inside the inner loop — counts will overshoot for multi-sample
    # datasets. Kept as-is; confirm intended behavior before changing.
    progress = sly.Progress('Dataset:', len(self.src_datasets))
    for ds_name, samples_paths in self.src_datasets.items():
        ds = out_project.create_dataset(ds_name)
        for src_img_path in samples_paths:
            try:
                ann_path = self.get_ann_path(src_img_path)
                if all(
                        (os.path.isfile(x) for x in [src_img_path, ann_path])):
                    ann = self.get_ann(src_img_path, ann_path)
                    ds.add_item_file(os.path.basename(src_img_path),
                                     src_img_path,
                                     ann=ann)
            except Exception as e:
                # Best-effort import: log and continue with the next sample.
                exc_str = str(e)
                sly.logger.warn(
                    'Input sample skipped due to error: {}'.format(
                        exc_str),
                    exc_info=True,
                    extra={
                        'exc_str': exc_str,
                        'dataset_name': ds_name,
                        'image_name': src_img_path,
                    })
            progress.iter_done_report()
    out_meta = sly.ProjectMeta(
        obj_classes=sly.ObjClassCollection(self.id_to_obj_class.values()))
    out_project.set_meta(out_meta)
def _docker_pull(self, raise_exception=True):
    """Pull the task's docker image.

    Args:
        raise_exception: when True (default), a failed pull re-raises as a
            DockerException; when False, the failure is only logged and the
            pull step is skipped.

    Raises:
        DockerException: if the pull fails and ``raise_exception`` is True.
    """
    self.logger.info('Docker image will be pulled',
                     extra={'image_name': self.docker_image_name})
    # Dummy one-step progress so the UI shows a "pulling" state.
    progress_dummy = sly.Progress('Pulling image...', 1, ext_logger=self.logger)
    progress_dummy.iter_done_report()
    try:
        pulled_img = self._docker_api.images.pull(self.docker_image_name)
        self.logger.info('Docker image has been pulled',
                         extra={
                             'pulled': {
                                 'tags': pulled_img.tags,
                                 'id': pulled_img.id
                             }
                         })
    except DockerException as e:
        if raise_exception is True:
            # FIX: chain the original exception so the real pull error is not
            # discarded from the traceback.
            raise DockerException(
                'Unable to pull image: see actual error above. '
                'Please, run the task again or contact support team.') from e
        else:
            self.logger.warn(
                "Pulling step is skipped. Unable to pull image: {!r}.".
                format(str(e)))
def add_metadata_to_images(api, path_to_files, dataset_id, app_logger):
    """Attach metadata from per-image JSON files to matching dataset images.

    JSON files are matched to images by name (``<image_name>.json``). When
    RESOLVE == "merge", existing image meta keys are kept and clashing
    original values are preserved under ``<key>-original``; otherwise the
    file's metadata replaces the image meta entirely.
    """
    path_to_images = [
        sly.fs.get_file_name(json_name)
        for json_name in os.listdir(path_to_files)
    ]
    images = api.image.get_list(dataset_id)
    image_names = [image_info.name for image_info in images]
    matches = list(set(path_to_images) & set(image_names))
    if len(path_to_images) != len(matches):
        app_logger.warn(
            '{} metadata files were given, {} matches image names in dataset'.
            format(len(path_to_images), len(matches)))
    # PERF FIX: build the set once — the original tested membership against a
    # list inside the per-image loop (O(n) per lookup).
    meta_file_names = set(path_to_images)
    progress = sly.Progress('Uploading metadata to images', len(images),
                            app_logger)
    for batch in sly.batched(images):
        for image_info in batch:
            if image_info.name not in meta_file_names:
                app_logger.warn(
                    'Metadata file for image {} was not found in directory {}'.
                    format(image_info.name, path_to_files))
                continue
            meta = load_json_file(
                os.path.join(path_to_files, image_info.name + '.json'))
            if RESOLVE == "merge":
                meta_copy = meta.copy()
                # Keep the clashing original values under "<key>-original".
                for key in meta.keys():
                    if key in image_info.meta:
                        meta_copy[key + "-original"] = image_info.meta[key]
                meta = {**image_info.meta, **meta_copy}
            api.image.update_meta(image_info.id, meta)
        progress.iters_done_report(len(batch))
def convert():
    """Convert the RANGE source layout into a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    all_dirs = os.path.join(sly.TaskPaths.DATA_DIR, 'RANGE')
    for ds_name, sample_names in read_datasets(all_dirs).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        # Source folders depend only on the dataset name — resolve them once.
        photos_dir = os.path.join(all_dirs, ds_name, 'd_images')
        masks_dir = os.path.join(all_dirs, ds_name, 'd_masks')
        for name in sample_names:
            image_path = os.path.join(photos_dir, name + '.jpg')
            mask_path = os.path.join(masks_dir, name + '.mat')
            # Only samples with both the photo and its .mat mask are imported.
            if os.path.isfile(image_path) and os.path.isfile(mask_path):
                ds.add_item_file(
                    name, image_path,
                    ann=get_ann(image_path, mask_path, number_class, pixel_color))
            progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Convert the Pratheepan skin segmentation data into a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'Pratheepan_Dataset')
    inst_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'Ground_Truth')
    default_classes_colors = {'background': [1, 1, 1], 'skin': [255, 255, 255]}
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for ds_name, sample_names in read_datasets(inst_dir).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        # Ground-truth masks live under a "GroundT_<dataset>" sibling folder.
        ds_images = os.path.join(imgs_dir, ds_name)
        ds_masks = os.path.join(inst_dir, 'GroundT_' + ds_name)
        for name in sample_names:
            image_path = os.path.join(ds_images, name + '.jpg')
            mask_path = os.path.join(ds_masks, name + '.png')
            if os.path.isfile(image_path) and os.path.isfile(mask_path):
                ds.add_item_file(
                    name, image_path,
                    ann=get_ann(image_path, mask_path, default_classes_colors))
            progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def preprocess():
    """Download model weights (pretrained release or custom Team-Files file)
    and load the YOLOv5 model, publishing it via module-level globals.
    """
    global model, half, device, imgsz, meta, final_weights
    global stride
    # download weights
    progress = sly.Progress("Downloading weights", 1, is_size=True, need_info_log=True)
    local_path = os.path.join(my_app.data_dir, "weights.pt")
    if modelWeightsOptions == "pretrained":
        # Official YOLOv5 v5.0 release asset matching the selected model size.
        url = f"https://github.com/ultralytics/yolov5/releases/download/v5.0/{pretrained_weights}.pt"
        final_weights = url
        sly.fs.download(url, local_path, my_app.cache, progress)
    elif modelWeightsOptions == "custom":
        final_weights = custom_weights
        # opt.yaml is expected two levels above the weights file in Team Files.
        configs = os.path.join(Path(custom_weights).parents[1], 'opt.yaml')
        configs_local_path = os.path.join(my_app.data_dir, 'opt.yaml')
        file_info = my_app.public_api.file.get_info_by_path(
            TEAM_ID, custom_weights)
        # Reset the progress to the real file size before the download starts.
        progress.set(current=0, total=file_info.sizeb)
        my_app.public_api.file.download(TEAM_ID, custom_weights, local_path,
                                        my_app.cache, progress.iters_done_report)
        my_app.public_api.file.download(TEAM_ID, configs, configs_local_path)
    else:
        raise ValueError(
            "Unknown weights option {!r}".format(modelWeightsOptions))
    # load model on device
    model, half, device, imgsz, stride = load_model(local_path, device=DEVICE_STR)
    meta = construct_model_meta(model)
    sly.logger.info("Model has been successfully deployed")
def _construct_and_fill_model(self):
    """Create the model and initialize its weights.

    Weights come from the snapshot in MODEL_DIR according to
    ``weights_init_type`` (transfer learning vs. continue training);
    an empty MODEL_DIR leaves the model uninitialized.
    """
    # TODO: Move it progress to base class
    progress_dummy = sly.Progress('Building model:', 1)
    progress_dummy.iter_done_report()
    # Output dimension is derived from the largest class index in the mapping.
    self.model = create_model(
        n_cls=(max(self.class_title_to_idx.values()) + 1),
        device_ids=self.device_ids)
    if sly.fs.dir_empty(sly.TaskPaths.MODEL_DIR):
        sly.logger.info('Weights will not be inited.')
        # @TODO: add random init (m.weight.data.normal_(0, math.sqrt(2. / n))
    else:
        wi_type = self.config['weights_init_type']
        ewit = {'weights_init_type': wi_type}
        sly.logger.info('Weights will be inited from given model.', extra=ewit)
        weights_rw = WeightsRW(sly.TaskPaths.MODEL_DIR)
        if wi_type == TRANSFER_LEARNING:
            # The head layer may not match the snapshot's class count — skip it.
            self.model = weights_rw.load_for_transfer_learning(
                self.model, ignore_matching_layers=['last_conv'],
                logger=logger)
        elif wi_type == CONTINUE_TRAINING:
            self.model = weights_rw.load_strictly(self.model)
        sly.logger.info('Weights are loaded.', extra=ewit)
def process_dataset_links(project_id, file_path):
    """Create a dataset named after ``file_path`` and add one image per URL line.

    Each non-empty line of the file is treated as an image URL; the image name
    is the slugified URL basename with its original extension. Failing links
    are logged and skipped.
    """
    dataset_name = os.path.splitext(os.path.basename(file_path))[0]
    dataset_id = create_dataset_api(project_id, dataset_name)
    with open(file_path) as fp:
        lines = fp.readlines()
    progress = sly.Progress('Import dataset: {}'.format(dataset_name),
                            len(lines))
    for line in lines:
        url = line.strip()
        if url:
            try:
                # os.path.splitext always returns a 2-tuple, so the original
                # `len(...) == 2` guard was dead code — unpack directly.
                image_name, image_ext = os.path.splitext(os.path.basename(url))
                image_name = slugify(image_name) + image_ext
                add_image_to_dataset(dataset_id, image_name, url)
            except Exception as e:
                # Best-effort import: log the failing link and continue.
                exc_str = str(e)
                sly.logger.warn(
                    'Input link skipped due to error: {}'.format(exc_str),
                    exc_info=True,
                    extra={
                        'exc_str': exc_str,
                        'file_path': file_path,
                        'link': line,
                    })
        progress.iter_done_report()
def convert():
    """Convert stuffthingmaps-style segmentation data into a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    imgs_dir = sly.TaskPaths.DATA_DIR
    inst_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'stuffthingmaps_trainval2017')
    # Class index/color mapping is read from the bundled labels file.
    number_class, pixel_color = read_colors(
        os.path.join(sly.TaskPaths.DATA_DIR, 'labels.txt'))
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for ds_name, sample_names in read_datasets(inst_dir).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        ds_images = os.path.join(imgs_dir, ds_name)
        ds_masks = os.path.join(inst_dir, ds_name)
        for name in sample_names:
            image_path = os.path.join(ds_images, name + '.jpg')
            mask_path = os.path.join(ds_masks, name + '.png')
            if os.path.isfile(image_path) and os.path.isfile(mask_path):
                ds.add_item_file(
                    name, image_path,
                    ann=get_ann(image_path, mask_path, number_class, pixel_color))
            progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Convert ADE-style instance annotations into a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    all_img = os.path.join(sly.TaskPaths.DATA_DIR, 'ADEChallengeData2016/images')
    all_ann = os.path.join(sly.TaskPaths.DATA_DIR, 'annotations_instance')
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    # Background mapping in both directions (name -> color and color -> name).
    default_classes_colors = {'background': (10, 10, 10)}
    default_colors_classes = {(10, 10, 10): 'background'}
    for ds_name, sample_names in read_datasets(all_ann).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        ds_images = os.path.join(all_img, ds_name)
        ds_masks = os.path.join(all_ann, ds_name)
        for name in sample_names:
            image_path = os.path.join(ds_images, name + '.jpg')
            mask_path = os.path.join(ds_masks, name + '.png')
            if os.path.isfile(image_path) and os.path.isfile(mask_path):
                ds.add_item_file(
                    name, image_path,
                    ann=get_ann(image_path, mask_path,
                                default_classes_colors, default_colors_classes))
            progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def upload_artifacts(local_dir, remote_dir):
    """Upload every file under ``local_dir`` to Team Files, preserving the
    relative layout, with a size-based progress bar reported to the task UI.

    Files that already exist remotely are skipped (their size still advances
    the progress so the bar stays accurate).
    """
    def _gen_message(current, total):
        return f"Upload artifacts to Team Files [{current}/{total}] "
    _save_link_to_ui(local_dir, globals.my_app.app_url)
    local_files = sly.fs.list_files_recursively(local_dir)
    # Progress is measured in bytes across all files, not in file count.
    total_size = sum(
        [sly.fs.get_file_size(file_path) for file_path in local_files])
    progress = sly.Progress(_gen_message(0, len(local_files)),
                            total_size,
                            is_size=True)
    progress_cb = partial(update_uploading_progress,
                          api=globals.api,
                          task_id=globals.task_id,
                          progress=progress)
    progress_cb(0)
    for idx, local_path in enumerate(local_files):
        remote_path = os.path.join(
            remote_dir, local_path.replace(local_dir, '').lstrip("/"))
        if globals.api.file.exists(globals.team_id, remote_path):
            # Already uploaded: count its size so the bar still reaches 100%.
            progress.iters_done_report(sly.fs.get_file_size(local_path))
        else:
            # Snapshot the counter: the monitor reports absolute bytes_read
            # for the current file only, so we add it to the running total.
            # NOTE(review): the lambda closes over progress_last late-bound;
            # safe here because upload() completes before the next iteration.
            progress_last = progress.current
            globals.api.file.upload(
                globals.team_id, local_path, remote_path,
                lambda monitor: progress_cb(progress_last + monitor.bytes_read))
        progress.message = _gen_message(idx + 1, len(local_files))
        time.sleep(0.5)
def download_import_files(self, task_id, data_dir):
    """Download the task's import files from the streaming API into ``data_dir``.

    The stream interleaves file headers (chunks with a non-empty path) and
    data chunks; a new path closes the previous writer and opens a new one.
    """
    import_struct = self.api.simple_request('GetImportStructure',
                                            sly.api_proto.ListFiles,
                                            sly.api_proto.Id(id=task_id))
    progress = sly.Progress('Downloading', len(import_struct.files), self.logger)

    def close_fh(fh):
        # Finalize a writer; a failed checksum/size check skips the file.
        fpath = fh.file_path
        if fh.close_and_check():
            progress.iter_done_report()
        else:
            self.logger.warning('file was skipped while downloading',
                                extra={'file_path': fpath})

    file_handler = None
    for chunk in self.api.get_stream_with_data(
            'GetImportFiles', sly.api_proto.ChunkFile,
            sly.api_proto.ImportRequest(task_id=task_id,
                                        files=import_struct.files)):
        new_fpath = chunk.file.path
        if new_fpath:  # non-empty
            # A new path marks the start of the next file in the stream.
            if file_handler is not None:
                close_fh(file_handler)
            real_fpath = os.path.join(data_dir, new_fpath.lstrip('/'))
            self.logger.trace('download import file',
                              extra={'file_path': real_fpath})
            file_handler = sly.ChunkedFileWriter(file_path=real_fpath)
        file_handler.write(chunk.chunk)
    # NOTE(review): if the stream yields no chunks, file_handler is still None
    # here and close_fh(None) would raise — presumably the server always sends
    # at least one chunk; confirm.
    close_fh(file_handler)
def _construct_and_fill_model(self):
    """Instantiate the segmentation model, load snapshot weights per the
    configured init mode, and move the model to the GPU.
    """
    # Progress reporting to show a progress bar in the UI.
    model_build_progress = sly.Progress('Building model:', 1)
    # Check the class name --> index mapping to infer the number of model output dimensions.
    num_classes = max(self.class_title_to_idx.values()) + 1
    # Initialize the model.
    model = PyTorchSegmentation(num_classes=num_classes)
    sly.logger.info('Model has been instantiated.')
    # Load model weights appropriate for the given training mode.
    weights_rw = WeightsRW(sly.TaskPaths.MODEL_DIR)
    weights_init_type = self.config[WEIGHTS_INIT_TYPE]
    if weights_init_type == TRANSFER_LEARNING:
        # For transfer learning, do not attempt to load the weights for the model head. The existing snapshot may
        # have been trained on a different dataset, even on a different set of classes, and is in general not
        # compatible with the current model even in terms of dimensions. The head of the model will be initialized
        # randomly.
        self._model = weights_rw.load_for_transfer_learning(
            model, ignore_matching_layers=['_head'], logger=sly.logger)
    elif weights_init_type == CONTINUE_TRAINING:
        # Continuing training from an older snapshot requires full compatibility between the two models, including
        # class index mapping. Hence the snapshot weights must exactly match the structure of our model instance.
        self._model = weights_rw.load_strictly(model)
    # Model weights have been loaded, move them over to the GPU.
    self._model.cuda()
    # Advance the progress bar and log a progress message.
    sly.logger.info('Weights have been loaded.',
                    extra={WEIGHTS_INIT_TYPE: weights_init_type})
    model_build_progress.iter_done_report()
def upload_tar_file(self, task_id, file_path):
    """Upload a DTL archive for the task, reporting progress in megabytes."""
    archive_size_mb = sly.fs.get_file_size(file_path) / 1024.0 / 1024
    tracker = sly.Progress("Uploading file", archive_size_mb, ext_logger=self.logger)
    self.public_api.task.upload_dtl_archive(task_id, file_path,
                                            tracker.set_current_value)
def _print_progress(monitor, upload_progress):
    """Drive the archive upload progress bar from an upload monitor.

    The Progress object is created lazily on the first call, once the
    monitor knows the total upload size.
    """
    if not upload_progress:
        tracker = sly.Progress(message="Upload {!r}".format(ARCHIVE_NAME),
                               total_cnt=monitor.len,
                               ext_logger=app_logger,
                               is_size=True)
        upload_progress.append(tracker)
    upload_progress[0].set_current_value(monitor.bytes_read)
def convert():
    """Convert COCO_Text-style annotations into a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'train2017')
    ann_file = os.path.join(sly.TaskPaths.DATA_DIR, 'COCO_Text.json')
    src_datasets = read_datasets(ann_file)
    photo_to_coords_text = read_coords_text(ann_file)
    NAME_ZERO_PADDING = 12
    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        for name in sample_names:
            # Image files on disk are zero-padded to 12 digits.
            image_path = os.path.join(imgs_dir,
                                      name.zfill(NAME_ZERO_PADDING) + '.jpg')
            if os.path.isfile(image_path):
                try:
                    coords_text = photo_to_coords_text[int(name)]
                except KeyError:
                    # No text annotations for this image — skip it entirely.
                    continue
                ds.add_item_file(name, image_path,
                                 ann=get_ann(image_path, coords_text))
            progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Convert the Paris Art Deco facades data into a Supervisely project."""
    sly.fs.clean_dir(sly.TaskPaths.RESULTS_DIR)
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    all_img = os.path.join(sly.TaskPaths.DATA_DIR,
                           'ParisArtDecoFacadesDataset-master/images')
    all_ann = os.path.join(sly.TaskPaths.DATA_DIR,
                           'ParisArtDecoFacadesDataset-master/labels')
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    src_datasets = read_datasets(all_ann)
    # Label index -> class name and label index -> RGB color.
    number_class = {
        2: 'Door',
        3: 'Shop',
        4: 'Balcony',
        5: 'Window',
        6: 'Wall',
        7: 'Sky',
        8: 'Roof',
        1: 'Unknown',
    }
    pixel_color = {
        2: (255, 255, 0),
        3: (0, 128, 0),
        4: (0, 0, 255),
        5: (128, 255, 0),
        6: (255, 0, 0),
        7: (0, 255, 255),
        8: (211, 211, 211),
        1: (0, 0, 0),
    }
    for ds_name, sample_names in src_datasets.items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        for name in sample_names:
            image_path = os.path.join(all_img, name + '.png')
            label_path = os.path.join(all_ann, name + '.txt')
            if os.path.isfile(image_path) and os.path.isfile(label_path):
                ds.add_item_file(
                    name, image_path,
                    ann=get_ann(image_path, label_path, number_class, pixel_color))
            progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert():
    """Convert part-annotation .mat files into a Supervisely project."""
    settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'JPEGImages')
    inst_dir_trainval = os.path.join(sly.TaskPaths.DATA_DIR, 'Annotations_Part')
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for ds_name, sample_names in read_datasets(inst_dir_trainval).items():
        ds = out_project.create_dataset(ds_name)
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        for name in sample_names:
            image_path = os.path.join(imgs_dir, name + '.jpg')
            mat_path = os.path.join(inst_dir_trainval, name + '.mat')
            # Import only samples whose image and .mat annotation both exist.
            if os.path.isfile(image_path) and os.path.isfile(mat_path):
                ds.add_item_file(name, image_path,
                                 ann=get_ann(image_path, mat_path))
            progress.iter_done_report()
    out_project.set_meta(sly.ProjectMeta(obj_classes=classes_dict))
def convert(self):
    """Convert the Mapillary source samples into a Supervisely project,
    skipping (and logging) samples whose annotation generation fails.
    """
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR,
                     self.settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for ds_name, sample_names in self.src_datasets.items():
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        progress.report_every = 10  # By default progress for 18000 samples report only every 180 - too big.
        ds = out_project.create_dataset(ds_name)
        for name in sample_names:
            img_name = name + '.jpg'
            src_img_path = os.path.join(self._imgs_dir(ds_name), img_name)
            inst_path = os.path.join(self._inst_dir(ds_name), name + '.png')
            try:
                ann = self._generate_annotation(src_img_path, inst_path)
                ds.add_item_file(img_name, src_img_path, ann=ann)
            except Exception as e:
                # Best-effort conversion: a broken sample must not abort the run.
                exc_str = str(e)
                sly.logger.warn('Input sample skipped due to error: {}'.format(exc_str),
                                exc_info=True,
                                extra={
                                    'exc_str': exc_str,
                                    'dataset_name': ds_name,
                                    'image': src_img_path,
                                })
            progress.iter_done_report()
        sly.logger.info("Dataset '{}' samples processing is done.".format(ds_name), extra={})
    out_meta = sly.ProjectMeta(
        obj_classes=sly.ObjClassCollection(self._class_id_to_object_class.values()))
    out_project.set_meta(out_meta)
    sly.logger.info("Mapillary samples processing is done.", extra={})
def get_progress_cb(message, total, is_size=False):
    """Create a Progress and return a task-bound reporting callback.

    The callback is invoked once with 0 so the bar appears immediately.
    """
    tracker = sly.Progress(message, total, is_size=is_size)
    progress_cb = partial(update_progress,
                          api=globals.api,
                          task_id=globals.task_id,
                          progress=tracker)
    progress_cb(0)
    return progress_cb
def install_pip_requirements(self, container_id=None):
    """Install the app's pip requirements inside the task container, when the
    pip cache needs syncing; otherwise do nothing.
    """
    # Guard clause: nothing to do unless a sync is explicitly required.
    if self._need_sync_pip_cache is not True:
        return
    self.logger.info("Installing app requirements")
    # One-step dummy progress so the UI reflects the install phase.
    dummy = sly.Progress('Installing app requirements...', 1, ext_logger=self.logger)
    dummy.iter_done_report()
    requirements_path = os.path.join(self.dir_task_src_container, "requirements.txt")
    self._exec_command("pip3 install -r " + requirements_path,
                       add_envs=self.main_step_envs(),
                       container_id=container_id)
    self.process_logs()
    self.logger.info("Requirements are installed")
def cache_annotations(api: sly.Api, task_id, context, state, app_logger):
    """Download and cache every annotation of the project into the module-level
    ``anns``, ``images_info`` and ``labels`` stores, then report readiness.
    """
    progress = sly.Progress("Cache annotations", project_info.items_count)
    for dataset in api.dataset.get_list(project_id):
        images = api.image.get_list(dataset.id)
        for batch in sly.batched(images):
            image_ids = [image_info.id for image_info in batch]
            ann_infos = api.annotation.download_batch(dataset.id, image_ids)
            for image_id, image_info, ann_info in zip(image_ids, batch, ann_infos):
                ann = sly.Annotation.from_json(ann_info.annotation, meta)
                # Module-level caches shared with the rest of the app.
                anns[image_id] = ann
                images_info[image_id] = image_info
                # Index labels by class name, then by image id.
                for label in ann.labels:
                    labels[label.obj_class.name][image_id].append(label)
            progress.iters_done_report(len(batch))
    # Final one-shot progress so the UI switches to the "ready" state.
    progress = sly.Progress("App is ready", 1)
    progress.iter_done_report()
def create_foreground(api: sly.Api, task_id, context, state, app_logger):
    """For every RGBA image in the project, derive foreground (and optional
    fuzzy) masks from the alpha channel and upload them as new labels.

    Images without an alpha channel are skipped with a warning. Sets the
    project as the task output and stops the app when done.
    """
    global meta
    project_info = api.project.get_info_by_id(project_id)
    meta = create_classes(api)
    progress = sly.Progress("Processing", project_info.items_count)
    for dataset in api.dataset.get_list(project_id):
        images_infos = api.image.get_list(dataset.id)
        for batch_infos in sly.batched(images_infos, 20):
            ids = []
            names = []
            local_paths = []
            for info in batch_infos:
                ids.append(info.id)
                names.append(info.name)
                local_paths.append(os.path.join(app.data_dir, info.name))
            api.image.download_paths(dataset.id, ids, local_paths)
            anns_infos = api.annotation.download_batch(dataset.id, ids)
            anns = [
                sly.Annotation.from_json(info.annotation, meta)
                for info in anns_infos
            ]
            res_ids = []
            res_anns = []
            for img_id, img_name, img_path, ann in zip(ids, names, local_paths, anns):
                # Keep the alpha channel — it is the input for mask extraction.
                img = sly.image.read(img_path, remove_alpha_channel=False)
                # Only H x W x 4 (RGBA) images can be processed.
                if len(img.shape) == 3:
                    if img.shape[2] != 4:
                        sly.logger.warn(
                            f"Image {img_name} (id={img_id}) does not have alpha channel, will be skipped"
                        )
                        continue
                else:
                    # Grayscale (2-D) images also lack an alpha channel.
                    sly.logger.warn(
                        f"Image {img_name} (id={img_id}) does not have alpha channel, will be skipped"
                    )
                    continue
                fg, fuzzy = get_masks(img)
                new_ann = ann.add_label(fg)
                if fuzzy is not None:
                    new_ann = new_ann.add_label(fuzzy)
                res_ids.append(img_id)
                res_anns.append(new_ann)
            api.annotation.upload_anns(res_ids, res_anns)
            # Clean up the downloaded batch before moving on.
            for img_path in local_paths:
                sly.fs.silent_remove(img_path)
            progress.iters_done_report(len(batch_infos))
    api.task.set_output_project(task_id, project_id)
    app.stop()
def get_progress_cb(api, task_id, message, total, is_size=False, func=update_progress):
    """Build a progress-reporting callback bound to the given api/task.

    Unlike the eager variant elsewhere, this one does not fire an initial
    zero report — the caller drives every update.
    """
    tracker = sly.Progress(message, total, is_size=is_size)
    return partial(func, api=api, task_id=task_id, progress=tracker)