def convert(self):
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, self.settings['res_names']['project']),
        sly.OpenMode.CREATE)
    progress = sly.Progress('Dataset:', len(self.src_datasets))
    for ds_name, samples_paths in self.src_datasets.items():
        ds = out_project.create_dataset(ds_name)
        for src_img_path in samples_paths:
            try:
                ann_path = self.get_ann_path(src_img_path)
                if all(os.path.isfile(x) for x in [src_img_path, ann_path]):
                    ann = self.get_ann(src_img_path, ann_path)
                    ds.add_item_file(os.path.basename(src_img_path), src_img_path, ann=ann)
            except Exception as e:
                exc_str = str(e)
                sly.logger.warn(
                    'Input sample skipped due to error: {}'.format(exc_str),
                    exc_info=True,
                    extra={
                        'exc_str': exc_str,
                        'dataset_name': ds_name,
                        'image_name': src_img_path,
                    })
        progress.iter_done_report()
    out_meta = sly.ProjectMeta(
        obj_classes=sly.ObjClassCollection(self.id_to_obj_class.values()))
    out_project.set_meta(out_meta)
def convert_to_nonoverlapping(meta: sly.ProjectMeta, ann: sly.Annotation) -> (sly.ProjectMeta, sly.Annotation):
    common_img = np.zeros(ann.img_size, np.int32)  # size is (h, w)
    for idx, lbl in enumerate(ann.labels, start=1):
        if need_convert(lbl.obj_class.geometry_type):
            if allow_render_for_any_shape(lbl) is True:
                lbl.draw(common_img, color=idx)
            else:
                sly.logger.warn(
                    "Object of class {!r} (shape: {!r}) has non spatial shape {!r}. It will not be rendered."
                    .format(lbl.obj_class.name,
                            lbl.obj_class.geometry_type.geometry_name(),
                            lbl.geometry.geometry_name()))

    new_classes = sly.ObjClassCollection()
    new_labels = []
    for idx, lbl in enumerate(ann.labels, start=1):
        if not need_convert(lbl.obj_class.geometry_type):
            new_labels.append(lbl.clone())
        else:
            if allow_render_for_any_shape(lbl) is False:
                continue
            # @TODO: get part of the common_img for speedup
            mask = common_img == idx
            if np.any(mask):  # figure may be entirely covered by others
                new_bmp = sly.Bitmap(data=mask)
                if new_classes.get(lbl.obj_class.name) is None:
                    new_classes = new_classes.add(lbl.obj_class.clone(geometry_type=sly.Bitmap))
                new_lbl = lbl.clone(geometry=new_bmp, obj_class=new_classes.get(lbl.obj_class.name))
                new_labels.append(new_lbl)

    new_meta = meta.clone(obj_classes=new_classes)
    new_ann = ann.clone(labels=new_labels)
    return (new_meta, new_ann)
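# The snippet above reassigns `new_classes = new_classes.add(...)` because ObjClassCollection is
# immutable: add() returns a new collection instead of mutating in place. A minimal standalone
# sketch of that behavior (class names here are illustrative, not from the snippet):
import supervisely_lib as sly

classes = sly.ObjClassCollection()
classes = classes.add(sly.ObjClass('car', sly.Bitmap))   # must reassign: add() returns a copy
assert classes.get('car') is not None
assert classes.get('person') is None                     # get() returns None for unknown names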
def create_obj_class_collection(classes_mapping: Dict) -> sly.ObjClassCollection:
    cls_list = [sly.ObjClass(cls_name, sly.Bitmap) for cls_name in classes_mapping.keys()]
    return sly.ObjClassCollection(cls_list)
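# Hedged usage sketch for create_obj_class_collection() above; the mapping is made up and only
# the keys (class names) matter here, since the values are ignored by the helper.
mapping = {'person': 1, 'car': 2}
collection = create_obj_class_collection(mapping)
print([obj_class.name for obj_class in collection])  # e.g. ['person', 'car']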
def _determine_model_classes(self):
    if 'classes' not in self.config:
        # Key-value tags are ignored as a source of class labels.
        img_tags = set(tag_meta.name for tag_meta in self.project.meta.img_tag_metas
                       if tag_meta.value_type == sly.TagValueType.NONE)
        img_tags -= set(self.config['dataset_tags'].values())
        train_classes = sorted(img_tags)
    else:
        train_classes = self.config['classes']

    if 'ignore_tags' in self.config:
        for tag in self.config['ignore_tags']:
            if tag in train_classes:
                train_classes.remove(tag)

    if len(train_classes) < 2:
        raise RuntimeError('Training requires at least two input classes.')

    in_classification_tags_to_idx, self.classification_tags_sorted = create_classes(train_classes)
    self.classification_tags_to_idx = infer_training_class_to_idx_map(
        self.config['weights_init_type'],
        in_classification_tags_to_idx,
        sly.TaskPaths.MODEL_CONFIG_PATH,
        class_to_idx_config_key=self.classification_tags_to_idx_key)

    self.class_title_to_idx = {}
    self.out_classes = sly.ObjClassCollection()
    logger.info('Determined model internal class mapping', extra={'class_mapping': self.class_title_to_idx})
    logger.info('Determined model out classes', extra={'classes': self.classification_tags_sorted})
def convert(self):
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, self.settings['res_names']['project']),
        sly.OpenMode.CREATE)
    for ds_name, sample_names in self.src_datasets.items():
        progress = sly.Progress('Dataset: {!r}'.format(ds_name), len(sample_names))
        progress.report_every = 10  # By default, progress for 18000 samples would report only every 180 iterations - too coarse.
        ds = out_project.create_dataset(ds_name)
        for name in sample_names:
            img_name = name + '.jpg'
            src_img_path = os.path.join(self._imgs_dir(ds_name), img_name)
            inst_path = os.path.join(self._inst_dir(ds_name), name + '.png')
            try:
                ann = self._generate_annotation(src_img_path, inst_path)
                ds.add_item_file(img_name, src_img_path, ann=ann)
            except Exception as e:
                exc_str = str(e)
                sly.logger.warn(
                    'Input sample skipped due to error: {}'.format(exc_str),
                    exc_info=True,
                    extra={
                        'exc_str': exc_str,
                        'dataset_name': ds_name,
                        'image': src_img_path,
                    })
            progress.iter_done_report()
        sly.logger.info("Dataset '{}' samples processing is done.".format(ds_name), extra={})
    out_meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(self._class_id_to_object_class.values()))
    out_project.set_meta(out_meta)
    sly.logger.info("Mapillary samples processing is done.", extra={})
def prepare_meta(meta):
    new_classes = []
    for cls in meta.obj_classes:
        cls: sly.ObjClass
        new_classes.append(cls.clone(geometry_type=GET_GEOMETRY_FROM_STR("polygon")))
    meta = meta.clone(obj_classes=sly.ObjClassCollection(new_classes))
    return meta
def __init__(self):
    self.settings = load_json_file(sly.TaskPaths.TASK_CONFIG_PATH)
    self.lists_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'ImageSets/Segmentation')
    self.imgs_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'JPEGImages')
    self.segm_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'SegmentationClass')
    self.inst_dir = os.path.join(sly.TaskPaths.DATA_DIR, 'SegmentationObject')
    self.colors_file = os.path.join(sly.TaskPaths.DATA_DIR, 'colors.txt')
    self.with_instances = os.path.isdir(self.inst_dir)
    sly.logger.info('Will import data {} instance info.'.format('with' if self.with_instances else 'without'))
    self.obj_classes = sly.ObjClassCollection()
    self._read_datasets()
    self._read_colors()
def highlight_instances(meta: sly.ProjectMeta, ann: sly.Annotation) -> (sly.ProjectMeta, sly.Annotation):
    new_classes = []
    new_labels = []
    for idx, label in enumerate(ann.labels):
        new_cls = label.obj_class.clone(name=str(idx), color=sly.color.random_rgb())
        new_lbl = label.clone(obj_class=new_cls)
        new_classes.append(new_cls)
        new_labels.append(new_lbl)
    res_meta = meta.clone(obj_classes=sly.ObjClassCollection(new_classes))
    res_ann = ann.clone(labels=new_labels)
    return (res_meta, res_ann)
def set_project_meta(api, project_id, state):
    fg_class = sly.ObjClass(state[const.FG_NAME],
                            GET_GEOMETRY_FROM_STR(state[const.FG_SHAPE]),
                            color=sly.color.hex2rgb(state[const.FG_COLOR]))
    st_class = sly.ObjClass(state[const.ST_NAME],
                            GET_GEOMETRY_FROM_STR(state[const.ST_SHAPE]),
                            color=sly.color.hex2rgb(state[const.ST_COLOR]))
    meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection([fg_class, st_class]))
    api.project.update_meta(project_id, sly.ProjectMeta().to_json())  # clear previous labels and classes
    api.project.update_meta(project_id, meta.to_json())
    return fg_class, st_class
def convert():
    settings = load_json_file(sly.TaskPaths.SETTINGS_PATH)
    out_project = sly.Project(
        os.path.join(sly.TaskPaths.RESULTS_DIR, settings['res_names']['project']),
        sly.OpenMode.CREATE)
    classes_collection = sly.ObjClassCollection()
    instance_classes, id_to_class, class_to_color = read_colors()
    src_datasets = read_datasets()
    skipped_count = 0
    samples_count = 0
    for ds_name, sample_names in src_datasets.items():
        dataset = out_project.create_dataset(ds_name)
        dataset_progress = sly.Progress('Dataset {!r}'.format(ds_name), len(sample_names))
        for name in sample_names:
            try:
                src_img_path = osp.join(images_dir(ds_name), name)
                inst_path = osp.join(instances_dir(ds_name), name)
                ann, classes_collection = generate_annotation(
                    src_img_path, inst_path, id_to_class, class_to_color, classes_collection)
                item_name = osp.splitext(name)[0]
                dataset.add_item_file(item_name, src_img_path, ann)
                samples_count += 1
            except Exception as e:
                exc_str = str(e)
                sly.logger.warn(
                    'Input sample skipped due to error: {}'.format(exc_str),
                    exc_info=True,
                    extra={
                        'exc_str': exc_str,
                        'dataset_name': ds_name,
                        'image_name': name
                    })
                skipped_count += 1
            dataset_progress.iter_done_report()
    sly.logger.info('Processed.', extra={'samples': samples_count, 'skipped': skipped_count})
    out_meta = sly.ProjectMeta(obj_classes=classes_collection)
    out_project.set_meta(out_meta)
def transform_for_instance_segmentation(meta: sly.ProjectMeta, ann: sly.Annotation) -> (sly.ProjectMeta, sly.Annotation):
    new_classes = {}
    for obj_class in meta.obj_classes:
        obj_class: sly.ObjClass
        new_class = obj_class.clone(name=obj_class.name + "-mask")
        new_classes[obj_class.name] = new_class
    new_class_collection = sly.ObjClassCollection(list(new_classes.values()))

    new_labels = []
    for label in ann.labels:
        obj_class = new_classes[label.obj_class.name]
        new_labels.append(label.clone(obj_class=obj_class))

    res_meta = meta.clone(obj_classes=new_class_collection)
    res_ann = ann.clone(labels=new_labels)
    return (res_meta, res_ann)
def transform_for_detection(meta: sly.ProjectMeta, ann: sly.Annotation) -> (sly.ProjectMeta, sly.Annotation):
    new_classes = sly.ObjClassCollection()
    new_labels = []
    for label in ann.labels:
        new_class = label.obj_class.clone(name=label.obj_class.name + "-bbox", geometry_type=sly.Rectangle)
        if label.obj_class.geometry_type is sly.Rectangle:
            new_labels.append(label.clone(obj_class=new_class))
            if new_classes.get(new_class.name) is None:
                new_classes = new_classes.add(new_class)
        else:
            bbox = label.geometry.to_bbox()
            if new_classes.get(new_class.name) is None:
                new_classes = new_classes.add(new_class)
            new_labels.append(label.clone(bbox, new_class))
    res_meta = meta.clone(obj_classes=new_classes)
    res_ann = ann.clone(labels=new_labels)
    return (res_meta, res_ann)
def rename_meta_and_annotations(meta: sly.ProjectMeta, ann: sly.Annotation, suffix="original"):
    def _get_new_name(current_name):
        return f"{current_name}-{suffix}"

    new_classes = []
    for obj_class in meta.obj_classes:
        obj_class: sly.ObjClass
        new_classes.append(obj_class.clone(name=_get_new_name(obj_class.name)))
    new_meta = meta.clone(obj_classes=sly.ObjClassCollection(new_classes))

    new_labels = []
    for label in ann.labels:
        dest_name = _get_new_name(label.obj_class.name)
        dest_class = new_meta.get_obj_class(dest_name)
        new_labels.append(label.clone(obj_class=dest_class))
    new_ann = ann.clone(labels=new_labels)
    return new_meta, new_ann
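# Hedged usage sketch for rename_meta_and_annotations() above, assuming it is importable.
# The class name and image size are made up; the annotation is intentionally empty.
import supervisely_lib as sly

meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection([sly.ObjClass('car', sly.Rectangle)]))
ann = sly.Annotation(img_size=(300, 400))  # (height, width), no labels
new_meta, new_ann = rename_meta_and_annotations(meta, ann, suffix="original")
print(new_meta.get_obj_class('car-original'))  # renamed class is present in the new meta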
def merge(api: sly.Api, task_id, context, state, app_logger):
    classes = _merge(CLASSES_INFO, META1.obj_classes, META2.obj_classes,
                     state["mergeClasses"], state["resolveClasses"])
    tags = _merge(TAGS_INFO, META1.tag_metas, META2.tag_metas,
                  state["mergeTags"], state["resolveTags"])
    res_meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(classes),
                               tag_metas=sly.TagMetaCollection(tags),
                               project_type=PROJECT1.type)
    res_project = api.project.create(state["workspaceId"],
                                     state["resultProjectName"],
                                     type=PROJECT1.type,
                                     description=f"{PROJECT1.name} + {PROJECT2.name}",
                                     change_name_if_conflict=True)
    api.project.update_meta(res_project.id, res_meta.to_json())
    api.project.update_custom_data(res_project.id, {
        "project1": {"id": PROJECT1.id, "name": PROJECT1.name},
        "project2": {"id": PROJECT2.id, "name": PROJECT2.name}
    })

    fields = [
        {"field": "data.createdProjectId", "payload": res_project.id},
        {"field": "data.createdProjectName", "payload": res_project.name},
    ]
    api.app.set_fields(task_id, fields)
    app_logger.info("Project is created",
                    extra={'project_id': res_project.id, 'project_name': res_project.name})
    # api.task.set_output_project(task_id, res_project.id, res_project.name)
    my_app.stop()
def upload_project_meta(api, project_id, config_yaml_info):
    classes = []
    for class_id, class_name in enumerate(config_yaml_info["names"]):
        yaml_class_color = config_yaml_info["colors"][class_id]
        obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Rectangle, color=yaml_class_color)
        classes.append(obj_class)
    tags_arr = [
        sly.TagMeta(name="train", value_type=sly.TagValueType.NONE),
        sly.TagMeta(name="val", value_type=sly.TagValueType.NONE)
    ]
    project_meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(items=classes),
                                   tag_metas=sly.TagMetaCollection(items=tags_arr))
    api.project.update_meta(project_id, project_meta.to_json())
    return project_meta
def construct_model_meta(model):
    names = model.module.names if hasattr(model, 'module') else model.names

    colors = None
    if hasattr(model, 'module') and hasattr(model.module, 'colors'):
        colors = model.module.colors
    elif hasattr(model, 'colors'):
        colors = model.colors
    else:
        colors = []
        for i in range(len(names)):
            colors.append(sly.color.generate_rgb(exist_colors=colors))

    obj_classes = [sly.ObjClass(name, sly.Rectangle, color) for name, color in zip(names, colors)]
    tags = [sly.TagMeta(CONFIDENCE, sly.TagValueType.ANY_NUMBER)]
    meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(obj_classes),
                           tag_metas=sly.TagMetaCollection(tags))
    return meta
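# A minimal sketch of the color fallback used above: sly.color.generate_rgb() returns an RGB triple
# distinct from the colors already collected. The loop below is illustrative, not from the snippet.
import supervisely_lib as sly

colors = []
for _ in range(3):
    colors.append(sly.color.generate_rgb(exist_colors=colors))
print(colors)  # three distinct [R, G, B] triples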
def preview_augs(api: sly.Api, task_id, augs, infos, py_code=None):
    img_info, img = get_random_image(api)
    ann_json = api.annotation.download(img_info.id).annotation
    ann = sly.Annotation.from_json(ann_json, meta)
    res_meta, res_img, res_ann = sly.imgaug_utils.apply(augs, meta, img, ann)
    file_info = save_preview_image(api, task_id, res_img)

    # rename polygonal labels in existing annotation to keep them in gallery in before section
    # cheat code ############################################
    _labels_new_classes = []
    _new_classes = {}
    for label in ann.labels:
        label: sly.Label
        if type(label.obj_class.geometry_type) is sly.Rectangle:
            new_name = f"{label.obj_class.name}_polygon_for_gallery"
            if new_name not in _new_classes:
                _new_classes[new_name] = label.obj_class.clone(name=new_name)
            _labels_new_classes.append(label.clone(obj_class=_new_classes[new_name]))
        else:
            _labels_new_classes.append(label.clone())
    _meta_renamed_polygons = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(list(_new_classes.values())))
    gallery_meta = res_meta.merge(_meta_renamed_polygons)
    # cheat code ############################################

    gallery, sync_keys = ui.get_gallery(project_meta=gallery_meta,
                                        urls=[img_info.full_storage_url, file_info.full_storage_url],
                                        card_names=["original", "augmented"],
                                        img_labels=[_labels_new_classes, res_ann.labels])
    fields = [
        {"field": "data.gallery", "payload": gallery},
        {"field": "state.galleryOptions.syncViewsBindings", "payload": sync_keys},
        {"field": "state.previewPipelineLoading", "payload": False},
        {"field": "state.previewAugLoading", "payload": False},
    ]
    if len(infos) == 1 and py_code is None:
        fields.append({"field": "state.previewPy", "payload": infos[0]["python"]})
    else:
        if py_code is None:
            py_code = sly.imgaug_utils.pipeline_to_python(infos, random_order=False)
        fields.append({"field": "state.previewPy", "payload": py_code})
    api.task.set_fields(task_id, fields)
def main():
    args = parse_args()
    with open(args.in_file) as f:
        lines = f.readlines()
    names_list = [ln for ln in (line.strip() for line in lines) if ln]
    out_classes = sly.ObjClassCollection(items=[
        sly.ObjClass(name=name, geometry_type=sly.Rectangle) for name in names_list
    ])
    cls_mapping = {x: idx for idx, x in enumerate(names_list)}
    res_cfg = {
        SETTINGS: {},
        'out_classes': out_classes.to_json(),
        'class_title_to_idx': cls_mapping,
    }
    config_filename = os.path.join(args.out_dir, sly.TaskPaths.MODEL_CONFIG_NAME)
    dump_json_file(res_cfg, config_filename, indent=4)
    print('Done: {} -> {}'.format(args.in_file, config_filename))
def transform_for_segmentation(meta: sly.ProjectMeta, ann: sly.Annotation) -> (sly.ProjectMeta, sly.Annotation):
    new_classes = {}
    class_masks = {}
    for obj_class in meta.obj_classes:
        obj_class: sly.ObjClass
        new_class = obj_class.clone(name=obj_class.name + "-mask")
        new_classes[obj_class.name] = new_class
        class_masks[obj_class.name] = np.zeros(ann.img_size, np.uint8)
    new_class_collection = sly.ObjClassCollection(list(new_classes.values()))

    for label in ann.labels:
        label.draw(class_masks[label.obj_class.name], color=255)

    new_labels = []
    for class_name, white_mask in class_masks.items():
        mask = white_mask == 255
        obj_class = new_classes[class_name]
        bitmap = sly.Bitmap(data=mask)
        new_labels.append(sly.Label(geometry=bitmap, obj_class=obj_class))

    res_meta = meta.clone(obj_classes=new_class_collection)
    res_ann = ann.clone(labels=new_labels)
    return (res_meta, res_ann)
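# Hedged usage sketch for transform_for_segmentation() above: all labels of one class are rendered
# into a single per-class mask, so two overlapping boxes collapse into one "-mask" bitmap label.
# Class names, sizes, and coordinates below are made up.
import numpy as np
import supervisely_lib as sly

car = sly.ObjClass('car', sly.Rectangle)
meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection([car]))
ann = sly.Annotation(img_size=(100, 200), labels=[
    sly.Label(sly.Rectangle(10, 10, 40, 60), car),
    sly.Label(sly.Rectangle(30, 50, 80, 120), car),   # overlaps the first box
])
seg_meta, seg_ann = transform_for_segmentation(meta, ann)
print(len(seg_ann.labels))                  # 1: both boxes merged into one mask
print(seg_ann.labels[0].obj_class.name)     # 'car-mask'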
# coding: utf-8

import os
import cv2
import numpy as np

import supervisely_lib as sly
from supervisely_lib.io.json import load_json_file

classes_dict = sly.ObjClassCollection()

default_classes_colors = {
    (0, 0, 0): 'unknown',
    (255, 0, 0): 'window',
    (255, 128, 0): 'door',
    (0, 255, 0): 'ground_floor',
    (255, 255, 0): 'facade',
    (128, 255, 255): 'sky',
    (0, 0, 255): 'roof',
    (128, 0, 255): 'balkony'
}


def read_datasets(all_ann):
    src_datasets = {}
    if not os.path.isdir(all_ann):
        raise RuntimeError('There is no directory {}, but it is necessary'.format(all_ann))
    sample_names = []
    for file in os.listdir(all_ann):
        if file.endswith('.png'):
            sample_names.append(os.path.splitext(file)[0])
    src_datasets['dataset'] = sample_names
    sly.logger.info('Found source dataset with {} sample(s).'.format(len(sample_names)))
    return src_datasets
def __init__(self):
    self.settings = json.load(open(sly.TaskPaths.TASK_CONFIG_PATH))
    self.colors_file = os.path.join(sly.TaskPaths.DATA_DIR, 'config.json')
    self.obj_classes = sly.ObjClassCollection()
    self._read_colors()
    self._read_datasets()
src_project = sly.Project(directory=src_project_dir, mode=sly.OpenMode.READ)
dst_project_dir = os.path.join(sly.TaskPaths.OUT_PROJECTS_DIR, dst_project_name)
dst_project = sly.Project(directory=dst_project_dir, mode=sly.OpenMode.CREATE)

tag_meta_train = sly.TagMeta(train_tag_name, sly.TagValueType.NONE)
tag_meta_val = sly.TagMeta(val_tag_name, sly.TagValueType.NONE)

bbox_class_mapping = {
    obj_class.name: (obj_class if (obj_class.geometry_type == sly.Rectangle)
                     else sly.ObjClass(obj_class.name + '_bbox', sly.Rectangle, color=obj_class.color))
    for obj_class in src_project.meta.obj_classes}

dst_meta = src_project.meta.clone(
    obj_classes=sly.ObjClassCollection(bbox_class_mapping.values()),
    tag_metas=src_project.meta.tag_metas.add_items([tag_meta_train, tag_meta_val]))
dst_project.set_meta(dst_meta)

crop_side_fraction = (min_crop_side_fraction, max_crop_side_fraction)

total_images = api.project.get_images_count(src_project_info.id)
if total_images <= 1:
    raise RuntimeError('Need at least 2 images in a project to prepare a training set '
                       '(at least 1 each for training and validation).')
is_train_image = sly_dataset.partition_train_val(total_images, validation_fraction)

# Iterate over datasets and items.
image_idx = 0
for dataset in src_project:
    sly.logger.info('Dataset processing', extra={'dataset_name': dataset.name})
for ds_name, img_paths in zip(datasets, dataset_images):
    ds = api.dataset.create(project.id, ds_name)
    print('Dataset {!r} has been successfully created: id={}'.format(ds.name, ds.id))
    for img_path in img_paths:
        img_hash = api.image.upload_path(img_path)
        image_info = api.image.add(ds.id, sly.fs.get_file_name(img_path), img_hash)
        print('Image (id={}, name={}) has been successfully added'.format(image_info.id, image_info.name))

print("Number of images in created project: ", api.project.get_images_count(project.id))

# define object classes
class_person = sly.ObjClass('person', sly.Rectangle, color=[255, 0, 0])
class_car = sly.ObjClass('car', sly.Polygon, color=[0, 255, 0])
class_road = sly.ObjClass('road', sly.Bitmap, color=[0, 0, 255])
obj_class_collection = sly.ObjClassCollection([class_person, class_car, class_road])

# define tags for images
tagmeta_weather = sly.TagMeta(name='weather',
                              value_type=sly.TagValueType.ONEOF_STRING,
                              possible_values=['rain', 'sun', 'cloud'],
                              color=[153, 0, 153])
tagmeta_annotate = sly.TagMeta('to_annotation', sly.TagValueType.NONE)

# define tags for objects
tagmeta_vehicle_type = sly.TagMeta('vehicle_type', sly.TagValueType.ONEOF_STRING, ['sedan', 'suv', 'hatchback'])
tagmeta_confidence = sly.TagMeta('confidence', sly.TagValueType.ANY_NUMBER)
tagmeta_collection = sly.TagMetaCollection(
    [tagmeta_weather, tagmeta_annotate, tagmeta_vehicle_type, tagmeta_confidence])
def __init__(self):
    self.settings = json.load(open(sly.TaskPaths.TASK_CONFIG_PATH))
    self.obj_classes = sly.ObjClassCollection()
    self.tag_metas = sly.TagMetaCollection()
def main():
    api = sly.Api.from_env()

    # read source project
    src_project = api.project.get_info_by_id(PROJECT_ID)
    if src_project.type != str(sly.ProjectType.IMAGES):
        raise RuntimeError("Project {!r} has type {!r}. App works only with type {!r}"
                           .format(src_project.name, src_project.type, sly.ProjectType.IMAGES))
    src_project_meta_json = api.project.get_meta(src_project.id)
    src_project_meta = sly.ProjectMeta.from_json(src_project_meta_json)

    # create destination project
    DST_PROJECT_NAME = "{} (rasterized)".format(src_project.name)
    dst_project = api.project.create(WORKSPACE_ID, DST_PROJECT_NAME,
                                     description="rasterized", change_name_if_conflict=True)
    sly.logger.info('Destination project is created.',
                    extra={'project_id': dst_project.id, 'project_name': dst_project.name})

    # mapping polygons -> bitmaps
    new_classes_lst = []
    for cls in src_project_meta.obj_classes:
        if need_convert(cls.geometry_type):
            new_class = cls.clone(geometry_type=sly.Bitmap)
        else:
            new_class = cls.clone()
        new_classes_lst.append(new_class)
    dst_classes = sly.ObjClassCollection(new_classes_lst)

    # create destination meta
    dst_project_meta = src_project_meta.clone(obj_classes=dst_classes)
    api.project.update_meta(dst_project.id, dst_project_meta.to_json())

    def convert_to_nonoverlapping(src_ann: sly.Annotation) -> sly.Annotation:
        common_img = np.zeros(src_ann.img_size, np.int32)  # size is (h, w)
        for idx, lbl in enumerate(src_ann.labels, start=1):
            if need_convert(lbl.obj_class.geometry_type):
                if allow_render_non_spatial_for_any_shape(lbl):
                    lbl.draw(common_img, color=idx)
                else:
                    sly.logger.warn(
                        "Object of class {!r} (class shape: {!r}) has non spatial shape {!r}. It will not be rendered."
                        .format(lbl.obj_class.name,
                                lbl.obj_class.geometry_type.geometry_name(),
                                lbl.geometry.geometry_name()))

        new_labels = []
        for idx, lbl in enumerate(src_ann.labels, start=1):
            new_cls = dst_project_meta.obj_classes.get(lbl.obj_class.name)
            if not need_convert(lbl.obj_class.geometry_type):
                new_lbl = lbl.clone(obj_class=new_cls)
                new_labels.append(new_lbl)
            else:
                if not allow_render_non_spatial_for_any_shape(lbl):
                    continue
                mask = common_img == idx
                if np.any(mask):  # figure may be entirely covered by others
                    g = lbl.geometry
                    new_bmp = sly.Bitmap(data=mask,
                                         labeler_login=g.labeler_login,
                                         updated_at=g.updated_at,
                                         created_at=g.created_at)
                    new_lbl = lbl.clone(geometry=new_bmp, obj_class=new_cls)
                    new_labels.append(new_lbl)
        return src_ann.clone(labels=new_labels)

    for ds_info in api.dataset.get_list(src_project.id):
        ds_progress = sly.Progress('Processing dataset: {!r}/{!r}'.format(src_project.name, ds_info.name),
                                   total_cnt=ds_info.images_count)
        dst_dataset = api.dataset.create(dst_project.id, ds_info.name)
        img_infos_all = api.image.get_list(ds_info.id)
        for img_infos in sly.batched(img_infos_all):
            img_names, img_ids, img_metas = zip(*((x.name, x.id, x.meta) for x in img_infos))
            ann_infos = api.annotation.download_batch(ds_info.id, img_ids)
            anns = [sly.Annotation.from_json(x.annotation, src_project_meta) for x in ann_infos]
            new_anns = [convert_to_nonoverlapping(ann) for ann in anns]
            new_img_infos = api.image.upload_ids(dst_dataset.id, img_names, img_ids, metas=img_metas)
            new_img_ids = [x.id for x in new_img_infos]
            api.annotation.upload_anns(new_img_ids, new_anns)
            ds_progress.iters_done_report(len(img_infos))

    api.task.set_output_project(task_id, dst_project.id, dst_project.name)
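# Side note on the upload loop above: sly.batched() simply chunks a sequence so images and
# annotations are downloaded/uploaded in groups. A tiny standalone sketch of that behavior:
import supervisely_lib as sly

for chunk in sly.batched(list(range(7)), batch_size=3):
    print(chunk)  # [0, 1, 2], then [3, 4, 5], then [6]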
def synthesize(api: sly.Api, task_id, state, meta: sly.ProjectMeta, image_infos, labels, bg_images,
               cache_dir, preview=True):
    progress_cb = refresh_progress_preview
    if preview is False:
        progress_cb = refresh_progress

    augs = yaml.safe_load(state["augs"])
    sly.logger.info("Init augs from yaml file")
    aug.init_fg_augs(augs)
    visibility_threshold = augs['objects'].get('visibility', 0.8)

    classes = state["selectedClasses"]

    bg_info = random.choice(bg_images)
    sly.logger.info("Download background")
    bg = api.image.download_np(bg_info.id)
    sly.logger.debug(f"BG shape: {bg.shape}")

    res_image = bg.copy()
    res_labels = []

    # sequence of objects that will be generated
    res_classes = []
    to_generate = []
    for class_name in classes:
        original_class: sly.ObjClass = meta.get_obj_class(class_name)
        res_classes.append(original_class.clone(geometry_type=sly.Bitmap))
        count_range = augs["objects"]["count"]
        count = random.randint(*count_range)
        for i in range(count):
            to_generate.append(class_name)
    random.shuffle(to_generate)
    res_meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection(res_classes))

    progress = sly.Progress("Processing foregrounds", len(to_generate))
    progress_cb(api, task_id, progress)
    progress_every = max(10, int(len(to_generate) / 20))

    cover_img = np.zeros(res_image.shape[:2], np.int32)  # size is (h, w)
    objects_area = defaultdict(lambda: defaultdict(float))

    cached_images = {}
    # generate objects
    for idx, class_name in enumerate(to_generate, start=1):
        if class_name not in labels:
            progress.iter_done_report()
            continue
        image_id = random.choice(list(labels[class_name].keys()))
        label: sly.Label = random.choice(labels[class_name][image_id])

        if image_id in cached_images:
            source_image = cached_images[image_id]
        else:
            image_info = image_infos[image_id]
            source_image = _get_image_using_cache(api, cache_dir, image_id, image_info)
            cached_images[image_id] = source_image

        label_img, label_mask = get_label_foreground(source_image, label)
        # sly.image.write(os.path.join(cache_dir, f"{index}_label_img.png"), label_img)
        # sly.image.write(os.path.join(cache_dir, f"{index}_label_mask.png"), label_mask)

        label_img, label_mask = aug.apply_to_foreground(label_img, label_mask)
        # sly.image.write(os.path.join(cache_dir, f"{index}_aug_label_img.png"), label_img)
        # sly.image.write(os.path.join(cache_dir, f"{index}_aug_label_mask.png"), label_mask)

        label_img, label_mask = aug.resize_foreground_to_fit_into_image(res_image, label_img, label_mask)

        # label_area = g.area
        find_place = False
        for attempt in range(3):
            origin = aug.find_origin(res_image.shape, label_mask.shape)
            g = sly.Bitmap(label_mask[:, :, 0].astype(bool),
                           origin=sly.PointLocation(row=origin[1], col=origin[0]))
            difference = count_visibility(cover_img, g, idx, origin[0], origin[1])

            allow_placement = True
            for object_idx, diff in difference.items():
                new_area = objects_area[object_idx]['current'] - diff
                visibility_portion = new_area / objects_area[object_idx]['original']
                if visibility_portion < visibility_threshold:
                    # sly.logger.warn(f"Object '{idx}', attempt {attempt + 1}: "
                    #                 f"visible portion ({visibility_portion}) < threshold ({visibility_threshold})")
                    allow_placement = False
                    break

            if allow_placement is True:
                find_place = True
                break
            else:
                continue

        if find_place is False:
            sly.logger.warn(
                f"Object '{idx}' is skipped: can not be placed to satisfy visibility threshold")
            continue

        try:
            aug.place_fg_to_bg(label_img, label_mask, res_image, origin[0], origin[1])
            g.draw(cover_img, color=idx)

            for object_idx, diff in difference.items():
                objects_area[object_idx]['current'] -= diff

            current_obj_area = g.area
            objects_area[idx]['current'] = current_obj_area
            objects_area[idx]['original'] = current_obj_area

            res_labels.append(sly.Label(g, res_meta.get_obj_class(class_name)))
        except Exception as e:
            # sly.logger.warn(repr(e))
            sly.logger.warn(
                f"FG placement error: label shape: {label_img.shape}; mask shape: {label_mask.shape}",
                extra={"error": repr(e)})

        progress.iter_done_report()
        if idx % progress_every == 0:  # progress.need_report():
            progress_cb(api, task_id, progress)

    progress_cb(api, task_id, progress)

    res_ann = sly.Annotation(img_size=bg.shape[:2], labels=res_labels)

    # debug visualization
    # sly.image.write(os.path.join(cache_dir, "__res_img.png"), res_image)
    # res_ann.draw(res_image)
    # sly.image.write(os.path.join(cache_dir, "__res_ann.png"), res_image)

    res_meta, res_ann = rasterize.convert_to_nonoverlapping(res_meta, res_ann)
    return res_image, res_ann, res_meta
def generate(api: sly.Api, task_id, context, state, app_logger):
    global PRODUCT_TAGS

    products_count = len(PRODUCTS.keys())
    train_count = state["trainCount"]
    val_count = state["valCount"]
    total_count = products_count * (train_count + val_count)

    augs_settings = yaml.safe_load(state["augs"])
    augs.init_fg_augs(augs_settings)

    PRODUCT_TAGS = PRODUCT_TAGS.add_items([TRAIN_TAG, VAL_TAG])
    res_meta = sly.ProjectMeta(obj_classes=sly.ObjClassCollection([RESULT_CLASS]),
                               tag_metas=PRODUCT_TAGS)
    res_project = api.project.create(WORKSPACE_ID, state["outputProjectName"], change_name_if_conflict=True)
    api.project.update_meta(res_project.id, res_meta.to_json())

    progress = sly.Progress("Generating", total_count)
    for product_id in PRODUCTS.keys():
        dataset = api.dataset.create(res_project.id, str(product_id))
        tag_meta = PRODUCT_TAGS.get(product_id)
        if tag_meta is None:
            raise ValueError(f"TagMeta {product_id} not found")

        # cache images for one product
        images = {}
        for image_id in PRODUCTS[product_id].keys():
            images[image_id] = sly.image.read(IMAGE_PATH[image_id])

        name_index = 0
        for batch in sly.batched([TRAIN_TAG] * train_count + [VAL_TAG] * val_count, batch_size=10):
            final_images = []
            final_anns = []
            final_names = []
            for tag in batch:
                image_id = random.choice(list(PRODUCTS[product_id].keys()))
                img = images[image_id]
                ann = random.choice(list(PRODUCTS[product_id][image_id]))

                label_image = None
                label_mask = None
                label_preview = None
                retry_count = 5
                for retry_idx in range(retry_count):
                    try:
                        label_image, label_mask, label_preview = \
                            try_generate_example(augs_settings, augs, preview=True,
                                                 product_id=product_id, img=img, ann=ann)
                        break
                    except Exception as e:
                        if retry_idx == retry_count - 1:
                            raise e
                        continue

                res_ann = sly.Annotation(label_image.shape[:2],
                                         labels=[label_preview],
                                         img_tags=sly.TagCollection([tag, sly.Tag(tag_meta)]))
                final_images.append(label_image)
                final_anns.append(res_ann)
                final_names.append("{:05d}.jpg".format(name_index))
                name_index += 1

            new_images = api.image.upload_nps(dataset.id, final_names, final_images)
            new_image_ids = [image_info.id for image_info in new_images]
            api.annotation.upload_anns(new_image_ids, final_anns)

            progress.iters_done_report(len(batch))
            refresh_progress(api, task_id, progress)
    refresh_progress(api, task_id, progress)

    res_project = api.project.get_info_by_id(res_project.id)
    fields = [
        {"field": "data.started", "payload": False},
        {"field": "data.resProjectId", "payload": res_project.id},
        {"field": "data.resProjectName", "payload": res_project.name},
        {"field": "data.resProjectPreviewUrl",
         "payload": api.image.preview_url(res_project.reference_image_url, 100, 100)},
    ]
    api.task.set_fields(task_id, fields)
    api.task.set_output_project(task_id, res_project.id, res_project.name)
    app.stop()
def convert(api: sly.Api, task_id, context, state, app_logger):
    api.task.set_field(task_id, "data.started", True)

    TEAM_ID = int(os.environ['context.teamId'])
    WORKSPACE_ID = int(os.environ['context.workspaceId'])
    PROJECT_ID = int(os.environ['modal.state.slyProjectId'])

    src_project = api.project.get_info_by_id(PROJECT_ID)
    if src_project.type != str(sly.ProjectType.IMAGES):
        raise RuntimeError("Project {!r} has type {!r}. App works only with type {!r}".format(
            src_project.name, src_project.type, sly.ProjectType.IMAGES))

    src_meta_json = api.project.get_meta(src_project.id)
    src_meta = sly.ProjectMeta.from_json(src_meta_json)

    new_classes = []
    need_action = False
    selectors = state["selectors"]
    for cls in src_meta.obj_classes:
        cls: sly.ObjClass
        dest = selectors[cls.name]
        if dest == REMAIN_UNCHANGED:
            new_classes.append(cls)
        else:
            need_action = True
            new_classes.append(cls.clone(geometry_type=GET_GEOMETRY_FROM_STR(dest)))

    if need_action is False:
        fields = [
            {"field": "state.showWarningDialog", "payload": True},
            {"field": "data.started", "payload": False},
        ]
        api.task.set_fields(task_id, fields)
        return

    dst_project = api.project.create(src_project.workspace_id,
                                     src_project.name + "(new shapes)",
                                     description="new shapes",
                                     change_name_if_conflict=True)
    sly.logger.info('Destination project is created.',
                    extra={'project_id': dst_project.id, 'project_name': dst_project.name})
    dst_meta = src_meta.clone(obj_classes=sly.ObjClassCollection(new_classes))
    api.project.update_meta(dst_project.id, dst_meta.to_json())

    total_progress = api.project.get_images_count(src_project.id)
    current_progress = 0
    ds_progress = sly.Progress('Processing:', total_cnt=total_progress)
    for ds_info in api.dataset.get_list(src_project.id):
        dst_dataset = api.dataset.create(dst_project.id, ds_info.name)
        img_infos_all = api.image.get_list(ds_info.id)
        for img_infos in sly.batched(img_infos_all):
            img_names, img_ids, img_metas = zip(*((x.name, x.id, x.meta) for x in img_infos))
            ann_infos = api.annotation.download_batch(ds_info.id, img_ids)
            anns = [sly.Annotation.from_json(x.annotation, src_meta) for x in ann_infos]
            new_anns = [convert_annotation(ann, dst_meta) for ann in anns]
            new_img_infos = api.image.upload_ids(dst_dataset.id, img_names, img_ids, metas=img_metas)
            new_img_ids = [x.id for x in new_img_infos]
            api.annotation.upload_anns(new_img_ids, new_anns)

            current_progress += len(img_infos)
            api.task.set_field(task_id, "data.progress", int(current_progress * 100 / total_progress))
            ds_progress.iters_done_report(len(img_infos))

    api.task.set_output_project(task_id, dst_project.id, dst_project.name)

    # to get correct "reference_image_url"
    res_project = api.project.get_info_by_id(dst_project.id)
    fields = [
        {"field": "data.resultProject", "payload": dst_project.name},
        {"field": "data.resultProjectId", "payload": dst_project.id},
        {"field": "data.resultProjectPreviewUrl",
         "payload": api.image.preview_url(res_project.reference_image_url, 100, 100)},
    ]
    api.task.set_fields(task_id, fields)
    my_app.stop()
def do(**kwargs):
    api = sly.Api.from_env()

    src_project = api.project.get_info_by_id(PROJECT_ID)
    if src_project.type != str(sly.ProjectType.IMAGES):
        raise Exception("Project {!r} has type {!r}. App works only with type {!r}".format(
            src_project.name, src_project.type, sly.ProjectType.IMAGES))

    src_project_meta_json = api.project.get_meta(src_project.id)
    src_project_meta = sly.ProjectMeta.from_json(src_project_meta_json)

    # check that project has anyshape classes
    find_anyshape = False
    new_classes_lst = []
    for cls in src_project_meta.obj_classes:
        if cls.geometry_type == sly.AnyGeometry:
            find_anyshape = True
            continue
        new_classes_lst.append(cls.clone())
    dst_classes = sly.ObjClassCollection(new_classes_lst)
    if find_anyshape is False:
        raise Exception("Project {!r} doesn't have classes with shape \"Any\"".format(src_project.name))

    # create destination project
    dst_name = src_project.name if _SUFFIX in src_project.name else src_project.name + _SUFFIX
    dst_project = api.project.create(WORKSPACE_ID, dst_name, description=_SUFFIX, change_name_if_conflict=True)
    sly.logger.info('Destination project is created.',
                    extra={'project_id': dst_project.id, 'project_name': dst_project.name})
    dst_project_meta = src_project_meta.clone(obj_classes=dst_classes)
    api.project.update_meta(dst_project.id, dst_project_meta.to_json())

    def convert_annotation(src_ann, dst_project_meta):
        new_labels = []
        for idx, lbl in enumerate(src_ann.labels):
            lbl: sly.Label
            if lbl.obj_class.geometry_type == sly.AnyGeometry:
                actual_geometry = type(lbl.geometry)
                new_class_name = "{}_{}".format(lbl.obj_class.name, actual_geometry.geometry_name())
                new_class = dst_project_meta.get_obj_class(new_class_name)
                if new_class is None:
                    new_class = sly.ObjClass(name=new_class_name,
                                             geometry_type=actual_geometry,
                                             color=sly.color.random_rgb())
                    dst_project_meta = dst_project_meta.add_obj_class(new_class)
                    api.project.update_meta(dst_project.id, dst_project_meta.to_json())
                new_labels.append(lbl.clone(obj_class=new_class))
            else:
                new_labels.append(lbl)
        return src_ann.clone(labels=new_labels), dst_project_meta

    for ds_info in api.dataset.get_list(src_project.id):
        ds_progress = sly.Progress('Dataset: {!r}'.format(ds_info.name), total_cnt=ds_info.images_count)
        dst_dataset = api.dataset.create(dst_project.id, ds_info.name)
        img_infos_all = api.image.get_list(ds_info.id)
        for img_infos in sly.batched(img_infos_all):
            img_names, img_ids, img_metas = zip(*((x.name, x.id, x.meta) for x in img_infos))
            ann_infos = api.annotation.download_batch(ds_info.id, img_ids)
            anns = [sly.Annotation.from_json(x.annotation, src_project_meta) for x in ann_infos]

            new_anns = []
            for ann in anns:
                new_ann, dst_project_meta = convert_annotation(ann, dst_project_meta)
                new_anns.append(new_ann)

            new_img_infos = api.image.upload_ids(dst_dataset.id, img_names, img_ids, metas=img_metas)
            new_img_ids = [x.id for x in new_img_infos]
            api.annotation.upload_anns(new_img_ids, new_anns)
            ds_progress.iters_done_report(len(img_infos))

    api.task.set_output_project(task_id, dst_project.id, dst_project.name)
    my_app.stop()
def import_cityscapes(api: sly.Api, task_id, context, state, app_logger):
    tag_metas = sly.TagMetaCollection()
    obj_classes = sly.ObjClassCollection()
    dataset_names = []

    storage_dir = my_app.data_dir
    if INPUT_DIR:
        cur_files_path = INPUT_DIR
        extract_dir = os.path.join(storage_dir, str(Path(cur_files_path).parent).lstrip("/"))
        input_dir = os.path.join(extract_dir, Path(cur_files_path).name)
        archive_path = os.path.join(storage_dir, cur_files_path + ".tar")  # cur_files_path.split("/")[-2] + ".tar"
        project_name = Path(cur_files_path).name
    else:
        cur_files_path = INPUT_FILE
        extract_dir = os.path.join(storage_dir, get_file_name(cur_files_path))
        archive_path = os.path.join(storage_dir, get_file_name_with_ext(cur_files_path))
        project_name = get_file_name(INPUT_FILE)
        input_dir = os.path.join(storage_dir, get_file_name(cur_files_path))  # extract_dir

    api.file.download(TEAM_ID, cur_files_path, archive_path)
    if tarfile.is_tarfile(archive_path):
        with tarfile.open(archive_path) as archive:
            archive.extractall(extract_dir)
    else:
        raise Exception("No such file: {}".format(INPUT_FILE))

    new_project = api.project.create(WORKSPACE_ID, project_name, change_name_if_conflict=True)

    tags_template = os.path.join(input_dir, "gtFine", "*")
    tags_paths = glob.glob(tags_template)
    tags = [os.path.basename(tag_path) for tag_path in tags_paths]
    if train_tag in tags and val_tag not in tags:
        split_train = True
    elif trainval_tag in tags and val_tag not in tags:
        split_train = True
    else:
        split_train = False

    search_fine = os.path.join(input_dir, "gtFine", "*", "*", "*_gt*_polygons.json")
    files_fine = glob.glob(search_fine)
    files_fine.sort()

    search_imgs = os.path.join(input_dir, "leftImg8bit", "*", "*", "*_leftImg8bit" + IMAGE_EXT)
    files_imgs = glob.glob(search_imgs)
    files_imgs.sort()

    if len(files_fine) == 0 or len(files_imgs) == 0:
        raise Exception('Input cityscapes format not correct')

    samples_count = len(files_fine)
    progress = sly.Progress('Importing images', samples_count)

    images_pathes_for_compare = []
    images_pathes = {}
    images_names = {}
    anns_data = {}
    ds_name_to_id = {}

    if samples_count > 2:
        random_train_indexes = get_split_idxs(samples_count, samplePercent)

    for idx, orig_ann_path in enumerate(files_fine):
        parent_dir, json_filename = os.path.split(os.path.abspath(orig_ann_path))
        dataset_name = os.path.basename(parent_dir)
        if dataset_name not in dataset_names:
            dataset_names.append(dataset_name)
            ds = api.dataset.create(new_project.id, dataset_name, change_name_if_conflict=True)
            ds_name_to_id[dataset_name] = ds.id
            images_pathes[dataset_name] = []
            images_names[dataset_name] = []
            anns_data[dataset_name] = []

        orig_img_path = json_path_to_image_path(orig_ann_path)
        images_pathes_for_compare.append(orig_img_path)
        if not file_exists(orig_img_path):
            logger.warn('Image for annotation {} not found in dataset {}'.format(
                orig_ann_path.split('/')[-1], dataset_name))
            continue
        images_pathes[dataset_name].append(orig_img_path)
        images_names[dataset_name].append(sly.io.fs.get_file_name_with_ext(orig_img_path))

        tag_path = os.path.split(parent_dir)[0]
        train_val_tag = os.path.basename(tag_path)
        if split_train is True and samples_count > 2:
            if (train_val_tag == train_tag) or (train_val_tag == trainval_tag):
                if idx in random_train_indexes:
                    train_val_tag = train_tag
                else:
                    train_val_tag = val_tag

        # tag_meta = sly.TagMeta(train_val_tag, sly.TagValueType.NONE)
        tag_meta = sly.TagMeta('split', sly.TagValueType.ANY_STRING)
        if not tag_metas.has_key(tag_meta.name):
            tag_metas = tag_metas.add(tag_meta)
        # tag = sly.Tag(tag_meta)
        tag = sly.Tag(meta=tag_meta, value=train_val_tag)

        json_data = json.load(open(orig_ann_path))
        ann = sly.Annotation.from_img_path(orig_img_path)
        for obj in json_data['objects']:
            class_name = obj['label']
            if class_name == 'out of roi':
                polygon = obj['polygon'][:5]
                interiors = [obj['polygon'][5:]]
            else:
                polygon = obj['polygon']
                if len(polygon) < 3:
                    logger.warn('Polygon must contain at least 3 points in ann {}, obj_class {}'.format(
                        orig_ann_path, class_name))
                    continue
                interiors = []
            interiors = [convert_points(interior) for interior in interiors]
            polygon = sly.Polygon(convert_points(polygon), interiors)
            if city_classes_to_colors.get(class_name, None):
                obj_class = sly.ObjClass(name=class_name,
                                         geometry_type=sly.Polygon,
                                         color=city_classes_to_colors[class_name])
            else:
                new_color = generate_rgb(city_colors)
                city_colors.append(new_color)
                obj_class = sly.ObjClass(name=class_name, geometry_type=sly.Polygon, color=new_color)
            ann = ann.add_label(sly.Label(polygon, obj_class))
            if not obj_classes.has_key(class_name):
                obj_classes = obj_classes.add(obj_class)

        ann = ann.add_tag(tag)
        anns_data[dataset_name].append(ann)
        progress.iter_done_report()

    out_meta = sly.ProjectMeta(obj_classes=obj_classes, tag_metas=tag_metas)
    api.project.update_meta(new_project.id, out_meta.to_json())

    for ds_name, ds_id in ds_name_to_id.items():
        dst_image_infos = api.image.upload_paths(ds_id, images_names[ds_name], images_pathes[ds_name])
        dst_image_ids = [img_info.id for img_info in dst_image_infos]
        api.annotation.upload_anns(dst_image_ids, anns_data[ds_name])

    stat_dct = {'samples': samples_count, 'src_ann_cnt': len(files_fine), 'src_img_cnt': len(files_imgs)}
    logger.info('Found img/ann pairs.', extra=stat_dct)

    images_without_anns = set(files_imgs) - set(images_pathes_for_compare)
    if len(images_without_anns) > 0:
        logger.warn('Found source images without corresponding annotations:')
        for im_path in images_without_anns:
            logger.warn('Annotation not found {}'.format(im_path))

    logger.info('Found classes.', extra={
        'cnt': len(obj_classes),
        'classes': sorted([obj_class.name for obj_class in obj_classes])
    })
    logger.info('Created tags.', extra={
        'cnt': len(out_meta.tag_metas),
        'tags': sorted([tag_meta.name for tag_meta in out_meta.tag_metas])
    })
    my_app.stop()