def read_colors(labels_file_path):
    number_class = {}
    pixel_color = {}
    if os.path.isfile(labels_file_path):
        sly.logger.info('Generate random color mapping.')
        with open(labels_file_path, "r") as file:
            all_lines = file.readlines()
            for line in all_lines:
                line = line.split('\n')[0].split(':')
                temp = int(line[0])
                if temp == 0:
                    temp = 256
                number_class[temp - 1] = line[1][1:]
        default_classes_colors, colors = {}, [(0, 0, 0)]
        for class_name in number_class.values():
            new_color = generate_rgb(colors)
            colors.append(new_color)
            default_classes_colors[class_name] = new_color
        for i, j in number_class.items():
            pixel_color[i] = default_classes_colors[j]
        cls2col = default_classes_colors
    else:
        raise RuntimeError('There is no file {}, but it is necessary'.format(labels_file_path))
    sly.logger.info('Determined {} class(es).'.format(len(cls2col)),
                    extra={'classes': list(cls2col.keys())})
    return number_class, pixel_color
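
# All snippets here rely on a generate_rgb(exist_colors) helper. The sketch below only
# illustrates the assumed behaviour (pick a random RGB triple that is not too close to
# any color already in exist_colors); it is an illustration, not the library code.
import random

def generate_rgb_sketch(exist_colors, min_distance=10, max_attempts=1000):
    # Try random candidates until one differs enough from every color already used.
    for _ in range(max_attempts):
        candidate = [random.randint(0, 255) for _ in range(3)]
        if all(sum(abs(c - e) for c, e in zip(candidate, existing)) > min_distance
               for existing in exist_colors):
            return candidate
    # Palette too crowded: fall back to an unconstrained random color.
    return [random.randint(0, 255) for _ in range(3)]
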
def _check_add_color(self, dct):
    if 'color' not in dct:
        exists_colors = [
            cls_dct['color'] for cls_dct in self.py_container
            if 'color' in cls_dct
        ]
        exists_colors_rgb = [hex2rgb(c) for c in exists_colors]
        new_rgb = generate_rgb(exists_colors_rgb)
        dct['color'] = color2code(new_rgb)
    else:
        pass  # @TODO: validate string
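
# hex2rgb / color2code used above are assumed to convert between "#RRGGBB" strings and
# [R, G, B] integer lists. A minimal sketch of that assumption, not the SDK implementation:
def hex2rgb_sketch(hex_color: str):
    hex_color = hex_color.lstrip('#')
    return [int(hex_color[i:i + 2], 16) for i in (0, 2, 4)]

def color2code_sketch(rgb):
    return '#{:02X}{:02X}{:02X}'.format(*rgb)
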
def from_ann_to_class_mask(ann, mask_outpath, contour_thickness):
    exist_colors = [[0, 0, 0], pascal_contour_color]
    mask = np.zeros((ann.img_size[0], ann.img_size[1], 3), dtype=np.uint8)
    for label in ann.labels:
        if label.obj_class.name == "neutral":
            label.geometry.draw(mask, pascal_contour_color)
            continue
        new_color = generate_rgb(exist_colors)
        exist_colors.append(new_color)
        label.geometry.draw_contour(mask, pascal_contour_color, contour_thickness)
        label.geometry.draw(mask, new_color)

    im = Image.fromarray(mask)
    im = im.convert("P", palette=Image.ADAPTIVE)
    im.save(mask_outpath)
def get_ann(img_path, inst_path):
    global classes_dict
    default_classes_colors = {}
    colors = [(0, 0, 0)]
    ann = sly.Annotation.from_img_path(img_path)
    if inst_path is not None:
        mat = scipy.io.loadmat(inst_path)
        mask = mat['anno']
        all_objects = mask[0][0][1][0]
        class_mask, unique_class_mask = {}, []
        for obj in all_objects:
            object_name, object_mask = obj[0], obj[2]
            class_mask[object_name[0]] = object_mask
            unique_class_mask.append([object_name[0], object_mask])
            if len(obj[3]) > 0:
                all_parts = obj[3][0]
                for part in all_parts:
                    class_mask[part[0][0]] = part[1]
                    unique_class_mask.append([part[0][0], part[1]])
        for class_name in class_mask.keys():
            if class_name not in default_classes_colors:
                new_color = generate_rgb(colors)
                colors.append(new_color)
                default_classes_colors[class_name] = new_color
        for temp in unique_class_mask:
            class_name, cl_mask = temp
            mask = cl_mask.astype(bool)  # np.bool is removed in recent NumPy; plain bool works everywhere
            new_color = default_classes_colors[class_name]
            bitmap = sly.Bitmap(data=mask)
            if not classes_dict.has_key(class_name):
                obj_class = sly.ObjClass(name=class_name,
                                         geometry_type=sly.Bitmap,
                                         color=new_color)
                classes_dict = classes_dict.add(obj_class)  # make it for meta.json
            ann = ann.add_label(sly.Label(bitmap, classes_dict.get(class_name)))
    return ann
def read_colors(colors_file):
    sly.logger.info('Generate random color mapping.')
    number_class = {}
    pixel_color = {}
    with open(colors_file, "r") as file:
        all_lines = file.readlines()
        for line in all_lines:
            line = line.split('\n')[0].split(':')
            number_class[line[0]] = line[1][1:]
    default_classes_colors, colors = {}, [(0, 0, 0)]
    for class_name in number_class.values():
        new_color = generate_rgb(colors)
        colors.append(new_color)
        default_classes_colors[class_name] = new_color
    for i, j in number_class.items():
        pixel_color[i] = default_classes_colors[j]
    class_to_color = default_classes_colors
    sly.logger.info('Determined {} class(es).'.format(len(class_to_color)),
                    extra={'classes': list(class_to_color.keys())})
    return number_class, pixel_color
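
# The colors file parsed above is assumed to hold one "index: class_name" pair per line
# (the sample is hypothetical); the [1:] slice drops the space after ':'.
sample_colors_line = "7: road\n"
index, name = sample_colors_line.split('\n')[0].split(':')
assert index == '7' and name[1:] == 'road'
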
def read_colors(labels_file_path):
    number_class = {1: 'background'}
    pixel_color = {}
    if os.path.isfile(labels_file_path):
        sly.logger.info('Generate random color mapping.')
        with open(labels_file_path, "r") as file:
            all_lines = file.readlines()
            for line in all_lines:
                line = line.strip('\n').split('\t')
                temp = line[4].split(',')
                try:
                    number_class[int(line[0]) + 1] = temp[0]
                except ValueError:
                    continue
        default_classes_colors, colors = {}, [(0, 0, 0)]
        for class_name in number_class.values():
            if class_name == 'background':
                default_classes_colors[class_name] = [0, 0, 0]
            else:
                new_color = generate_rgb(colors)
                colors.append(new_color)
                default_classes_colors[class_name] = new_color
        for i, j in number_class.items():
            pixel_color[i] = default_classes_colors[j]
        cls2col = default_classes_colors
    else:
        raise RuntimeError('There is no file {}, but it is necessary'.format(labels_file_path))
    sly.logger.info('Determined {} class(es).'.format(len(cls2col)),
                    extra={'classes': list(cls2col.keys())})
    return number_class, pixel_color
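
# Hypothetical sample of the tab-separated labels file this variant expects: only
# column 0 (numeric class index) and column 4 (comma-separated names, first one kept)
# are used; the middle columns are assumed to be statistics and are ignored.
sample_labels_line = "1\t0.1576\t11664\t826\twall,walls\n"
parts = sample_labels_line.strip('\n').split('\t')
assert int(parts[0]) + 1 == 2 and parts[4].split(',')[0] == 'wall'
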
def import_cityscapes(api: sly.Api, task_id, context, state, app_logger):
    tag_metas = sly.TagMetaCollection()
    obj_classes = sly.ObjClassCollection()
    dataset_names = []

    storage_dir = my_app.data_dir
    if INPUT_DIR:
        cur_files_path = INPUT_DIR
        extract_dir = os.path.join(storage_dir, str(Path(cur_files_path).parent).lstrip("/"))
        input_dir = os.path.join(extract_dir, Path(cur_files_path).name)
        archive_path = os.path.join(storage_dir, cur_files_path + ".tar")  # cur_files_path.split("/")[-2] + ".tar"
        project_name = Path(cur_files_path).name
    else:
        cur_files_path = INPUT_FILE
        extract_dir = os.path.join(storage_dir, get_file_name(cur_files_path))
        archive_path = os.path.join(storage_dir, get_file_name_with_ext(cur_files_path))
        project_name = get_file_name(INPUT_FILE)
        input_dir = os.path.join(storage_dir, get_file_name(cur_files_path))  # extract_dir

    api.file.download(TEAM_ID, cur_files_path, archive_path)
    if tarfile.is_tarfile(archive_path):
        with tarfile.open(archive_path) as archive:
            archive.extractall(extract_dir)
    else:
        raise Exception("No such file {}".format(INPUT_FILE))

    new_project = api.project.create(WORKSPACE_ID, project_name, change_name_if_conflict=True)

    tags_template = os.path.join(input_dir, "gtFine", "*")
    tags_paths = glob.glob(tags_template)
    tags = [os.path.basename(tag_path) for tag_path in tags_paths]
    if train_tag in tags and val_tag not in tags:
        split_train = True
    elif trainval_tag in tags and val_tag not in tags:
        split_train = True
    else:
        split_train = False

    search_fine = os.path.join(input_dir, "gtFine", "*", "*", "*_gt*_polygons.json")
    files_fine = glob.glob(search_fine)
    files_fine.sort()

    search_imgs = os.path.join(input_dir, "leftImg8bit", "*", "*", "*_leftImg8bit" + IMAGE_EXT)
    files_imgs = glob.glob(search_imgs)
    files_imgs.sort()

    if len(files_fine) == 0 or len(files_imgs) == 0:
        raise Exception('Input cityscapes format not correct')

    samples_count = len(files_fine)
    progress = sly.Progress('Importing images', samples_count)

    images_pathes_for_compare = []
    images_pathes = {}
    images_names = {}
    anns_data = {}
    ds_name_to_id = {}

    if samples_count > 2:
        random_train_indexes = get_split_idxs(samples_count, samplePercent)

    for idx, orig_ann_path in enumerate(files_fine):
        parent_dir, json_filename = os.path.split(os.path.abspath(orig_ann_path))
        dataset_name = os.path.basename(parent_dir)
        if dataset_name not in dataset_names:
            dataset_names.append(dataset_name)
            ds = api.dataset.create(new_project.id, dataset_name, change_name_if_conflict=True)
            ds_name_to_id[dataset_name] = ds.id
            images_pathes[dataset_name] = []
            images_names[dataset_name] = []
            anns_data[dataset_name] = []

        orig_img_path = json_path_to_image_path(orig_ann_path)
        images_pathes_for_compare.append(orig_img_path)
        if not file_exists(orig_img_path):
            logger.warn('Image for annotation {} not found in dataset {}'.format(
                orig_ann_path.split('/')[-1], dataset_name))
            continue
        images_pathes[dataset_name].append(orig_img_path)
        images_names[dataset_name].append(sly.io.fs.get_file_name_with_ext(orig_img_path))

        tag_path = os.path.split(parent_dir)[0]
        train_val_tag = os.path.basename(tag_path)
        if split_train is True and samples_count > 2:
            if (train_val_tag == train_tag) or (train_val_tag == trainval_tag):
                if idx in random_train_indexes:
                    train_val_tag = train_tag
                else:
                    train_val_tag = val_tag

        # tag_meta = sly.TagMeta(train_val_tag, sly.TagValueType.NONE)
        tag_meta = sly.TagMeta('split', sly.TagValueType.ANY_STRING)
        if not tag_metas.has_key(tag_meta.name):
            tag_metas = tag_metas.add(tag_meta)
        # tag = sly.Tag(tag_meta)
        tag = sly.Tag(meta=tag_meta, value=train_val_tag)

        json_data = json.load(open(orig_ann_path))
        ann = sly.Annotation.from_img_path(orig_img_path)

        for obj in json_data['objects']:
            class_name = obj['label']
            if class_name == 'out of roi':
                polygon = obj['polygon'][:5]
                interiors = [obj['polygon'][5:]]
            else:
                polygon = obj['polygon']
                if len(polygon) < 3:
                    logger.warn('Polygon must contain at least 3 points in ann {}, obj_class {}'.format(
                        orig_ann_path, class_name))
                    continue
                interiors = []
            interiors = [convert_points(interior) for interior in interiors]
            polygon = sly.Polygon(convert_points(polygon), interiors)
            if city_classes_to_colors.get(class_name, None):
                obj_class = sly.ObjClass(name=class_name,
                                         geometry_type=sly.Polygon,
                                         color=city_classes_to_colors[class_name])
            else:
                new_color = generate_rgb(city_colors)
                city_colors.append(new_color)
                obj_class = sly.ObjClass(name=class_name,
                                         geometry_type=sly.Polygon,
                                         color=new_color)
            ann = ann.add_label(sly.Label(polygon, obj_class))
            if not obj_classes.has_key(class_name):
                obj_classes = obj_classes.add(obj_class)

        ann = ann.add_tag(tag)
        anns_data[dataset_name].append(ann)
        progress.iter_done_report()

    out_meta = sly.ProjectMeta(obj_classes=obj_classes, tag_metas=tag_metas)
    api.project.update_meta(new_project.id, out_meta.to_json())

    for ds_name, ds_id in ds_name_to_id.items():
        dst_image_infos = api.image.upload_paths(ds_id, images_names[ds_name], images_pathes[ds_name])
        dst_image_ids = [img_info.id for img_info in dst_image_infos]
        api.annotation.upload_anns(dst_image_ids, anns_data[ds_name])

    stat_dct = {
        'samples': samples_count,
        'src_ann_cnt': len(files_fine),
        'src_img_cnt': len(files_imgs)
    }
    logger.info('Found img/ann pairs.', extra=stat_dct)

    images_without_anns = set(files_imgs) - set(images_pathes_for_compare)
    if len(images_without_anns) > 0:
        logger.warn('Found source images without corresponding annotations:')
        for im_path in images_without_anns:
            logger.warn('Annotation not found {}'.format(im_path))

    logger.info('Found classes.',
                extra={
                    'cnt': len(obj_classes),
                    'classes': sorted([obj_class.name for obj_class in obj_classes])
                })
    logger.info('Created tags.',
                extra={
                    'cnt': len(out_meta.tag_metas),
                    'tags': sorted([tag_meta.name for tag_meta in out_meta.tag_metas])
                })
    my_app.stop()
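
# json_path_to_image_path used above is assumed to follow the standard Cityscapes naming
# convention (gtFine polygon JSON -> leftImg8bit image). A sketch of that assumption; the
# real helper and IMAGE_EXT may differ:
def json_path_to_image_path_sketch(json_path: str, image_ext: str = ".png") -> str:
    img_path = json_path.replace("/gtFine/", "/leftImg8bit/")
    return img_path.replace("_gtFine_polygons.json", "_leftImg8bit" + image_ext)
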
def render_video_labels_to_mp4(api: sly.Api, task_id, context, state, app_logger):
    global VIDEO_ID, START_FRAME, END_FRAME, PROJECT_ID

    if VIDEO_ID == "":
        raise ValueError("Video ID is not defined")
    VIDEO_ID = int(VIDEO_ID)

    video_info = api.video.get_info_by_id(VIDEO_ID)
    if video_info is None:
        raise ValueError("Video with id={!r} not found".format(VIDEO_ID))
    PROJECT_ID = video_info.project_id

    if ALL_FRAMES is True:
        START_FRAME = 0
        END_FRAME = video_info.frames_count - 1
    else:
        if START_FRAME == 0 and END_FRAME == 0:
            raise ValueError("Frame Range is not defined")
        if END_FRAME >= video_info.frames_count:
            app_logger.warn("End Frame {} is out of range: video has only {} frames".format(
                END_FRAME, video_info.frames_count))
            END_FRAME = video_info.frames_count - 1
            app_logger.warn("End Frame has been set to {}".format(END_FRAME))

    frame_per_second = video_info.frames_to_timecodes[1]
    stream_speed = 1 / frame_per_second

    meta_json = api.project.get_meta(PROJECT_ID)
    meta = sly.ProjectMeta.from_json(meta_json)
    key_id_map = KeyIdMap()
    if len(meta.obj_classes) == 0:
        raise ValueError("No classes in project")

    ann_info = api.video.annotation.download(VIDEO_ID)
    ann = sly.VideoAnnotation.from_json(ann_info, meta, key_id_map)

    obj_to_color = {}
    exist_colors = []
    video = None
    mp4_name = sly.fs.get_file_name(video_info.name) + ".mp4"
    local_path = os.path.join(my_app.data_dir, mp4_name)

    progress = sly.Progress(video_info.name, END_FRAME - START_FRAME + 1)
    for frame_number in range(START_FRAME, END_FRAME + 1):  # END_FRAME is inclusive, matching the progress total
        frame_np = api.video.frame.download_np(VIDEO_ID, frame_number)
        ann_frame = ann.frames.get(frame_number, None)
        if ann_frame is not None:
            for fig in ann_frame.figures:
                if len(CLASSES) == 0 or fig.video_object.obj_class.name in CLASSES:
                    color = fig.video_object.obj_class.color
                    if COLOR_INS:
                        # Per-instance coloring: each video object gets its own generated color.
                        if fig.video_object.key not in obj_to_color:
                            color = generate_rgb(exist_colors)
                            obj_to_color[fig.video_object.key] = color
                            exist_colors.append(color)
                        else:
                            color = obj_to_color[fig.video_object.key]
                    bbox = None
                    if fig.geometry.geometry_name() == BITMAP or fig.geometry.geometry_name() == 'polygon':
                        mask = np.zeros(frame_np.shape, dtype=np.uint8)
                        fig.geometry.draw(mask, color)
                        frame_np = cv2.addWeighted(frame_np, 1, mask, OPACITY, 0)
                        if SHOW_NAMES == True:
                            bbox = fig.geometry.to_bbox()
                            bbox.draw_contour(frame_np, color, THICKNESS)
                    elif fig.geometry.geometry_name() == 'rectangle':
                        bbox = fig.geometry
                        bbox.draw_contour(frame_np, color, THICKNESS)
                        if SHOW_NAMES == True:
                            tl = 1  # line/font thickness
                            c1, c2 = (bbox.left, bbox.top), (bbox.right, bbox.bottom)
                            tf = 1  # font thickness
                            t_size = cv2.getTextSize(fig.video_object.obj_class.name,
                                                     FONT,
                                                     fontScale=tl,
                                                     thickness=tf)[0]
                            c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                            cv2.rectangle(frame_np, c1, c2,
                                          fig.video_object.obj_class.color, -1,
                                          cv2.LINE_AA)  # filled
                            cv2.putText(frame_np,
                                        fig.video_object.obj_class.name,
                                        (bbox.left + 1, bbox.top - 1),
                                        cv2.FONT_HERSHEY_SIMPLEX,
                                        1, [255, 255, 255],
                                        thickness=THICKNESS,
                                        lineType=cv2.LINE_AA,
                                        bottomLeftOrigin=False)
                    else:
                        raise TypeError("Geometry type {} not supported".format(
                            fig.geometry.geometry_name()))

        if video is None:
            video = cv2.VideoWriter(local_path,
                                    cv2.VideoWriter_fourcc(*'MP4V'),
                                    stream_speed,
                                    (frame_np.shape[1], frame_np.shape[0]))
        frame_np = cv2.cvtColor(frame_np, cv2.COLOR_BGR2RGB)
        video.write(frame_np)
        progress.iter_done_report()

    if video is None:
        raise ValueError('No frames to create video')
    video.release()

    remote_path = os.path.join('/rendered_videos', "{}_{}".format(VIDEO_ID, mp4_name))
    remote_path = api.file.get_free_name(TEAM_ID, remote_path)

    upload_progress = []

    def _print_progress(monitor, upload_progress):
        if len(upload_progress) == 0:
            upload_progress.append(
                sly.Progress(message="Upload {!r}".format(mp4_name),
                             total_cnt=monitor.len,
                             ext_logger=app_logger,
                             is_size=True))
        upload_progress[0].set_current_value(monitor.bytes_read)

    file_info = api.file.upload(TEAM_ID, local_path, remote_path,
                                lambda m: _print_progress(m, upload_progress))
    app_logger.info("Uploaded to Team-Files: {!r}".format(remote_path))
    api.task._set_custom_output(task_id,
                                file_info.id,
                                file_info.name,
                                file_url=file_info.full_storage_url,
                                description=f"File mp4: {remote_path}",
                                icon="zmdi zmdi-cloud-download",
                                download=True)
    sly.fs.silent_remove(local_path)
    my_app.stop()