def labelme_move_all_src_to_dst(self, img_src_pathlist: list, ann_src_pathlist: list):
    """Move each LabelMe JSON and its paired PNG from the src dirs to the dst dirs.

    For every annotation path, the matching image is moved first and then the
    JSON is rewritten for its new location via move_annotation. When
    self.auto_renaming is set, the destination names come from
    get_next_dump_path instead of the source rootname.
    """
    from .util.labelme_utils import move_annotation
    for ann_path in ann_src_pathlist:
        stem = get_rootname_from_path(ann_path)
        src_img = f"{self.img_src}/{stem}.png"
        if self.auto_renaming:
            # Pick the next free destination name; the image is renamed to match.
            dst_ann = get_next_dump_path(dump_dir=self.ann_dst,
                                         file_extension=self.ann_extension)
            stem = get_rootname_from_path(dst_ann)
        else:
            dst_ann = f"{self.ann_dst}/{stem}.{self.ann_extension}"
        dst_img = f"{self.img_dst}/{stem}.png"
        self.premove_check(src_img, dst_img, ann_path, dst_ann)
        move_file(src_img, dst_img, silent=False)
        move_annotation(src_img_path=src_img,
                        src_json_path=ann_path,
                        dst_img_path=dst_img,
                        dst_json_path=dst_ann,
                        bound_type='rect')
        src_preview = '/'.join(ann_path.split('/')[-3:])
        dest_preview = '/'.join(dst_ann.split('/')[-3:])
        print('Moved {} to {}'.format(src_preview, dest_preview))
def get_labelme_annotation_handler(self) -> LabelMeAnnotationHandler:
    """Build a LabelMeAnnotationHandler from the loaded LabelImg annotations.

    Each LabelImg annotation is converted into a LabelMeAnnotation whose JSON
    path lives under self.labelme_annotation_dir, keyed sequentially in the
    returned handler.
    """
    handler = LabelMeAnnotationHandler()
    for src_ann in self.labelimg_annotation_handler.annotations.values():
        stem = get_rootname_from_path(path=src_ann.annotation_path)
        converted = LabelMeAnnotation(
            annotation_path=f"{self.labelme_annotation_dir}/{stem}.json",
            img_dir=self.img_dir,
            bound_type='rect')
        shapes_container = self.get_shape_handler(src_ann)
        converted.version = labelme.__version__
        converted.flags = {}
        converted.shapes = self.get_shapes(shape_handler=shapes_container)
        # Fixed colors carried over from the original conversion settings (RGBA).
        converted.line_color = [0, 255, 0, 128]
        converted.fill_color = [66, 255, 33, 128]
        converted.img_path = src_ann.img_path
        converted.img_height = src_ann.size.height
        converted.img_width = src_ann.size.width
        converted.shape_handler = shapes_container
        # Keys are consecutive integers in insertion order.
        handler.annotations[len(handler.annotations)] = converted
    return handler
def move_all_src_to_dst(self, img_src_pathlist: list, ann_src_pathlist: list):
    """Move each annotation file and its paired PNG from the src dirs to the dst dirs.

    Unlike the LabelMe variant, this just moves the two files; the annotation
    contents are not rewritten. When self.auto_renaming is set, destination
    names come from get_next_dump_path.
    """
    for ann_path in ann_src_pathlist:
        stem = get_rootname_from_path(ann_path)
        src_img = f"{self.img_src}/{stem}.png"
        if self.auto_renaming:
            dst_ann = get_next_dump_path(dump_dir=self.ann_dst,
                                         file_extension=self.ann_extension)
            # The image takes the auto-generated annotation rootname.
            stem = get_rootname_from_path(dst_ann)
        else:
            dst_ann = f"{self.ann_dst}/{stem}.{self.ann_extension}"
        dst_img = f"{self.img_dst}/{stem}.png"
        self.premove_check(src_img, dst_img, ann_path, dst_ann)
        move_file(ann_path, dst_ann, silent=False)
        move_file(src_img, dst_img, silent=False)
def load_annotation_paths(self):
    """Register every LabelImg XML with its paired PNG in the annotation handler.

    Pairing is by rootname: {img_dir}/{stem}.png for {labelimg_annotation_dir}/{stem}.xml.
    Keys are consecutive integers.
    """
    for found_path in get_all_files_of_extension(
            dir_path=self.labelimg_annotation_dir, extension='xml'):
        stem = get_rootname_from_path(path=found_path)
        self.labelimg_annotation_handler.add(
            key=len(self.labelimg_annotation_handler.annotations),
            annotation_path=f"{self.labelimg_annotation_dir}/{stem}.xml",
            img_path=f"{self.img_dir}/{stem}.png")
def run(self):
    """Pair each XML annotation with its PNG, register the pair, then load.

    For every {annotation_dir}/{stem}.xml an image path {img_dir}/{stem}.png
    is assumed; pairs are added to self.handler under consecutive integer
    keys, then self.handler.load_remaining() parses them.
    """
    # NOTE(review): result is unused; the call is kept in case
    # get_all_files_of_extension validates img_dir as a side effect — confirm.
    img_dirs = get_all_files_of_extension(dir_path=self.img_dir,
                                          extension='png')
    annotation_paths = get_all_files_of_extension(dir_path=self.annotation_dir,
                                                  extension='xml')
    # Fixed: replaced `for i, p in zip(range(len(paths)), paths)` — the index
    # `i` was never used, so iterate the paths directly.
    for annotation_path in annotation_paths:
        rootname = get_rootname_from_path(path=annotation_path)
        img_path = f"{self.img_dir}/{rootname}.png"
        xml_path = f"{self.annotation_dir}/{rootname}.xml"
        self.handler.add(key=len(self.handler.annotations),
                         annotation_path=xml_path,
                         img_path=img_path)
    self.handler.load_remaining()
def check_ann_exists_in_img(self, img_pathlist: list, ann_pathlist: list, img_dir_path: str):
    """Verify every annotation has a corresponding PNG under img_dir_path.

    For each annotation path, {img_dir_path}/{rootname}.png must exist.
    Logs every missing image, then raises.

    NOTE(review): img_pathlist is unused; kept for interface compatibility.

    Raises:
        Exception: if one or more corresponding images are missing.
    """
    paths_not_found = []
    for ann_path in ann_pathlist:
        rootname = get_rootname_from_path(ann_path)
        corresponding_img_path = f"{img_dir_path}/{rootname}.png"
        if not file_exists(corresponding_img_path):
            paths_not_found.append(corresponding_img_path)
    if len(paths_not_found) > 0:
        for path_not_found in paths_not_found:
            logger.error(f"File not found: {path_not_found}")
        # Fixed: bare `raise Exception` carried no message; same exception
        # type (callers catching Exception are unaffected), now with context.
        raise Exception(
            f"{len(paths_not_found)} corresponding image(s) not found in {img_dir_path}")
def load_from_dir(cls, img_dir: str, json_dir: str, show_pbar: bool = True) -> NDDS_Frame_Handler:
    """Load an NDDS_Frame_Handler from a directory of NDDS JSONs and images.

    JSON files whose filename starts with '_' (e.g. camera/object settings
    dumps) are skipped. For each remaining JSON, the image list is scanned
    for up to four companions sharing the JSON's rootname: the main image
    (exact rootname) plus optional '<root>.cs', '<root>.depth' and
    '<root>.is' variants.

    Raises:
        FileNotFoundError: if no main image matches a JSON's rootname.
    """
    check_dir_exists(json_dir)
    check_dir_exists(img_dir)
    img_pathlist = get_valid_image_paths(img_dir)
    # Keep only real annotation JSONs; underscore-prefixed files are metadata.
    json_path_list = [
        path for path in get_all_files_of_extension(dir_path=json_dir,
                                                    extension='json')
        if not get_filename(path).startswith('_')
    ]
    json_path_list.sort()
    handler = NDDS_Frame_Handler()
    if show_pbar:
        pbar = tqdm(total=len(json_path_list), unit='ann(s)', leave=True)
        pbar.set_description(f'Loading {cls.__name__}')
    for json_path in json_path_list:
        check_file_exists(json_path)
        json_rootname = get_rootname_from_path(json_path)
        matching_img_path = None
        matching_cs_img_path = None
        matching_depth_img_path = None
        matching_is_img_path = None
        for img_path in img_pathlist:
            # Rootname = filename minus its final extension only, so
            # 'frame.cs.png' yields 'frame.cs'.
            img_rootname = '.'.join(get_filename(img_path).split('.')[:-1])
            if img_rootname == json_rootname:
                matching_img_path = img_path
            elif img_rootname == f'{json_rootname}.cs':
                matching_cs_img_path = img_path
            elif img_rootname == f'{json_rootname}.depth':
                matching_depth_img_path = img_path
            elif img_rootname == f'{json_rootname}.is':
                matching_is_img_path = img_path
            # Early exit once all four companions are found.
            if matching_img_path and matching_cs_img_path and matching_depth_img_path and matching_is_img_path:
                break
        # Only the main image is mandatory; the cs/depth/is variants may stay None.
        if matching_img_path is None:
            logger.error(
                f"Couldn't find image file that matches rootname of {get_filename(json_path)} in {img_dir}"
            )
            raise FileNotFoundError
        frame = NDDS_Frame(
            img_path=matching_img_path,
            ndds_ann=NDDS_Annotation.load_from_path(json_path),
            cs_img_path=matching_cs_img_path,
            depth_img_path=matching_depth_img_path,
            is_img_path=matching_is_img_path)
        handler.append(frame)
        if show_pbar:
            pbar.update()
    return handler
# 'pvnet20201209-epoch599', 'pvnet-darwin20210105-epoch99', 'pvnet-darwin20210105-epoch199', 'pvnet-darwin20210105-epoch299', 'pvnet-darwin20210105-epoch399', 'pvnet-darwin20210105-epoch499', 'pvnet-darwin20210105-epoch599', 'pvnet-darwin20210105-epoch699', 'pvnet-darwin20210105-epoch799', ] test_root_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera' csv_paths = recursively_get_all_filepaths_of_extension(test_root_dir, extension='csv') test_names, datasets = [], [] for csv_path in csv_paths: test_name = get_rootname_from_path(csv_path) img_dir = f'{get_dirpath_from_filepath(csv_path)}/images' assert dir_exists(img_dir), f"Couldn't find image directory: {img_dir}" ann_path = f'{img_dir}/output.json' if not file_exists(ann_path): continue dataset = COCO_Dataset.load_from_path(ann_path, img_dir=img_dir) test_names.append(test_name) datasets.append(dataset) linemod_dataset = Linemod_Dataset.load_from_path( f'/home/clayton/workspace/prj/data/misc_dataset/darwin_datasets/coco2linemod/darwin20210105_blackout/train.json' ) linemod_ann_sample = linemod_dataset.annotations[0] kpt_3d = linemod_ann_sample.fps_3d.copy() kpt_3d.append(linemod_ann_sample.center_3d)
# Pull the 3D keypoints, corner box, intrinsics K and image size from the
# first Linemod annotation/image, then run PVNet inference with every weight
# checkpoint found under weights_dir, dumping predictions per checkpoint.
linemod_ann_sample = linemod_dataset.annotations[0]
kpt_3d = linemod_ann_sample.fps_3d.copy()
kpt_3d.append(linemod_ann_sample.center_3d)  # center appended as final keypoint
corner_3d = linemod_ann_sample.corner_3d
K = linemod_ann_sample.K  # camera intrinsic matrix
linemod_image_sample = linemod_dataset.images[0]
dsize = (linemod_image_sample.width, linemod_image_sample.height)
weights_dir = '/home/clayton/workspace/git/clean-pvnet/data/model/pvnet/custom'
weight_path_list = get_all_files_of_extension(weights_dir, 'pth')
weight_path_list.sort()
infer_data_dump_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera/infer_dump'
make_dir_if_not_exists(infer_data_dump_dir)
# delete_all_files_in_dir(infer_data_dump_dir, ask_permission=True)
weights_pbar = tqdm(total=len(weight_path_list), unit='weight(s)')
for weight_path in weight_path_list:
    rootname = get_rootname_from_path(weight_path)
    weights_pbar.set_description(rootname)
    pred_dump_path = f'{infer_data_dump_dir}/{rootname}.json'
    if file_exists(pred_dump_path):
        # Prediction dump already exists for this checkpoint; skip (resumable).
        weights_pbar.update()
        continue
    inferer = PVNetInferer(weight_path=weight_path)
    # NOTE(review): `coco_dataset` is not defined in this chunk — presumably
    # bound earlier in the script; verify before running standalone.
    inferer.infer_coco_dataset(dataset=coco_dataset,
                               kpt_3d=kpt_3d,
                               corner_3d=corner_3d,
                               K=K,
                               blackout=True,
                               dsize=dsize,
                               pred_dump_path=pred_dump_path)
    weights_pbar.update()
def move(self,
         dst_dataroot: str,
         include_depth: bool = True,
         include_RT: bool = False,
         camera_path: str = None,
         fps_path: str = None,
         preserve_filename: bool = False,
         use_softlink: bool = False,
         ask_permission_on_delete: bool = True,
         show_pbar: bool = True):
    """Move (copy or softlink) all dataset files into dst_dataroot.

    Clears dst_dataroot first, then for every annotation copies/links its
    image, mask, and optionally depth image and RT pickle file; finally the
    camera and FPS settings files if given. When preserve_filename is False,
    files are renamed to id-based names ({image.id}.{ext}, {ann.id}_mask.{ext},
    {ann.id}_depth.{ext}, {ann.id}_RT.pkl) and the annotation/image records
    are updated in place to the new paths.

    Args:
        dst_dataroot: destination directory (created if missing, then emptied).
        include_depth: also move depth images when the annotation has one.
        include_RT: also move the {rootname}_RT.pkl next to each mask.
        camera_path: optional camera settings file to move alongside.
        fps_path: optional FPS settings file to move alongside.
        preserve_filename: keep original filenames (collisions raise) instead
            of renaming to id-based names.
        use_softlink: create softlinks instead of copying.
        ask_permission_on_delete: confirm before emptying dst_dataroot.
        show_pbar: display a tqdm progress bar.

    Raises:
        FileNotFoundError: any referenced source file/dir is missing.
        FileExistsError: a destination file already exists (preserve_filename
            collisions, or camera/FPS settings already present).
    """
    make_dir_if_not_exists(dst_dataroot)
    delete_all_files_in_dir(dst_dataroot,
                            ask_permission=ask_permission_on_delete,
                            verbose=False)
    # Several annotations can share one image; move each image only once.
    processed_image_id_list = []
    pbar = tqdm(total=len(self.annotations), unit='annotation(s)',
                leave=True) if show_pbar else None
    if pbar is not None:
        pbar.set_description('Moving Linemod Dataset Data')
    for linemod_ann in self.annotations:
        if not dir_exists(linemod_ann.data_root):
            raise FileNotFoundError(
                f"Couldn't find data_root at {linemod_ann.data_root}")
        # Images
        linemod_image = self.images.get(id=linemod_ann.image_id)[0]
        if linemod_image.id not in processed_image_id_list:
            img_path = f'{linemod_ann.data_root}/{get_filename(linemod_image.file_name)}'
            if not file_exists(img_path):
                raise FileNotFoundError(
                    f"Couldn't find image at {img_path}")
            if preserve_filename:
                dst_img_path = f'{dst_dataroot}/{get_filename(linemod_image.file_name)}'
                if file_exists(dst_img_path):
                    raise FileExistsError(f"""
                        Image already exists at {dst_img_path}
                        Hint: Use preserve_filename=False to bypass this error.
                        """)
            else:
                # Rename to the image id; record the new name on the image.
                dst_filename = f'{linemod_image.id}.{get_extension_from_filename(linemod_image.file_name)}'
                linemod_image.file_name = dst_filename
                dst_img_path = f'{dst_dataroot}/{dst_filename}'
            if not use_softlink:
                copy_file(src_path=img_path,
                          dest_path=dst_img_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(img_path),
                                dst_path=rel_to_abs_path(dst_img_path))
            processed_image_id_list.append(linemod_image.id)
        # Masks
        if not file_exists(linemod_ann.mask_path):
            raise FileNotFoundError(
                f"Couldn't find mask at {linemod_ann.mask_path}")
        mask_path = linemod_ann.mask_path
        if preserve_filename:
            dst_mask_path = f'{dst_dataroot}/{get_filename(linemod_ann.mask_path)}'
            if file_exists(dst_mask_path):
                raise FileExistsError(f"""
                    Mask already exists at {dst_mask_path}
                    Hint: Use preserve_filename=False to bypass this error.
                    """)
        else:
            mask_filename = get_filename(linemod_ann.mask_path)
            dst_filename = f'{linemod_ann.id}_mask.{get_extension_from_filename(mask_filename)}'
            dst_mask_path = f'{dst_dataroot}/{dst_filename}'
            linemod_ann.mask_path = dst_mask_path
        if not use_softlink:
            copy_file(src_path=mask_path,
                      dest_path=dst_mask_path,
                      silent=True)
        else:
            create_softlink(src_path=rel_to_abs_path(mask_path),
                            dst_path=rel_to_abs_path(dst_mask_path))
        # Depth
        if include_depth and linemod_ann.depth_path is not None:
            if not file_exists(linemod_ann.depth_path):
                raise FileNotFoundError(
                    f"Couldn't find depth at {linemod_ann.depth_path}")
            depth_path = linemod_ann.depth_path
            if preserve_filename:
                dst_depth_path = f'{dst_dataroot}/{get_filename(linemod_ann.depth_path)}'
                if file_exists(dst_depth_path):
                    raise FileExistsError(f"""
                        Depth already exists at {dst_depth_path}
                        Hint: Use preserve_filename=False to bypass this error.
                        """)
            else:
                depth_filename = get_filename(linemod_ann.depth_path)
                dst_filename = f'{linemod_ann.id}_depth.{get_extension_from_filename(depth_filename)}'
                dst_depth_path = f'{dst_dataroot}/{dst_filename}'
                linemod_ann.depth_path = dst_depth_path
            if not use_softlink:
                copy_file(src_path=depth_path,
                          dest_path=dst_depth_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(depth_path),
                                dst_path=rel_to_abs_path(dst_depth_path))
        # RT pickle files
        if include_RT:
            # The RT file shares the mask's rootname, minus any '_mask' suffix.
            rootname = get_rootname_from_path(mask_path)
            if rootname.endswith('_mask'):
                rootname = rootname.replace('_mask', '')
            rt_filename = f'{rootname}_RT.pkl'
            rt_path = f'{linemod_ann.data_root}/{rt_filename}'
            if not file_exists(rt_path):
                raise FileNotFoundError(
                    f"Couldn't find RT pickle file at {rt_path}")
            if preserve_filename:
                dst_rt_path = f'{dst_dataroot}/{rt_filename}'
                # Fixed: this check previously tested dst_depth_path (copy-paste
                # bug), which both missed real RT collisions and could read an
                # unbound name when include_depth=False.
                if file_exists(dst_rt_path):
                    raise FileExistsError(f"""
                        RT pickle file already exists at {dst_rt_path}
                        Hint: Use preserve_filename=False to bypass this error.
                        """)
            else:
                dst_rt_filename = f'{linemod_ann.id}_RT.pkl'
                dst_rt_path = f'{dst_dataroot}/{dst_rt_filename}'
            if not use_softlink:
                copy_file(src_path=rt_path,
                          dest_path=dst_rt_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(rt_path),
                                dst_path=rel_to_abs_path(dst_rt_path))
        if pbar is not None:
            pbar.update()
    # Camera setting
    if camera_path is not None:
        if not file_exists(camera_path):
            raise FileNotFoundError(
                f"Couldn't find camera settings at {camera_path}")
        dst_camera_path = f'{dst_dataroot}/{get_filename(camera_path)}'
        if file_exists(dst_camera_path):
            raise FileExistsError(
                f'Camera settings already saved at {dst_camera_path}')
        if not use_softlink:
            copy_file(src_path=camera_path,
                      dest_path=dst_camera_path,
                      silent=True)
        else:
            create_softlink(src_path=rel_to_abs_path(camera_path),
                            dst_path=rel_to_abs_path(dst_camera_path))
    # FPS setting
    if fps_path is not None:
        if not file_exists(fps_path):
            raise FileNotFoundError(
                f"Couldn't find FPS settings at {fps_path}")
        dst_fps_path = f'{dst_dataroot}/{get_filename(fps_path)}'
        if file_exists(dst_fps_path):
            raise FileExistsError(
                f'FPS settings already saved at {dst_fps_path}')
        if not use_softlink:
            copy_file(src_path=fps_path,
                      dest_path=dst_fps_path,
                      silent=True)
        else:
            create_softlink(src_path=rel_to_abs_path(fps_path),
                            dst_path=rel_to_abs_path(dst_fps_path))
    if pbar is not None:
        pbar.close()
def write_cropped_json(src_img_path: str,
                       src_json_path: str,
                       dst_img_path: str,
                       dst_json_path: str,
                       bound_type='rect',
                       verbose: bool = False):
    """Crop a LabelMe-annotated image by each rectangle and write per-crop outputs.

    Every rectangle in the source JSON defines a crop region. For each region
    (after BBox.buffer), all shapes fully inside it are translated into crop
    coordinates; a cropped image and a matching JSON are written as
    {dst_rootname}_{i}.{ext}. Regions that end up with no shapes produce no
    output.

    Args:
        src_img_path: source image to crop.
        src_json_path: source LabelMe JSON with the shapes/rectangles.
        dst_img_path: template path for cropped images (suffix _{i} added).
        dst_json_path: template path for cropped JSONs (suffix _{i} added).
        bound_type: bound type forwarded to LabelMeAnnotation.
        verbose: log each written JSON.

    Raises:
        Exception: on a malformed rectangle or a negative point after crop.
    """

    def process_shape(shape: Shape, bbox: BBox, new_shape_handler: ShapeHandler):
        # Translate a shape into bbox-local coordinates; shapes fully outside
        # the bbox are skipped, partially-contained ones are logged (and will
        # raise below if any translated point goes negative).
        points = [Point.from_list(point) for point in shape.points]
        contained_count = 0
        for point in points:
            if bbox.contains(point):
                contained_count += 1
        if contained_count == 0:
            return
        elif contained_count == len(points):
            pass
        else:
            logger.error(
                f"Found a shape that is only partially contained by a bbox.")
            logger.error(f"Shape: {shape}")
            logger.error(f"BBox: {bbox}")
        cropped_points = [
            Point(x=point.x - bbox.xmin, y=point.y - bbox.ymin)
            for point in points
        ]
        for point in cropped_points:
            if point.x < 0 or point.y < 0:
                logger.error(f"Encountered negative point after crop: {point}")
                raise Exception
        new_shape = shape.copy()
        new_shape.points = [
            cropped_point.to_list() for cropped_point in cropped_points
        ]
        new_shape_handler.add(new_shape)

    check_input_path_and_output_dir(input_path=src_img_path,
                                    output_path=dst_img_path)
    check_input_path_and_output_dir(input_path=src_json_path,
                                    output_path=dst_json_path)
    output_img_dir = get_dirpath_from_filepath(dst_img_path)
    # Fixed: the template annotation was built with annotation_path=src_img_path
    # (an image path) and img_dir=dst_img_path (a file path used as a dir);
    # use the source JSON and the output image directory. The per-crop copies
    # overwrite these fields anyway, so behavior of written output is unchanged.
    annotation = LabelMeAnnotation(annotation_path=src_json_path,
                                   img_dir=output_img_dir,
                                   bound_type=bound_type)
    parser = LabelMeAnnotationParser(annotation_path=src_json_path)
    parser.load()
    bbox_list = []
    for rect in parser.shape_handler.rectangles:
        numpy_array = np.array(rect.points)
        if numpy_array.shape != (2, 2):
            logger.error(
                f"Encountered rectangle with invalid shape: {numpy_array.shape}"
            )
            logger.error(f"rect: {rect}")
            raise Exception
        xmin, xmax = numpy_array.T[0].min(), numpy_array.T[0].max()
        ymin, ymax = numpy_array.T[1].min(), numpy_array.T[1].max()
        bbox_list.append(BBox.from_list([xmin, ymin, xmax, ymax]))
    # NOTE(review): img_h/img_w are unused below; the read is kept because it
    # fails fast on an unreadable source image (imread returns None) — confirm
    # before removing.
    img = cv2.imread(src_img_path)
    img_h, img_w = img.shape[:2]
    for i, bbox in enumerate(bbox_list):
        bbox = BBox.buffer(bbox)
        new_shape_handler = ShapeHandler()
        for shape_group in [
                parser.shape_handler.points, parser.shape_handler.rectangles,
                parser.shape_handler.polygons
        ]:
            for shape in shape_group:
                process_shape(shape=shape,
                              bbox=bbox,
                              new_shape_handler=new_shape_handler)
        new_shape_list = new_shape_handler.to_shape_list()
        if len(new_shape_list) > 0:
            img_rootname, json_rootname = get_rootname_from_path(
                dst_img_path), get_rootname_from_path(dst_json_path)
            dst_img_dir, dst_json_dir = get_dirpath_from_filepath(
                dst_img_path), get_dirpath_from_filepath(dst_json_path)
            dst_img_extension = get_extension_from_path(dst_img_path)
            dst_cropped_img_path = f"{dst_img_dir}/{img_rootname}_{i}.{dst_img_extension}"
            dst_cropped_json_path = f"{dst_json_dir}/{json_rootname}_{i}.json"
            write_cropped_image(src_path=src_img_path,
                                dst_path=dst_cropped_img_path,
                                bbox=bbox,
                                verbose=verbose)
            cropped_labelme_ann = annotation.copy()
            cropped_labelme_ann.annotation_path = dst_cropped_json_path
            cropped_labelme_ann.img_dir = dst_img_dir
            cropped_labelme_ann.img_path = dst_cropped_img_path
            cropped_img = cv2.imread(dst_cropped_img_path)
            cropped_img_h, cropped_img_w = cropped_img.shape[:2]
            cropped_labelme_ann.img_height = cropped_img_h
            cropped_labelme_ann.img_width = cropped_img_w
            cropped_labelme_ann.shapes = new_shape_list
            cropped_labelme_ann.shape_handler = new_shape_handler
            writer = LabelMeAnnotationWriter(cropped_labelme_ann)
            writer.write()
            if verbose:
                logger.info(f"Wrote {dst_cropped_json_path}")