Example #1
 def load(self, target, verbose: bool = False):
     if type(target) is str:
         extension = get_extension_from_path(target)
         if extension == 'yaml':
             check_file_exists(target)
             with open(target, 'r') as f:
                 loaded_data = yaml.load(f, Loader=yaml.FullLoader)
         elif extension == 'json':
             check_file_exists(target)
             with open(target, 'r') as f:
                 loaded_data = json.load(f)
         else:
             logger.error(f"Invalid extension: {extension}")
             logger.error(
                 f"Note that string targets are assumed to be paths.")
             logger.error(f"Valid file extensions: {self.valid_extensions}")
             raise Exception
     elif type(target) is list:
         loaded_data = target
     elif type(target) is dict:
         loaded_data = [target]
     else:
         logger.error(f"Invalid target type: {type(target)}")
         raise Exception
     self.check_valid_config(collection_dict_list=loaded_data)
     self.data = loaded_data
     if verbose:
         logger.good(f"Dataset path config has been loaded successfully.")
Example #2
def write_resized_json(input_img_path: str,
                       input_json_path: str,
                       output_img_path: str,
                       output_json_path: str,
                       target_size: Size,
                       bound_type: str = 'rect',
                       silent: bool = False):
    # Note: bound_type doesn't have any significance right now.
    check_input_path_and_output_dir(input_path=input_img_path,
                                    output_path=output_img_path)
    check_input_path_and_output_dir(input_path=input_json_path,
                                    output_path=output_json_path)

    output_img_dir = get_dirpath_from_filepath(output_img_path)

    annotation = LabelMeAnnotation(annotation_path=input_json_path,
                                   img_dir=output_img_dir,
                                   bound_type=bound_type)
    parser = LabelMeAnnotationParser(annotation_path=input_json_path)
    parser.load()
    img = cv2.imread(filename=input_img_path)
    orig_size = Size.from_cv2_shape(img.shape)
    resize = Resize(old_size=orig_size, new_size=target_size)

    for shape in parser.shape_handler.points:
        shape.points = resize.on_point(
            Point.from_labelme_point_list(shape.points)).to_labelme_format()
    for shape in parser.shape_handler.rectangles:
        shape.points = resize.on_rectangle(
            Rectangle.from_labelme_point_list(
                shape.points)).to_labelme_format()
    for shape in parser.shape_handler.polygons:
        shape.points = resize.on_polygon(
            Polygon.from_labelme_point_list(shape.points)).to_labelme_format()

    # Update Shapes
    parser.shape_handler2shapes()

    # Get Info From Resized Image
    check_file_exists(output_img_path)
    parser.img_path = output_img_path
    img = cv2.imread(output_img_path)
    parser.img_height, parser.img_width = img.shape[:2]

    annotation.copy_from_parser(parser=parser,
                                annotation_path=output_json_path,
                                img_dir=output_img_dir,
                                bound_type='rect')

    annotation_writer = LabelMeAnnotationWriter(labelme_annotation=annotation)
    annotation_writer.write()
    if not silent:
        logger.info(f"Wrote resized labelme annotation to {output_json_path}")
Example #3
 def load_from_path(cls: T, json_path: str) -> T:
     check_file_exists(json_path)
     with open(json_path, 'r') as f:
         json_dict_list = json.load(f)
     json_dict = [
         item for item in json_dict_list
         if item['class_name'] == cls.__name__
     ]
     if len(json_dict) == 0:
         raise Exception('Mode settings not found inside the given json file')
     else:
         json_dict = json_dict[0]
     return cls.from_dict(json_dict)
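
Usage sketch for this classmethod-style loader (hypothetical subclass name; the json file is expected to contain a list of dicts, each carrying a 'class_name' key that selects the matching entry):

    settings = ModeSettings.load_from_path('settings.json')  # hypothetical subclass
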
Example #4
    def load_from_dir(cls,
                      img_dir: str,
                      json_dir: str,
                      show_pbar: bool = True) -> NDDS_Frame_Handler:
        check_dir_exists(json_dir)
        check_dir_exists(img_dir)

        img_pathlist = get_valid_image_paths(img_dir)
        json_path_list = [
            path for path in get_all_files_of_extension(dir_path=json_dir,
                                                        extension='json')
            if not get_filename(path).startswith('_')
        ]
        json_path_list.sort()
        handler = NDDS_Frame_Handler()
        if show_pbar:
            pbar = tqdm(total=len(json_path_list), unit='ann(s)', leave=True)
            pbar.set_description(f'Loading {cls.__name__}')
        for json_path in json_path_list:
            check_file_exists(json_path)
            json_rootname = get_rootname_from_path(json_path)
            matching_img_path = None
            matching_cs_img_path = None
            matching_depth_img_path = None
            matching_is_img_path = None
            for img_path in img_pathlist:
                img_rootname = '.'.join(get_filename(img_path).split('.')[:-1])
                if img_rootname == json_rootname:
                    matching_img_path = img_path
                elif img_rootname == f'{json_rootname}.cs':
                    matching_cs_img_path = img_path
                elif img_rootname == f'{json_rootname}.depth':
                    matching_depth_img_path = img_path
                elif img_rootname == f'{json_rootname}.is':
                    matching_is_img_path = img_path
                if (matching_img_path and matching_cs_img_path
                        and matching_depth_img_path and matching_is_img_path):
                    break
            if matching_img_path is None:
                logger.error(
                    f"Couldn't find image file that matches rootname of {get_filename(json_path)} in {img_dir}"
                )
                raise FileNotFoundError
            frame = NDDS_Frame(
                img_path=matching_img_path,
                ndds_ann=NDDS_Annotation.load_from_path(json_path),
                cs_img_path=matching_cs_img_path,
                depth_img_path=matching_depth_img_path,
                is_img_path=matching_is_img_path)
            handler.append(frame)
            if show_pbar:
                pbar.update()
        if show_pbar:
            pbar.close()
        return handler
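
A hedged example, assuming an NDDS export directory where each `<rootname>.json` sits next to `<rootname>.png`, `<rootname>.cs.png`, `<rootname>.depth.png`, and `<rootname>.is.png` (the suffixes matched above):

    handler = NDDS_Frame_Handler.load_from_dir(img_dir='ndds_export',
                                               json_dir='ndds_export',
                                               show_pbar=True)
    for frame in handler:  # iteration as in Example #8
        print(frame.img_path)
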
Example #5
 def load_from_path(cls, path: str) -> DatasetConfigCollection:
     check_file_exists(path)
     extension = get_extension_from_path(path)
     if extension == 'json':
         with open(path, 'r') as f:
             collection_dict = json.load(f)
     elif extension == 'yaml':
         with open(path, 'r') as f:
             collection_dict = yaml.load(f, Loader=yaml.FullLoader)
     else:
         logger.error(f'Invalid file extension encountered: {extension}')
         logger.error(f'Path specified: {path}')
         raise Exception
     return DatasetConfigCollection.from_dict(collection_dict)
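
Usage sketch (assumes a json or yaml file matching the schema that `DatasetConfigCollection.from_dict` validates in Example #14):

    collection = DatasetConfigCollection.load_from_path('collection.yaml')
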
Example #6
 def from_img_path(self, img_path: str, license_id: int,
                   image_id: int) -> COCO_Image:
     check_file_exists(img_path)
     img = cv2.imread(img_path)
     img_h, img_w = img.shape[:2]
     return COCO_Image(license_id=license_id,
                       file_name=get_filename(img_path),
                       coco_url=img_path,
                       height=img_h,
                       width=img_w,
                       date_captured=get_ctime(img_path),
                       flickr_url=None,
                       id=image_id)
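
A hedged usage sketch (assumes this is a method on a COCO image handler; `license_id` and `image_id` are ids the caller manages):

    coco_image = handler.from_img_path(img_path='img/0001.jpg',  # hypothetical handler and path
                                       license_id=0,
                                       image_id=0)
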
Example #7
    def get_merged_is_img(self, class_merge_map: Dict[str, str]):
        check_file_exists(self.is_img_path)
        is_img_orig = cv2.imread(self.is_img_path)
        working_is_img = is_img_orig.copy()
        for src_class, dst_class in class_merge_map.items():
            # Get Color Map
            src_bgr, dst_bgr = None, None
            for ann_obj in self.ndds_ann.objects:
                if ann_obj.class_name == src_class:
                    if src_bgr is not None:
                        raise Exception(
                            f'Duplicate object class_name: {src_class}')
                    src_bgr = ann_obj.get_color_from_id()
                    continue
                if ann_obj.class_name == dst_class:
                    if dst_bgr is not None:
                        raise Exception(
                            f'Duplicate object class_name: {dst_class}')
                    dst_bgr = ann_obj.get_color_from_id()
                    continue
                if src_bgr is not None and dst_bgr is not None:
                    break

            if src_bgr is None or dst_bgr is None:
                logger.warning(f"Couldn't find either src_bgr or dst_bgr.")
                class_name_list = [
                    ann_obj.class_name for ann_obj in self.ndds_ann.objects
                ]
                logger.warning(f'Available class_name list: {class_name_list}')
                logger.warning(
                    f'src_class: {src_class}, dst_class: {dst_class}')
                logger.warning(f"src_bgr: {src_bgr}, dst_bgr: {dst_bgr}")
                continue

            working_is_img = self.__replace_color(img=working_is_img,
                                                  src_bgr=src_bgr,
                                                  dst_bgr=dst_bgr)

        return working_is_img
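
A hedged sketch of merging one class's instance-segmentation color into another's. Both class names must appear in the frame's `ndds_ann.objects`; otherwise the pair is skipped with a warning:

    # hypothetical frame and class names
    merged_is_img = frame.get_merged_is_img(class_merge_map={'left_hand': 'hand'})
    cv2.imwrite('merged_is.png', merged_is_img)
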
Example #8
 def _check_paths_valid(self, src_img_dir: str):
     check_dir_exists(src_img_dir)
     img_filename_list = []
     duplicate_img_filename_list = []
     for frame in self:
         img_filename = get_filename(frame.img_path)
         if img_filename not in img_filename_list:
             img_filename_list.append(img_filename)
         else:
             duplicate_img_filename_list.append(frame.img_path)
         img_path = f'{src_img_dir}/{img_filename}'
         check_file_exists(img_path)
         if frame.cs_img_path:
             check_file_exists(
                 f'{src_img_dir}/{get_filename(frame.cs_img_path)}')
         if frame.depth_img_path:
             check_file_exists(
                 f'{src_img_dir}/{get_filename(frame.depth_img_path)}')
         if frame.is_img_path:
             check_file_exists(
                 f'{src_img_dir}/{get_filename(frame.is_img_path)}')
     if len(duplicate_img_filename_list) > 0:
         logger.error(
             f'Found the following duplicate image filenames in {self.__class__.__name__}:\n{duplicate_img_filename_list}'
         )
         raise Exception
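
Usage sketch (hypothetical directory): this internal check raises when two frames share an image filename, since every frame's images are expected to live together in `src_img_dir`:

    handler._check_paths_valid(src_img_dir='ndds_export')
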
Example #9
 def _check_paths_valid(self, src_img_dir: str):
     check_dir_exists(src_img_dir)
     img_filename_list = []
     duplicate_img_filename_list = []
     for ann in self:
         img_filename = get_filename(ann.img_path)
         if img_filename not in img_filename_list:
             img_filename_list.append(img_filename)
         else:
             duplicate_img_filename_list.append(ann.img_path)
         img_path = f'{src_img_dir}/{img_filename}'
         check_file_exists(img_path)
     if len(duplicate_img_filename_list) > 0:
         logger.error(
             f'Found the following duplicate image filenames in LabelmeAnnotationHandler:\n{duplicate_img_filename_list}'
         )
         raise Exception
Example #10
 def create_streamer(self,
                     src: str,
                     mode: str = 'mono',
                     scale_factor: float = 1.0,
                     verbose: bool = False) -> StreamerObject:
     if verbose:
         logger.info(f"Creating Streamer for src={src}")
     check_value(item=mode, valid_value_list=['mono', 'dual'])
     check_file_exists(src)
     if mode == 'mono':
         streamer = Streamer(src=src, scale_factor=scale_factor)
     elif mode == 'dual':
         streamer = DualStreamer(src=src,
                                 scale_factor=scale_factor,
                                 direction=0)
     else:
         raise Exception
     return streamer
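
A hedged usage sketch (hypothetical factory object and video path; 'dual' constructs a `DualStreamer` with `direction=0` as above):

    streamer = factory.create_streamer(src='video.mp4',
                                       mode='mono',
                                       scale_factor=0.5,
                                       verbose=True)
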
Example #11
    def get_img_dir(self,
                    check_paths: bool = True,
                    show_pbar: bool = True) -> str:
        """Returns the image directory of the dataset if all of the registered images are in the same directory.
        Otherwise, None is returned.

        Keyword Arguments:
            check_paths {bool} -- [Whether or not you want to verify that the images paths exist during the scan.] (default: {True})

        Returns:
            str -- [Path to the dataset's image directory]
        """
        img_dir = None

        if show_pbar:
            pbar = tqdm(total=len(self.frames), unit='frame(s)', leave=True)
            pbar.set_description('Locating Image Directory')
        for frame in self.frames:
            if check_paths:
                check_file_exists(frame.img_path)
                check_file_exists(frame.is_img_path)
                check_file_exists(frame.cs_img_path)
                check_file_exists(frame.depth_img_path)
            pending_img_dir = get_dirpath_from_filepath(frame.img_path)
            pending_is_img_dir = get_dirpath_from_filepath(frame.is_img_path)
            pending_cs_img_dir = get_dirpath_from_filepath(frame.cs_img_path)
            pending_depth_img_dir = get_dirpath_from_filepath(
                frame.depth_img_path)

            frame_has_common_dir = all([
                pending_dir == pending_img_dir for pending_dir in [
                    pending_is_img_dir, pending_cs_img_dir,
                    pending_depth_img_dir
                ]
            ])
            if frame_has_common_dir:
                if img_dir is None:
                    img_dir = pending_img_dir
                elif img_dir != pending_img_dir:
                    if show_pbar:
                        pbar.close()
                    return None
            else:
                if show_pbar:
                    pbar.close()
                return None
            if show_pbar:
                pbar.update()
        if show_pbar:
            pbar.close()
        return img_dir
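
Usage sketch: the method returns a single directory path, or None as soon as any frame's images fall outside a common directory:

    img_dir = handler.get_img_dir(check_paths=False, show_pbar=False)  # hypothetical handler
    if img_dir is None:
        logger.warning('Registered images are not all in one directory.')
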
Example #12
    def load_from_dir(cls,
                      json_dir: str,
                      img_dir: str = None,
                      camera_config_path: str = None,
                      obj_config_path: str = None,
                      show_pbar: bool = False) -> NDDS_Dataset:
        """Loads NDDS_Dataset object from a directory path.

        Arguments:
            json_dir {str} -- [Path to directory with all of the NDDS annotation json files.]

        Keyword Arguments:
            img_dir {str} -- [Path to directory with all of the NDDS image files.] (default: json_dir)
            camera_config_path {str} -- [Path to the camera configuration json file.] (default: f'{json_dir}/_camera_settings.json')
            obj_config_path {str} -- [Path to the object configuration json file.] (default: f'{json_dir}/_object_settings.json')
            show_pbar {bool} -- [Show the progress bar.] (default: {False})

        Returns:
            NDDS_Dataset -- [NDDS_Dataset object]
        """
        check_dir_exists(json_dir)
        if img_dir is None:
            img_dir = json_dir
        else:
            check_dir_exists(img_dir)
        if camera_config_path is None:
            camera_config_path = f'{json_dir}/_camera_settings.json'
        check_file_exists(camera_config_path)
        if obj_config_path is None:
            obj_config_path = f'{json_dir}/_object_settings.json'
        check_file_exists(obj_config_path)

        return NDDS_Dataset(
            camera_config=CameraConfig.load_from_path(camera_config_path),
            obj_config=ObjectSettings.load_from_path(obj_config_path),
            frames=NDDS_Frame_Handler.load_from_dir(img_dir=img_dir,
                                                    json_dir=json_dir,
                                                    show_pbar=show_pbar))
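
A hedged end-to-end sketch tying Examples #4 and #12 together (assumes the standard NDDS layout with `_camera_settings.json` and `_object_settings.json` inside `json_dir`):

    dataset = NDDS_Dataset.load_from_dir(json_dir='ndds_export', show_pbar=True)
    for frame in dataset.frames:
        print(frame.img_path)
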
Example #13
 def load_from_path(cls,
                    json_path: str,
                    strict: bool = True) -> COCO_Category_Handler:
     check_file_exists(json_path)
     with open(json_path, 'r') as f:
         json_data = json.load(f)
     return COCO_Category_Handler.from_dict_list(json_data, strict=strict)
Example #14
    def from_dict(cls,
                  collection_dict: dict,
                  check_paths: bool = True) -> DatasetConfigCollection:
        check_required_keys(collection_dict,
                            required_keys=[
                                'collection_dir', 'dataset_names',
                                'dataset_specific'
                            ])
        collection_dir = collection_dict['collection_dir']
        check_type(collection_dir, valid_type_list=[str])
        dataset_names = collection_dict['dataset_names']
        check_type(dataset_names, valid_type_list=[list])
        check_type_from_list(dataset_names, valid_type_list=[str])
        dataset_specific = collection_dict['dataset_specific']
        check_type(dataset_specific, valid_type_list=[dict])
        collection_tag = collection_dict.get('tag')
        check_type(collection_tag, valid_type_list=[type(None), str])
        check_required_keys(
            dataset_specific,
            required_keys=['img_dir', 'ann_path', 'ann_format'])
        img_dir = dataset_specific['img_dir']
        check_type(img_dir, valid_type_list=[str, list])
        if type(img_dir) is list:
            check_type_from_list(img_dir, valid_type_list=[str])
            check_list_length(img_dir, correct_length=len(dataset_names))
        ann_path = dataset_specific['ann_path']
        check_type(ann_path, valid_type_list=[str, list])
        if type(ann_path) is list:
            check_type_from_list(ann_path, valid_type_list=[str])
            check_list_length(ann_path, correct_length=len(dataset_names))
        ann_format = dataset_specific['ann_format']
        check_type(ann_format, valid_type_list=[str, list])
        if type(ann_format) is list:
            check_type_from_list(ann_format, valid_type_list=[str])
            check_list_length(ann_format, correct_length=len(dataset_names))
        dataset_tag = dataset_specific.get('tag')
        check_type(dataset_tag, valid_type_list=[type(None), str, list])
        if type(dataset_tag) is list:
            check_type_from_list(dataset_tag,
                                 valid_type_list=[type(None), str])
            check_list_length(dataset_tag, correct_length=len(dataset_names))

        dataset_config_list = []
        for i in range(len(dataset_names)):
            if type(img_dir) is str:
                img_dir0 = img_dir
            elif type(img_dir) is list:
                if i >= len(img_dir):
                    raise IndexError
                img_dir0 = img_dir[i]
            else:
                raise Exception

            if type(ann_path) is str:
                ann_path0 = ann_path
            elif type(ann_path) is list:
                if i >= len(ann_path):
                    raise IndexError
                ann_path0 = ann_path[i]
            else:
                raise Exception

            if type(ann_format) is str:
                ann_format0 = ann_format
            elif type(ann_format) is list:
                if i >= len(ann_format):
                    raise IndexError
                ann_format0 = ann_format[i]
            else:
                raise Exception

            if type(dataset_tag) is str or dataset_tag is None:
                dataset_tag0 = dataset_tag
            elif type(dataset_tag) is list:
                if i >= len(dataset_tag):
                    raise IndexError
                dataset_tag0 = dataset_tag[i]
            else:
                raise Exception

            img_dir1 = rel_to_abs_path(
                f'{collection_dir}/{dataset_names[i]}/{img_dir0}')
            ann_path1 = rel_to_abs_path(
                f'{collection_dir}/{dataset_names[i]}/{ann_path0}')
            if check_paths:
                check_dir_exists(img_dir1)
                check_file_exists(ann_path1)
            config = DatasetConfig(img_dir=img_dir1,
                                   ann_path=ann_path1,
                                   ann_format=ann_format0,
                                   tag=dataset_tag0)
            dataset_config_list.append(config)
        return DatasetConfigCollection(dataset_config_list=dataset_config_list,
                                       tag=collection_tag)
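
An illustrative `collection_dict` (hypothetical paths and format string) matching the schema validated above. Each `dataset_specific` field may be a single value shared by every dataset or a list with exactly one entry per name in `dataset_names`:

    collection_dict = {
        'collection_dir': '/data/collections',
        'dataset_names': ['ds_a', 'ds_b'],
        'dataset_specific': {
            'img_dir': 'img',                                   # shared by both datasets
            'ann_path': ['ann/output.json', 'ann/fixed.json'],  # one per dataset
            'ann_format': 'coco'                                # assumed format tag
        }
    }
    collection = DatasetConfigCollection.from_dict(collection_dict,
                                                   check_paths=False)
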
Example #15
 def load_from_path(cls, json_path: str) -> COCO_License_Handler:
     check_file_exists(json_path)
     with open(json_path, 'r') as f:
         json_data = json.load(f)
     return COCO_License_Handler.from_dict_list(json_data)
Example #16
 def load_from_path(cls, json_path: str) -> LabelmeAnnotation:
     check_file_exists(json_path)
     with open(json_path, 'r') as f:
         json_dict = json.load(f)
     return LabelmeAnnotation.from_dict(json_dict)
Example #17
    def _convert_frame(self,
                       orig_image: COCO_Image,
                       whole_number_dir: str,
                       digit_dir: str,
                       allow_no_measures: bool = False,
                       allow_missing_parts: bool = False):
        whole_number_cat = self.whole_number_dataset.categories.get_unique_category_from_name(
            'whole_number')

        # Get Frame Data
        check_file_exists(orig_image.coco_url)
        frame_img = cv2.imread(orig_image.coco_url)
        frame_anns = self.annotations.get_annotations_from_imgIds(
            [orig_image.id])

        # Process Images and Annotations For Measure Dataset
        self.measure_dataset.images.append(orig_image)
        measure_ann_list = self._get_measure_annotations(
            frame_anns=frame_anns,
            orig_image=orig_image,
            allow_no_measures=allow_no_measures)
        self._load_measure_annotations(measure_ann_list=measure_ann_list)

        # Process Whole Number Images
        whole_number_coco_image_list = self._process_whole_number_images(
            frame_img=frame_img,
            orig_image=orig_image,
            whole_number_dir=whole_number_dir,
            measure_ann_list=measure_ann_list)

        # Process Single Digit Cases
        whole_number_count = 0
        whole_anns_list = self._get_number_anns_list(
            frame_anns=frame_anns,
            measure_ann_list=measure_ann_list,
            orig_image=orig_image,
            supercategory='whole_number',
            allow_no_measures=allow_no_measures)

        for whole_anns, measure_ann, whole_number_coco_image in zip(
                whole_anns_list, measure_ann_list,
                whole_number_coco_image_list):
            for whole_ann in whole_anns:
                self._process_single_digit_ann(
                    frame_img=frame_img,
                    whole_number_coco_image=whole_number_coco_image,
                    whole_ann=whole_ann,
                    measure_ann=measure_ann,
                    whole_number_count=whole_number_count,
                    digit_dir=digit_dir)
                whole_number_count += 1

        # Process Multiple Digit Cases
        part_anns_list = self._get_number_anns_list(
            frame_anns=frame_anns,
            measure_ann_list=measure_ann_list,
            orig_image=orig_image,
            supercategory='part_number',
            allow_no_measures=allow_no_measures)

        for part_anns, measure_ann, whole_number_coco_image in zip(
                part_anns_list, measure_ann_list,
                whole_number_coco_image_list):
            organized_parts = self._get_organized_parts(part_anns=part_anns)

            for organized_part in organized_parts:
                if len(organized_part['anns']) == 1:
                    logger.error(
                        f"Found only 1 part for {organized_part['whole_name']}: {organized_part['part_names']}"
                    )
                    remaining_part = COCO_Annotation.buffer(
                        organized_part['anns'][0])
                    corresponding_coco_image = self.images.get_obj_from_id(
                        remaining_part.image_id)
                    logger.error(
                        f'corresponding_coco_image.file_name: {corresponding_coco_image.file_name}'
                    )
                    if not allow_missing_parts:
                        raise Exception
                    else:
                        continue
                self._process_organized_part(
                    organized_part=organized_part,
                    frame_img=frame_img,
                    whole_number_coco_image=whole_number_coco_image,
                    measure_ann=measure_ann,
                    whole_number_count=whole_number_count,
                    digit_dir=digit_dir)
                whole_number_count += 1
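
A hedged sketch of how the surrounding converter presumably drives this private helper once per source image (argument values are hypothetical):

    for coco_image in self.images:
        self._convert_frame(orig_image=coco_image,
                            whole_number_dir='whole_numbers',
                            digit_dir='digits',
                            allow_missing_parts=True)
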
Example #18
 def load_from_path(cls,
                    json_path: str,
                    strict: bool = True) -> COCO_Annotation:
     check_file_exists(json_path)
     with open(json_path, 'r') as f:
         json_dict = json.load(f)
     return COCO_Annotation.from_dict(ann_dict=json_dict, strict=strict)
Example #19
 def load_from_path(cls,
                    json_path: str,
                    strict: bool = True) -> COCO_Category:
     check_file_exists(json_path)
     with open(json_path, 'r') as f:
         json_dict = json.load(f)
     return COCO_Category.from_dict(json_dict, strict=strict)
Example #20
 def load_from_path(cls, json_path: str) -> COCO_License:
     check_file_exists(json_path)
     with open(json_path, 'r') as f:
         json_dict = json.load(f)
     return COCO_License.from_dict(json_dict)
Example #21
 def load_from_path(cls: H, json_path: str) -> H:
     check_file_exists(json_path)
     with open(json_path, 'r') as f:
         json_dict = json.load(f)
     return cls.from_dict_list(json_dict)
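
Examples #13, #15, #16, and #18 through #21 all follow the same three-step pattern: verify the file exists, parse the json, and delegate to a `from_dict`/`from_dict_list` constructor. A generic usage sketch (hypothetical path):

    license_handler = COCO_License_Handler.load_from_path('coco/licenses.json')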